x86_64-gen.c
/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS         25
#define NB_ASM_REGS     8

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0080 /* only for long double */
#define RC_R8      0x0100
#define RC_R9      0x0200
#define RC_R10     0x0400
#define RC_R11     0x0800
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_LRET    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */
/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20,
};

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)
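
/* REX_BASE() extracts the high bit of a register number (set for
   r8-r15 and xmm8-xmm15) for use in a REX prefix, and REG_VALUE() the
   low three bits used in ModRM/SIB/opcode fields. For example,
   TREG_R10 (10) splits into REX_BASE = 1 and REG_VALUE = 2. */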

/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_LRET TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN     16

/******************************************************/
/* ELF defines */

#define EM_TCC_TARGET EM_X86_64

/* relocation type for 32 bit data relocation */
#define R_DATA_32   R_X86_64_32
#define R_DATA_PTR  R_X86_64_64
#define R_JMP_SLOT  R_X86_64_JUMP_SLOT
#define R_COPY      R_X86_64_COPY

#define ELF_START_ADDR 0x400000
#define ELF_PAGE_SIZE  0x200000

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#include "tcc.h"
#include <assert.h>

ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0, /* rbx */
    0, /* rsp */
    0, /* rbp */
    0, /* rsi */
    0, /* rdi */
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};

static unsigned long func_sub_sp_offset;
static int func_ret_sub;

/* XXX: make it faster ? */
void g(int c)
{
    int ind1;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}

void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}
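
/* o() emits its argument least-significant byte first and stops once
   the remaining value is zero, so multi-byte opcodes are written as
   little-endian integer literals: o(0x8b48) emits the bytes 48 8b
   (REX.W + mov). Single bytes, including 0x00, go through g(). */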

void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}

void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
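
/* In orex(), 'r' supplies the REX.B bit (r/m field extension), 'r2'
   the REX.R bit (reg field extension), and 'll' the REX.W bit (64-bit
   operand size). For example, orex(1, TREG_R8, TREG_RDX, 0x89) emits
   49 89: 0x40 | REX.B | REX.W followed by the mov opcode; the caller
   then emits the ModRM byte. */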

/* output a symbol and patch all calls to it */
void gsym_addr(int t, int a)
{
    int n, *ptr;
    while (t) {
        ptr = (int *)(cur_text_section->data + t);
        n = *ptr; /* next value */
        *ptr = a - t - 4;
        t = n;
    }
}
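
/* Forward jumps to an unresolved label form a linked list threaded
   through the 4-byte displacement fields themselves: each field holds
   the offset of the previous unresolved jump, with 0 ending the chain.
   gsym_addr() walks that chain and replaces each link with the real
   displacement, a - t - 4, counted from the end of the field. */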

void gsym(int t)
{
    gsym_addr(t, ind);
}

/* psym is used to put an instruction with a data field which is a
   reference to a symbol. It is in fact the same as oad ! */
#define psym oad

static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}

/* instruction + 4 bytes data. Return the address of the data */
ST_FUNC int oad(int c, int s)
{
    int ind1;

    o(c);
    ind1 = ind + 4;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    *(int *)(cur_text_section->data + ind) = s;
    s = ind;
    ind = ind1;
    return s;
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_X86_64_32);
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_X86_64_64);
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_X86_64_PC32);
    gen_le32(c-4);
}
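
/* The value stored is c - 4 because R_X86_64_PC32 resolves relative to
   the start of the 4-byte field, while %rip points past it by the time
   the displacement is consumed; the -4 makes up the difference (the
   GOT path below achieves the same thing with an explicit -4 addend). */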

/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifndef TCC_TARGET_PE
    Section *sr;
    ElfW(Rela) *rel;
    greloc(cur_text_section, sym, ind, R_X86_64_GOTPCREL);
    sr = cur_text_section->reloc;
    rel = (ElfW(Rela) *)(sr->data + sr->data_offset - sizeof(ElfW(Rela)));
    rel->r_addend = -4;
#else
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
        get_tok_str(sym->v, NULL), c, r,
        cur_text_section->data[ind-3],
        cur_text_section->data[ind-2],
        cur_text_section->data[ind-1]
        );
    greloc(cur_text_section, sym, ind, R_X86_64_PC32);
#endif
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}

static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        o(0x05 | op_reg);
        if (is_got) {
            gen_gotpcrel(r, sym, c);
        } else {
            gen_addrpc32(r, sym, c);
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}

/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.ul;

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.ul = fc;
            fr = r;
            if (!(reg_classes[fr] & RC_INT))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        ll = 0;
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT) || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR) || ((ft & VT_BTYPE) == VT_ENUM)
                   || ((ft & VT_BTYPE) == VT_FUNC));
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.ull);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            orex(0,r,0,0);
            if ((fc & ~0x100) != TOK_NE)
                oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            else
                oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
            if (fc & 0x100) {
                /* This was a float compare. If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE. */
                fc &= ~0x100;
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(1,r,v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}

/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    ft = v->type.t;
    fc = v->c.ul;
    fr = v->r & VT_VALMASK;
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.ul);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, etc. */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: don't we really come here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: don't we really come here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}

/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        /* constant case */
        if (vtop->r & VT_SYM) {
            /* relocation case */
#ifdef TCC_TARGET_PE
            greloc(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32);
#else
            greloc(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32);
#endif
        } else {
            /* put an empty PC32 relocation */
            put_elf_reloc(symtab_section, cur_text_section,
                          ind + 1, R_X86_64_PC32, 0);
        }
        oad(0xe8 + is_jmp, vtop->c.ul - 4); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}
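
/* 0xe8 is call rel32 and 0xe9 (0xe8 + 1) is jmp rel32, so 'is_jmp'
   selects between them; the immediate is biased by -4 for the same
   end-of-field reason as in gen_addrpc32(). The indirect path uses
   ff /2 (call *r) or ff /4 (jmp *r): the ModRM base 0xd0 for call
   becomes 0xe0 for jmp via (is_jmp << 4). */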

#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}

static int func_scratch;

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align;
    *regsize = 8;
    *ret_align = 1; // Never have to re-align return values for x86-64
    size = type_size(vt, &align);
    ret->ref = NULL;
    if (size > 8) {
        return 0;
    } else if (size > 4) {
        ret->t = VT_LLONG;
        return 1;
    } else if (size > 2) {
        ret->t = VT_INT;
        return 1;
    } else if (size > 1) {
        ret->t = VT_SHORT;
        return 1;
    } else {
        ret->t = VT_BYTE;
        return 1;
    }
}

static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}

void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (size <= 8)
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (size > 8) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                gv(RC_XMM0); /* only use one float register */
                if (arg >= REGN) {
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (arg << 3));
                    d = arg_prepare_reg(arg);
                    /* mov %xmm0, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);

    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);
    vtop--;
}

#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    func_var = (sym->c == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
    if (size > 8) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (size > 8) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL | VT_REF, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_type->ref->c == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
}

/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    v = (func_scratch + -loc + 15) & -16;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, -4); /* call __chkstk, (does the stackframe too) */
        greloc(cur_text_section, sym, ind-4, R_X86_64_PC32);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */
        gen_le32(v);
    }

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}

#else

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}

typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}
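
/* Note: this merge is a simplified form of the System V AMD64 ABI
   classification: the whole aggregate gets a single class instead of
   one class per eightbyte. For example, struct { float x; float y; }
   merges sse+sse -> sse (passed in an XMM register), while
   struct { int a; float b; } merges integer+sse -> integer (passed in
   a general-purpose register). */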

static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
    case VT_ENUM: return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        // Detect union
        if (f->next && (f->c == f->next->c))
            return x86_64_mode_memory;

        mode = x86_64_mode_none;
        for (; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }

    assert(0);
}

static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_LLONG : VT_INT;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}
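
/* Example: for struct { long a, b; } (16 bytes) this returns
   x86_64_mode_integer with *reg_count = 2 and ret->t = VT_QLONG, so
   the value travels in a pair of general-purpose registers; anything
   over 16 bytes is classified x86_64_mode_memory and goes on the
   stack (or is returned via a hidden pointer). */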

ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}

#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return arg_regs[idx];
}

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, run_start, run_end, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;

    /* calculate the number of integer/float register arguments */
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse)
            nb_sse_args += reg_count;
        else if (mode == x86_64_mode_integer)
            nb_reg_args += reg_count;
    }

    /* arguments are collected in runs. Each run is a collection of 8-byte aligned arguments
       and ended by a 16-byte aligned argument. This is because, from the point of view of
       the callee, argument alignment is computed from the bottom up. */
    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    run_start = 0;
    args_size = 0;
    while (run_start != nb_args) {
        int run_gen_reg = gen_reg, run_sse_reg = sse_reg;

        run_end = nb_args;
        stack_adjust = 0;
        for(i = run_start; (i < nb_args) && (run_end == nb_args); i++) {
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            switch (mode) {
            case x86_64_mode_memory:
            case x86_64_mode_x87:
            stack_arg:
                if (align == 16)
                    run_end = i;
                else
                    stack_adjust += size;
                break;

            case x86_64_mode_sse:
                sse_reg -= reg_count;
                if (sse_reg + reg_count > 8) goto stack_arg;
                break;

            case x86_64_mode_integer:
                gen_reg -= reg_count;
                if (gen_reg + reg_count > REGN) goto stack_arg;
                break;
            default: break; /* nothing to be done for x86_64_mode_none */
            }
        }

        gen_reg = run_gen_reg;
        sse_reg = run_sse_reg;

        /* adjust stack to align SSE boundary */
        if (stack_adjust &= 15) {
            /* fetch cpu flag before the following sub will change the value */
            if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
                gv(RC_INT);

            stack_adjust = 16 - stack_adjust;
            o(0x48);
            oad(0xec81, stack_adjust); /* sub $xxx, %rsp */
            args_size += stack_adjust;
        }

        for(i = run_start; i < run_end;) {
            /* Swap argument to top, it will possibly be changed here,
               and might use more temps. At the end of the loop we keep
               it on the stack and swap it back to its original position
               if it is a register. */
            SValue tmp = vtop[0];
            vtop[0] = vtop[-i];
            vtop[-i] = tmp;

            mode = classify_x86_64_arg(&vtop->type, NULL, &size, &align, &reg_count);

            int arg_stored = 1;
            switch (vtop->type.t & VT_BTYPE) {
            case VT_STRUCT:
                if (mode == x86_64_mode_sse) {
                    if (sse_reg > 8)
                        sse_reg -= reg_count;
                    else
                        arg_stored = 0;
                } else if (mode == x86_64_mode_integer) {
                    if (gen_reg > REGN)
                        gen_reg -= reg_count;
                    else
                        arg_stored = 0;
                }

                if (arg_stored) {
                    /* allocate the necessary size on stack */
                    o(0x48);
                    oad(0xec81, size); /* sub $xxx, %rsp */
                    /* generate structure store */
                    r = get_reg(RC_INT);
                    orex(1, r, 0, 0x89); /* mov %rsp, r */
                    o(0xe0 + REG_VALUE(r));
                    vset(&vtop->type, r | VT_LVAL, 0);
                    vswap();
                    vstore();
                    args_size += size;
                }
                break;

            case VT_LDOUBLE:
                assert(0);
                break;

            case VT_FLOAT:
            case VT_DOUBLE:
                assert(mode == x86_64_mode_sse);
                if (sse_reg > 8) {
                    --sse_reg;
                    r = gv(RC_FLOAT);
                    o(0x50); /* push $rax */
                    /* movq %xmmN, (%rsp) */
                    o(0xd60f66);
                    o(0x04 + REG_VALUE(r)*8);
                    o(0x24);
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;

            default:
                assert(mode == x86_64_mode_integer);
                /* simple type */
                /* XXX: implicit cast ? */
                if (gen_reg > REGN) {
                    --gen_reg;
                    r = gv(RC_INT);
                    orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;
            }

            /* And swap the argument back to its original position. */
            tmp = vtop[0];
            vtop[0] = vtop[-i];
            vtop[-i] = tmp;

            if (arg_stored) {
                vrotb(i+1);
                assert((vtop->type.t == tmp.type.t) && (vtop->r == tmp.r));
                vpop();
                --nb_args;
                --run_end;
            } else {
                ++i;
            }
        }

        /* handle 16 byte aligned arguments at end of run */
        run_start = i = run_end;
        while (i < nb_args) {
            /* Rotate argument to top since it will always be popped */
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            if (align != 16)
                break;

            vrotb(i+1);

            if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
                gv(RC_ST0);
                oad(0xec8148, size); /* sub $xxx, %rsp */
                o(0x7cdb); /* fstpt 0(%rsp) */
                g(0x24);
                g(0x00);
                args_size += size;
            } else {
                assert(mode == x86_64_mode_memory);

                /* allocate the necessary size on stack */
                o(0x48);
                oad(0xec81, size); /* sub $xxx, %rsp */
                /* generate structure store */
                r = get_reg(RC_INT);
                orex(1, r, 0, 0x89); /* mov %rsp, r */
                o(0xe0 + REG_VALUE(r));
                vset(&vtop->type, r | VT_LVAL, 0);
                vswap();
                vstore();
                args_size += size;
            }

            vpop();
            --nb_args;
        }
    }

    /* XXX This should be superfluous. */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            gen_reg -= reg_count;
            r = gv(RC_INT);
            int d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here. */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}

#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (func_type->ref->c == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count <= 8) {
                    seen_reg_num += reg_count;
                } else {
                    seen_reg_num = 8;
                    goto stack_arg;
                }
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count <= 8) {
                    seen_sse_num += reg_count;
                } else {
                    seen_sse_num = 8;
                    goto stack_arg;
                }
                break;
            }
        }

        loc -= 16;
        /* movl $0x????????, -0x10(%rbp) */
        o(0xf045c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        o(0xf445c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        o(0xf845c7);
        gen_le32(seen_stack_size);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            o(0xd60f66); /* movq */
            gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
                sse_param_index += reg_count;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
                reg_param_index += reg_count;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }
}

/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */

/* generate a jump to a label */
int gjmp(int t)
{
    return psym(0xe9, t); /* jmp rel32, displacement linked for gsym() */
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb); /* 2-byte jmp rel8 when the target is in range */
        g(r);
    } else {
        oad(0xe9, a - ind - 5); /* otherwise 5-byte jmp rel32 */
    }
}

/* generate a test. set 'inv' to invert test. Stack entry is popped */
int gtst(int inv, int t)
{
    int v, *p;

    v = vtop->r & VT_VALMASK;
    if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100) {
            /* This was a float compare. If the parity flag is set
               the result was unordered. For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test. We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump. */
            vtop->c.i &= ~0x100;
            if (!inv == (vtop->c.i != TOK_NE))
                o(0x067a); /* jp +6 */
            else {
                g(0x0f);
                t = psym(0x8a, t); /* jp t */
            }
        }
        g(0x0f);
        t = psym((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            p = &vtop->c.i;
            while (*p != 0)
                p = (int *)(cur_text_section->data + *p);
            *p = t;
            t = vtop->c.i;
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    }
    vtop--;
    return t;
}

/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.ll == vtop->c.ll)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop->r = VT_CMP;
            vtop->c.i = op;
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}
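
/* The 'opc' values above are the x86 ALU group-1 /digit encodings:
   0=add, 1=or, 2=adc, 3=sbb, 4=and, 5=sub, 6=xor, 7=cmp. The default
   case (comparisons) therefore emits cmp and leaves the result in the
   flags, recorded on the value stack as VT_CMP. */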

void gen_opl(int op)
{
    gen_opi(op);
}

/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int float_type =
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op;
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.ul;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.ul;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.ul = fc;
                load(r, &v1);
                fc = 0;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op | 0x100;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.ul;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.ul = fc;
                load(r, &v1);
                fc = 0;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
        }
    }
}

/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}

/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(TREG_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(TREG_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}

/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}

/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    gfunc_call(1);
    vset(type, REG_IRET, 0);
#else
    int r;
    r = gv(RC_INT); /* allocation size */
    /* sub r,%rsp */
    o(0x2b48);
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
    /* and ~15, %rsp */
    o(0xf0e48348);
    /* mov %rsp, r */
    o(0x8948);
    o(0xe0 | REG_VALUE(r));
    vpop();
    vset(type, r, 0);
#endif
}


/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/