Fix commit 0f5942c6b382105075dabb6f975a313efc63a5f9
[tinycc.git] / x86_64-gen.c
/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS         25
#define NB_ASM_REGS     8

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which does
   assumptions on it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0080 /* only for long double */
#define RC_R8      0x0100
#define RC_R9      0x0200
#define RC_R10     0x0400
#define RC_R11     0x0800
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_LRET    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */

/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20,
};

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)

/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_LRET TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN     16

/******************************************************/
/* ELF defines */

#define EM_TCC_TARGET EM_X86_64

/* relocation type for 32 bit data relocation */
#define R_DATA_32   R_X86_64_32
#define R_DATA_PTR  R_X86_64_64
#define R_JMP_SLOT  R_X86_64_JUMP_SLOT
#define R_COPY      R_X86_64_COPY

#define ELF_START_ADDR 0x08048000
#define ELF_PAGE_SIZE  0x1000

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#include "tcc.h"
#include <assert.h>

ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0,
    0,
    0,
    0,
    0,
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};

static unsigned long func_sub_sp_offset;
static int func_ret_sub;
/* XXX: make it faster ? */
void g(int c)
{
    int ind1;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}

void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}

void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}
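/* Note on the emission helpers above: o() emits the low byte of its
   argument first and stops once the remaining value is zero, so multi-byte
   opcodes are written byte-reversed in hex throughout this file (e.g.
   o(0x7e0ff3) emits f3 0f 7e, the movq load used below). One consequence:
   o(0) emits nothing and high-order zero bytes are dropped, so any 0x00
   byte that must actually be emitted has to go through g() instead. */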
void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
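/* The byte built by orex() above is the AMD64 REX prefix, laid out as
   0100WRXB: 'll' supplies W (64-bit operand size), 'r2' supplies R
   (extension of the ModRM reg field) and 'r' supplies B (extension of the
   ModRM rm/base field); X (SIB index extension) is never needed since this
   generator does not address through SIB index registers. Worked example:
   orex(1, TREG_R8, TREG_RAX, 0x89) emits 49 89, i.e. REX.W+REX.B followed
   by the mov opcode. */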
/* output a symbol and patch all calls to it */
void gsym_addr(int t, int a)
{
    int n, *ptr;
    while (t) {
        ptr = (int *)(cur_text_section->data + t);
        n = *ptr; /* next value */
        *ptr = a - t - 4;
        t = n;
    }
}

void gsym(int t)
{
    gsym_addr(t, ind);
}
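/* How the patching above works: forward jumps whose target is not yet
   known are chained through their own 32-bit displacement slots; each slot
   holds the offset of the next pending slot and 0 terminates the chain.
   gsym_addr() walks this in-place list and stores the final PC-relative
   displacement a - t - 4 in every slot, the -4 accounting for the
   displacement being relative to the end of its own 4-byte field. */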
/* psym is used to put an instruction with a data field which is a
   reference to a symbol. It is in fact the same as oad ! */
#define psym oad

static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}

/* instruction + 4 bytes data. Return the address of the data */
ST_FUNC int oad(int c, int s)
{
    int ind1;

    o(c);
    ind1 = ind + 4;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    *(int *)(cur_text_section->data + ind) = s;
    s = ind;
    ind = ind1;
    return s;
}

ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_X86_64_32);
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_X86_64_64);
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_X86_64_PC32);
    gen_le32(c-4);
}

/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifndef TCC_TARGET_PE
    Section *sr;
    ElfW(Rela) *rel;
    greloc(cur_text_section, sym, ind, R_X86_64_GOTPCREL);
    sr = cur_text_section->reloc;
    rel = (ElfW(Rela) *)(sr->data + sr->data_offset - sizeof(ElfW(Rela)));
    rel->r_addend = -4;
#else
    printf("picpic: %s %x %x | %02x %02x %02x\n", get_tok_str(sym->v, NULL), c, r,
           cur_text_section->data[ind-3],
           cur_text_section->data[ind-2],
           cur_text_section->data[ind-1]
           );
    greloc(cur_text_section, sym, ind, R_X86_64_PC32);
#endif
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}

static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        o(0x05 | op_reg);
        if (is_got) {
            gen_gotpcrel(r, sym, c);
        } else {
            gen_addrpc32(r, sym, c);
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}
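/* For reference, a ModRM byte is mod<<6 | reg<<3 | rm. In gen_modrm_impl()
   above, 0x05|op_reg is mod=00,rm=101, which on x86-64 selects RIP-relative
   addressing; 0x45|op_reg is mod=01,rm=101 (%rbp base with a disp8) and
   0x85|op_reg its disp32 variant; the TREG_MEM cases use mod=10 (disp32)
   or mod=00 with the base register in the rm field. */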
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}

/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t;
    fc = sv->c.ul;

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif
    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.ul = fc;
            fr = r;
            if (!(reg_classes[fr] & RC_INT))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        ll = 0;
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT) || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR) || ((ft & VT_BTYPE) == VT_ENUM)
                   || ((ft & VT_BTYPE) == VT_FUNC));
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.ull);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            orex(0,r,0,0);
            if ((fc & ~0x100) != TOK_NE)
                oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            else
                oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
            if (fc & 0x100) {
                /* This was a float compare. If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE. */
                fc &= ~0x100;
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(1,r,v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    ft = v->type.t;
    fc = v->c.ul;
    fr = v->r & VT_VALMASK;
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.ul);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, etc. */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: do we ever come here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: do we ever come here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        /* constant case */
        if (vtop->r & VT_SYM) {
            /* relocation case */
            greloc(cur_text_section, vtop->sym,
                   ind + 1, R_X86_64_PC32);
        } else {
            /* put an empty PC32 relocation */
            put_elf_reloc(symtab_section, cur_text_section,
                          ind + 1, R_X86_64_PC32, 0);
        }
        oad(0xe8 + is_jmp, vtop->c.ul - 4); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}
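/* Encoding notes for gcall_or_jmp(): in the direct case, 0xe8 is call
   rel32 and 0xe8 + 1 = 0xe9 is jmp rel32, the rel32 field carrying the
   relocation (hence the -4 bias on the addend). In the indirect case the
   emitted bytes are 41 ff d3 (call *%r11) or 41 ff e3 (jmp *%r11):
   is_jmp << 4 turns the ModRM /2 form (call) into the /4 form (jmp). */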
#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}

static int func_scratch;

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}

/* Return 1 if this function returns via an sret pointer, 0 otherwise */
ST_FUNC int gfunc_sret(CType *vt, CType *ret, int *ret_align)
{
    int size, align;
    *ret_align = 1; // Never have to re-align return values for x86-64
    size = type_size(vt, &align);
    ret->ref = NULL;
    if (size > 8) {
        return 1;
    } else if (size > 4) {
        ret->t = VT_LLONG;
        return 0;
    } else if (size > 2) {
        ret->t = VT_INT;
        return 0;
    } else if (size > 1) {
        ret->t = VT_SHORT;
        return 0;
    } else {
        ret->t = VT_BYTE;
        return 0;
    }
}
static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}

void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (size <= 8)
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (size > 8) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                gv(RC_XMM0); /* only use one float register */
                if (arg >= REGN) {
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (arg << 3));
                    d = arg_prepare_reg(arg);
                    /* mov %xmm0, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);

    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    size = gfunc_arg_size(&func_vt);
    if (size > 8) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (size > 8) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL | VT_REF, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_type->ref->c == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
}

/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    v = (func_scratch + -loc + 15) & -16;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, -4); /* call __chkstk, (does the stackframe too) */
        greloc(cur_text_section, sym, ind-4, R_X86_64_PC32);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */
        gen_le32(v);
    }

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}
#else

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}

typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b) {
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}
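/* This merge is a simplification of the full System V ABI, which classifies
   each eightbyte of an aggregate separately; here one class is assigned to
   the whole struct. For example, struct { float x; int y; } merges sse
   with integer and travels as a single 8-byte integer, and an x87
   (long double) member mixed with any other class forces memory. */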
static X86_64_Mode classify_x86_64_inner(CType *ty) {
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
    case VT_ENUM: return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        // Detect union
        if (f->next && (f->c == f->next->c))
            return x86_64_mode_memory;

        mode = x86_64_mode_none;
        for (; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }

    assert(0);
}

static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count) {
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_LLONG : VT_INT;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}
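/* Worked examples for the classification above:
     struct { double a, b; }    -> 16 bytes, x86_64_mode_sse, reg_count 2,
                                    VT_QFLOAT: passed in a pair of XMM
                                    registers;
     struct { long a; char b; } -> 16 bytes, x86_64_mode_integer,
                                    reg_count 2, VT_QLONG: a pair of GP
                                    registers;
     anything larger than 16 bytes -> x86_64_mode_memory: passed on the
                                    stack. */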
ST_FUNC int classify_x86_64_va_arg(CType *ty) {
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}

/* Return 1 if this function returns via an sret pointer, 0 otherwise */
int gfunc_sret(CType *vt, CType *ret, int *ret_align) {
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) == x86_64_mode_memory);
}

#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return arg_regs[idx];
}

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, run_start, run_end, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;

    /* calculate the number of integer/float register arguments */
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse)
            nb_sse_args += reg_count;
        else if (mode == x86_64_mode_integer)
            nb_reg_args += reg_count;
    }

    /* arguments are collected in runs. Each run is a collection of 8-byte aligned arguments
       and ended by a 16-byte aligned argument. This is because, from the point of view of
       the callee, argument alignment is computed from the bottom up. */
    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    run_start = 0;
    args_size = 0;
    while (run_start != nb_args) {
        int run_gen_reg = gen_reg, run_sse_reg = sse_reg;

        run_end = nb_args;
        stack_adjust = 0;
        for(i = run_start; (i < nb_args) && (run_end == nb_args); i++) {
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            switch (mode) {
            case x86_64_mode_memory:
            case x86_64_mode_x87:
            stack_arg:
                if (align == 16)
                    run_end = i;
                else
                    stack_adjust += size;
                break;

            case x86_64_mode_sse:
                sse_reg -= reg_count;
                if (sse_reg + reg_count > 8) goto stack_arg;
                break;

            case x86_64_mode_integer:
                gen_reg -= reg_count;
                if (gen_reg + reg_count > REGN) goto stack_arg;
                break;
            default: break; /* nothing to be done for x86_64_mode_none */
            }
        }

        gen_reg = run_gen_reg;
        sse_reg = run_sse_reg;

        /* adjust stack to align SSE boundary */
        if (stack_adjust &= 15) {
            /* fetch cpu flag before the following sub will change the value */
            if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
                gv(RC_INT);

            stack_adjust = 16 - stack_adjust;
            o(0x48);
            oad(0xec81, stack_adjust); /* sub $xxx, %rsp */
            args_size += stack_adjust;
        }

        for(i = run_start; i < run_end;) {
            /* Swap argument to top, it will possibly be changed here,
               and might use more temps. At the end of the loop we keep
               it on the stack and swap it back to its original position
               if it is a register. */
            SValue tmp = vtop[0];
            vtop[0] = vtop[-i];
            vtop[-i] = tmp;

            mode = classify_x86_64_arg(&vtop->type, NULL, &size, &align, &reg_count);

            int arg_stored = 1;
            switch (vtop->type.t & VT_BTYPE) {
            case VT_STRUCT:
                if (mode == x86_64_mode_sse) {
                    if (sse_reg > 8)
                        sse_reg -= reg_count;
                    else
                        arg_stored = 0;
                } else if (mode == x86_64_mode_integer) {
                    if (gen_reg > REGN)
                        gen_reg -= reg_count;
                    else
                        arg_stored = 0;
                }

                if (arg_stored) {
                    /* allocate the necessary size on stack */
                    o(0x48);
                    oad(0xec81, size); /* sub $xxx, %rsp */
                    /* generate structure store */
                    r = get_reg(RC_INT);
                    orex(1, r, 0, 0x89); /* mov %rsp, r */
                    o(0xe0 + REG_VALUE(r));
                    vset(&vtop->type, r | VT_LVAL, 0);
                    vswap();
                    vstore();
                    args_size += size;
                }
                break;

            case VT_LDOUBLE:
                assert(0);
                break;

            case VT_FLOAT:
            case VT_DOUBLE:
                assert(mode == x86_64_mode_sse);
                if (sse_reg > 8) {
                    --sse_reg;
                    r = gv(RC_FLOAT);
                    o(0x50); /* push %rax */
                    /* movq %xmmN, (%rsp) */
                    o(0xd60f66);
                    o(0x04 + REG_VALUE(r)*8);
                    o(0x24);
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;

            default:
                assert(mode == x86_64_mode_integer);
                /* simple type */
                /* XXX: implicit cast ? */
                if (gen_reg > REGN) {
                    --gen_reg;
                    r = gv(RC_INT);
                    orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;
            }

            /* And swap the argument back to its original position. */
            tmp = vtop[0];
            vtop[0] = vtop[-i];
            vtop[-i] = tmp;

            if (arg_stored) {
                vrotb(i+1);
                assert((vtop->type.t == tmp.type.t) && (vtop->r == tmp.r));
                vpop();
                --nb_args;
                --run_end;
            } else {
                ++i;
            }
        }

        /* handle 16 byte aligned arguments at end of run */
        run_start = i = run_end;
        while (i < nb_args) {
            /* Rotate argument to top since it will always be popped */
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            if (align != 16)
                break;

            vrotb(i+1);

            if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
                gv(RC_ST0);
                oad(0xec8148, size); /* sub $xxx, %rsp */
                o(0x7cdb); /* fstpt 0(%rsp) */
                g(0x24);
                g(0x00);
                args_size += size;
            } else {
                assert(mode == x86_64_mode_memory);

                /* allocate the necessary size on stack */
                o(0x48);
                oad(0xec81, size); /* sub $xxx, %rsp */
                /* generate structure store */
                r = get_reg(RC_INT);
                orex(1, r, 0, 0x89); /* mov %rsp, r */
                o(0xe0 + REG_VALUE(r));
                vset(&vtop->type, r | VT_LVAL, 0);
                vswap();
                vstore();
                args_size += size;
            }

            vpop();
            --nb_args;
        }
    }

    /* XXX This should be superfluous. */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            gen_reg -= reg_count;
            r = gv(RC_INT);
            int d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here. */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (func_type->ref->c == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count <= 8) {
                    seen_reg_num += reg_count;
                } else {
                    seen_reg_num = 8;
                    goto stack_arg;
                }
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count <= 8) {
                    seen_sse_num += reg_count;
                } else {
                    seen_sse_num = 8;
                    goto stack_arg;
                }
                break;
            }
        }

        loc -= 16;
        /* movl $0x????????, -0x10(%rbp) */
        o(0xf045c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        o(0xf445c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        o(0xf845c7);
        gen_le32(seen_stack_size);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            o(0xd60f66); /* movq */
            gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
                sse_param_index += reg_count;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
                reg_param_index += reg_count;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }
}
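/* What the variadic prolog above sets up (it must stay in sync with tcc's
   own stdarg implementation, as classify_x86_64_va_arg already notes): a
   register save area plus three words below %rbp: the byte offset of the
   first free GP register slot (seen_reg_num * 8), the offset of the first
   free SSE slot (48 + seen_sse_num * 16, i.e. past the six 8-byte GP
   slots), and the offset of the next stack-passed argument. These appear
   to play the roles of the gp_offset, fp_offset and overflow_arg_area
   fields of a System V va_list. */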
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */

/* generate a jump to a label */
int gjmp(int t)
{
    return psym(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}

/* generate a test. set 'inv' to invert test. Stack entry is popped */
int gtst(int inv, int t)
{
    int v, *p;

    v = vtop->r & VT_VALMASK;
    if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100) {
            /* This was a float compare. If the parity flag is set
               the result was unordered. For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test. We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump. */
            vtop->c.i &= ~0x100;
            if (!inv == (vtop->c.i != TOK_NE))
                o(0x067a); /* jp +6 */
            else {
                g(0x0f);
                t = psym(0x8a, t); /* jp t */
            }
        }
        g(0x0f);
        t = psym((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            p = &vtop->c.i;
            while (*p != 0)
                p = (int *)(cur_text_section->data + *p);
            *p = t;
            t = vtop->c.i;
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    } else {
        if (is_float(vtop->type.t) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            vpushi(0);
            gen_op(TOK_NE);
        }
        if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            /* constant jmp optimization */
            if ((vtop->c.i != 0) != inv)
                t = gjmp(t);
        } else {
            v = gv(RC_INT);
            orex(0,v,v,0x85);
            o(0xc0 + REG_VALUE(v) * 9);
            g(0x0f);
            t = psym(0x85 ^ inv, t);
        }
    }
    vtop--;
    return t;
}
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.ll == vtop->c.ll)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop->r = VT_CMP;
            vtop->c.i = op;
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}

void gen_opl(int op)
{
    gen_opi(op);
}
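/* Worked example for the constant path of gen_opi() above: adding the
   constant 5 to a long long held in %rcx takes the c == (char)c branch and
   emits orex(1, r, 0, 0x83); o(0xc0 | (0 << 3) | 1); g(5); i.e.
   48 83 c1 05 = addq $5, %rcx. The opc value selects the /digit of the
   0x81/0x83 immediate group: 0 add, 1 or, 2 adc, 3 sbb, 4 and, 5 sub,
   6 xor, 7 cmp (the default case, which handles the comparison tokens). */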
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int float_type =
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            o(0xe9da); /* fucompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op;
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.ul;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.ul;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.ul = fc;
                load(r, &v1);
                fc = 0;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            o(0x2e0f); /* ucomisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op | 0x100;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.ul;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.ul = fc;
                load(r, &v1);
                fc = 0;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
        }
    }
}
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}

/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(TREG_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(TREG_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}

/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}

/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    gfunc_call(1);
    vset(type, REG_IRET, 0);
#else
    int r;
    r = gv(RC_INT); /* allocation size */
    /* sub r,%rsp */
    o(0x2b48);
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
    /* and ~15, %rsp */
    o(0xf0e48348);
    /* mov %rsp, r */
    o(0x8948);
    o(0xe0 | REG_VALUE(r));
    vpop();
    vset(type, r, 0);
#endif
}

/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/