/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS     25
#define NB_ASM_REGS 8

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which makes
   assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0020 /* only for long double */
#define RC_R8      0x0040
#define RC_R9      0x0080
#define RC_XMM0    0x0100
#define RC_XMM1    0x0200
#define RC_XMM2    0x0400
#define RC_XMM3    0x0800
#define RC_XMM4    0x1000
#define RC_XMM5    0x2000
#define RC_XMM6    0x4000
#define RC_XMM7    0x8000
#define RC_RSI     0x10000
#define RC_RDI     0x20000
#define RC_INT1    0x40000 /* function pointer */
#define RC_INT2    0x80000
#define RC_RBX     0x100000
#define RC_R10     0x200000
#define RC_R11     0x400000
#define RC_R12     0x800000
#define RC_R13     0x1000000
#define RC_R14     0x2000000
#define RC_R15     0x4000000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_LRET    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */
#define RC_MASK    (RC_INT|RC_INT1|RC_INT2|RC_FLOAT)
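
/* Each single-register class (RC_RAX, RC_RCX, ...) names exactly one
   machine register; reg_classes[r] & ~RC_MASK therefore strips the
   generic bits and leaves a class that gv() can use to force a value
   into that one register (this is how gfunc_call below targets the
   SysV argument registers). */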
/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_ST0 = 5,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_MEM = 0x20, /* pseudo register flag: operand lives in memory
                        (value assumed, as in mainline tcc; the flag is
                        tested below in gen_modrm_impl and load) */
};

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)
#define FLAG_GOT 0x01
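
/* Example: %r10 is register number 10. REG_VALUE(10) == 2 goes into
   the 3-bit ModRM/opcode register field, and REX_BASE(10) == 1 becomes
   the REX.B (or REX.R) prefix bit, so the pair addresses all 16 GPRs. */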
/* return registers for function */
#define REG_IRET TREG_RAX  /* single word int return register */
#define REG_LRET TREG_RDX  /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN     16

/******************************************************/
/* ELF defines */

#define EM_TCC_TARGET EM_X86_64

/* relocation type for 32 bit data relocation */
#define R_DATA_32   R_X86_64_32
#define R_DATA_PTR  R_X86_64_64
#define R_JMP_SLOT  R_X86_64_JUMP_SLOT
#define R_COPY      R_X86_64_COPY

#define ELF_START_ADDR 0x400000
#define ELF_PAGE_SIZE  0x200000

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#include "tcc.h"
#include <assert.h>
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT|RC_RAX|RC_INT2,
    /* ecx */ RC_INT|RC_RCX|RC_INT2,
    /* edx */ RC_INT|RC_RDX,
    /* rbx */ RC_INT|RC_INT1|RC_INT2|RC_RBX,
    /* rsp */ 0, /* stack pointer: never allocated (placeholder assumed;
                    TREG_ST0 == 5 requires an entry at index 4) */
    /* st0 */ RC_ST0,
    /* rsi */ RC_RSI|RC_INT2,
    /* rdi */ RC_RDI|RC_INT2,
    /* r8  */ RC_INT|RC_R8|RC_INT2,
    /* r9  */ RC_INT|RC_R9|RC_INT2,
    /* r10 */ RC_INT|RC_INT1|RC_INT2|RC_R10,
    /* r11 */ RC_INT|RC_INT1|RC_INT2|RC_R11,
    /* r12 */ RC_INT|RC_INT1|RC_INT2|RC_R12,
    /* r13 */ RC_INT|RC_INT1|RC_INT2|RC_R13,
    /* r14 */ RC_INT|RC_INT1|RC_INT2|RC_R14,
    /* r15 */ RC_INT|RC_INT1|RC_INT2|RC_R15,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT|RC_XMM1,
    /* xmm2 */ RC_FLOAT|RC_XMM2,
    /* xmm3 */ RC_FLOAT|RC_XMM3,
    /* xmm4 */ RC_FLOAT|RC_XMM4,
    /* xmm5 */ RC_FLOAT|RC_XMM5,
    /* xmm6 */ RC_FLOAT|RC_XMM6,
    /* xmm7 */ RC_FLOAT|RC_XMM7,
};

static unsigned long func_sub_sp_offset;
static int func_ret_sub;
/* XXX: make it faster ? */
void g(int c)
{
    int ind1;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}
void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}
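
/* Note that o() emits the low byte first and stops at the first zero
   byte, so a multi-byte opcode is written as a little-endian integer:
   o(0x8b48) emits 0x48 0x8b (REX.W + mov). A literal 0x00 byte can
   never come out of o(); it must be emitted with g(), as gen_putz()
   does below. */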
void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}
void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
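
/* Example: orex(1, TREG_R8, 0, 0x89) emits 0x49 0x89 (REX.W|REX.B then
   the mov opcode), while orex(0, TREG_RAX, 0, 0x89) emits only 0x89
   since no prefix bit is needed. Pseudo values >= VT_CONST are cleared
   so they never contribute a bogus REX bit. */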
/* output a symbol and patch all calls to it */
void gsym_addr(int t, int a)
{
    int n, *ptr;
    while (t) {
        ptr = (int *)(cur_text_section->data + t);
        n = *ptr; /* next value */
        *ptr = a - t - 4;
        t = n;
    }
}
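
/* Forward jumps to a not-yet-defined label are chained through their
   own 32-bit displacement fields: each field holds the buffer offset of
   the previous pending jump (0 terminates the chain), and gsym_addr()
   walks that chain rewriting every field with its final PC-relative
   displacement. */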
void gsym(int t)
{
    gsym_addr(t, ind);
}

/* psym is used to put an instruction with a data field which is a
   reference to a symbol. It is in fact the same as oad ! */
#define psym oad

static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}

/* instruction + 4 bytes data. Return the address of the data */
ST_FUNC int oad(int c, int s)
{
    int ind1;

    o(c);
    ind1 = ind + 4;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    *(int *)(cur_text_section->data + ind) = s;
    s = ind;
    ind = ind1;
    return s;
}
ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_X86_64_32);
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_X86_64_64);
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_X86_64_PC32);
    gen_le32(c-4);
}
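
/* The -4 above accounts for %rip-relative addressing being computed
   from the end of the 4-byte displacement field, while the relocation
   offset points at its start. */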
/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifndef TCC_TARGET_PE
    Section *sr;
    ElfW(Rela) *rel;
    greloc(cur_text_section, sym, ind, R_X86_64_GOTPCREL);
    sr = cur_text_section->reloc;
    rel = (ElfW(Rela) *)(sr->data + sr->data_offset - sizeof(ElfW(Rela)));
    rel->r_addend = -4;
#else
    printf("picpic: %s %x %x | %02x %02x %02x\n", get_tok_str(sym->v, NULL), c, r,
           cur_text_section->data[ind-3],
           cur_text_section->data[ind-2],
           cur_text_section->data[ind-1]
           );
    greloc(cur_text_section, sym, ind, R_X86_64_PC32);
#endif
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        o(0x05 | op_reg);
        if (is_got) {
            gen_gotpcrel(r, sym, c);
        } else {
            gen_addrpc32(r, sym, c);
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if (r & TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}
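
/* ModRM encodings used above: 0x05|op_reg is mod=00 with base 101,
   i.e. %rip-relative; 0x45|op_reg and 0x85|op_reg are disp8/disp32 off
   %rbp; 0x80|op_reg|reg is disp32 off a general register and
   0x00|op_reg|reg is the plain register-indirect form. */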
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.ul;

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.ul = fc;
            fr = r;
            if (!(reg_classes[fr] & RC_INT))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        ll = 0;
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT) || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR) || ((ft & VT_BTYPE) == VT_ENUM)
                   || ((ft & VT_BTYPE) == VT_FUNC));
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.ull);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            orex(0,r,0,0);
            if ((fc & ~0x100) != TOK_NE)
                oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            else
                oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
            if (fc & 0x100)
            {
                /* This was a float compare. If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE. */
                fc &= ~0x100;
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if (reg_classes[r] & RC_FLOAT) {
                if(v == TREG_ST0){
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmm0 */
                    o(0x100ff2);
                    o(0xf02444 + REG_VALUE(r)*8);
                }else if(reg_classes[v] & RC_FLOAT){
                    o(0x7e0ff3);
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }else
                    assert(0);
            } else if (r == TREG_ST0) {
                assert(reg_classes[v] & RC_FLOAT);
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmm0,-0x10(%rsp) */
                o(0x110ff2);
                o(0xf02444 + REG_VALUE(v)*8);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(1,r,v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *sv)
{
    int fr, bt, ft, fc, ll, v;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.ul;
    fr = sv->r;
    bt = ft & VT_BTYPE;
    ll = is64_type(ft);
    v = fr & VT_VALMASK;

//#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    // if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
    //    o(0x1d8b4c);
    //    gen_gotpcrel(TREG_R11, v->sym, v->c.ul);
    //    pic = is64_type(bt) ? 0x49 : 0x41;
    // }
//#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        orex(0, fr, r, 0x110ff3); /* movss */
    } else if (bt == VT_DOUBLE) {
        orex(0, fr, r, 0x110ff2); /* movsd */
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        orex(0, fr, r, 0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(ll, fr, r, 0x88);
        else {
            orex(ll, fr, r, 0x89);
        }
    }
    if (v == VT_CONST || v == VT_LOCAL || (fr & VT_LVAL)) {
        gen_modrm(r, fr, sv->sym, fc);
    } else if (v != r) {
        /* XXX: do we ever really get here? */
        abort();
        o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8); /* mov r, fr */
    }
}
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        /* constant case */
        if (vtop->r & VT_SYM) {
            /* relocation case */
            greloc(cur_text_section, vtop->sym,
                   ind + 1, R_X86_64_PLT32);
        } else {
            /* put an empty PC32 relocation */
            put_elf_reloc(symtab_section, cur_text_section,
                          ind + 1, R_X86_64_PC32, 0);
        }
        oad(0xe8 + is_jmp, vtop->c.ul - 4); /* call/jmp imm */
    } else {
        /* otherwise, indirect call */
        r = get_reg(RC_INT1);
        load(r, vtop);
        orex(0, r, 0, 0xff); /* REX call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}
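
/* 0xff /2 is the indirect call and 0xff /4 the indirect jump, so the
   ModRM byte is 0xd0+reg for a call and 0xe0+reg for a jump; the
   (is_jmp << 4) term selects between the two /digit fields. */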
void struct_copy(SValue *d, SValue *s, SValue *c)
{
    if(!c->c.i)
        return;
    save_reg(TREG_RCX);
    load(TREG_RCX, c);
    load(TREG_RDI, d);
    load(TREG_RSI, s);
    o(0xa4f3); /* rep movsb */
}
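
/* rep movsb copies %rcx bytes from (%rsi) to (%rdi), which is why the
   count, destination and source are loaded into exactly those three
   registers above. */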
void gen_putz(SValue *d, int size)
{
    if(!size)
        return;
    save_reg(TREG_RAX);
    o(0xb0); /* mov $0, %al (0x00 must be emitted with g()) */
    g(0x00);
    save_reg(TREG_RCX);
    o(0xb8 + REG_VALUE(TREG_RCX)); /* mov $size, %ecx */
    gen_le32(size);
    load(TREG_RDI, d);
    o(0xaaf3); /* rep stosb */
}
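
/* rep stosb stores %al (zeroed above) into %rcx bytes starting at
   (%rdi), giving a compact memset-to-zero of the destination. */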
#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}
static int func_scratch;

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align)
{
    int size, align;
    *ret_align = 1; // Never have to re-align return values for x86-64
    size = type_size(vt, &align);
    ret->ref = NULL;
    if (size > 8) {
        return 0;
    } else if (size > 4) {
        ret->t = VT_LLONG;
        return 1;
    } else if (size > 2) {
        ret->t = VT_INT;
        return 1;
    } else if (size > 1) {
        ret->t = VT_SHORT;
        return 1;
    } else {
        ret->t = VT_BYTE;
        return 1;
    }
}
static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}
void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (size <= 8)
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (size > 8) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                gv(RC_XMM0); /* only use one float register */
                if (arg >= REGN) {
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (arg << 3));
                    d = arg_prepare_reg(arg);
                    /* mov %xmm0, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);

    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 0;
    pop_stack = loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    func_var = (sym->c == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
    if (size > 8) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (size > 8) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL | VT_REF, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_type->ref->c == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    v = (func_scratch + -loc + 15) & -16;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, -4); /* call __chkstk, (does the stackframe too) */
        greloc(cur_text_section, sym, ind-4, R_X86_64_PC32);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */
        gen_le32(v);
    }

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}
#else

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}
typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}
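
/* With this simplified per-struct classification, e.g.
   struct { double d; long l; } merges sse with integer and the whole
   struct is passed in integer registers, while any long double member
   forces the struct into memory. */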
static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_QLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
    case VT_ENUM: return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_QFLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        // Detect union
        if (f->next && (f->c == f->next->c))
            return x86_64_mode_memory;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }

    assert(0);
}
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
            ret_t = ty->t;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    if(size > 4)
                        ret_t = VT_LLONG;
                    else if(size > 2){
                        ret_t = VT_INT;
                    }else if(size > 1)
                        ret_t = VT_SHORT;
                    else
                        ret_t = VT_BYTE;
                }
                ret_t |= (ty->t & VT_UNSIGNED);
                break;
            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;
            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default:
                ret_t = ty->t;
                break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
            }
        }
    }

    if (ret) {
        ret->ref = ty->ref;
        ret->t = ret_t;
    }

    return mode;
}
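
/* Example: a 16-byte struct of two longs classifies as
   x86_64_mode_integer with *reg_count == 2 and ret_t == VT_QLONG, so it
   travels as a register pair; anything larger than 16 bytes was already
   forced to x86_64_mode_memory above. */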
ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_ld_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_x87: return __va_ld_reg;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}
#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, run_start, run_end, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;

    /* fetch cpu flag before the following sub will change the value */
    if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
        gv(RC_INT);
    /* calculate the number of integer/float register arguments */
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse)
            nb_sse_args += reg_count;
        else if (mode == x86_64_mode_integer)
            nb_reg_args += reg_count;
    }

    /* arguments are collected in runs. Each run is a collection of 8-byte aligned arguments
       and ended by a 16-byte aligned argument. This is because, from the point of view of
       the callee, argument alignment is computed from the bottom up. */
    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    run_start = 0;
    args_size = 0;
    while (run_start != nb_args) {
        int run_gen_reg = gen_reg, run_sse_reg = sse_reg;

        run_end = nb_args;
        stack_adjust = 0;
        for(i = run_start; (i < nb_args) && (run_end == nb_args); i++) {
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            switch (mode) {
            case x86_64_mode_memory:
            case x86_64_mode_x87:
            stack_arg:
                if (align == 16)
                    run_end = i;
                else
                    stack_adjust += size;
                break;

            case x86_64_mode_sse:
                sse_reg -= reg_count;
                if (sse_reg + reg_count > 8) goto stack_arg;
                break;

            case x86_64_mode_integer:
                gen_reg -= reg_count;
                if (gen_reg + reg_count > REGN) goto stack_arg;
                break;
            default: break; /* nothing to be done for x86_64_mode_none */
            }
        }

        gen_reg = run_gen_reg;
        sse_reg = run_sse_reg;

        /* adjust stack to align SSE boundary */
        if (stack_adjust &= 15) {
            /* fetch cpu flag before the following sub will change the value */
            if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
                gv(RC_INT);

            stack_adjust = 16 - stack_adjust;
            o(0x48);
            oad(0xec81, stack_adjust); /* sub $xxx, %rsp */
            args_size += stack_adjust;
        }

        for(i = run_start; i < run_end;) {
            /* Swap argument to top, it will possibly be changed here,
               and might use more temps. At the end of the loop we keep
               it on the stack and swap it back to its original position
               if it is a register. */
            SValue tmp = vtop[0];
            vtop[0] = vtop[-i];
            vtop[-i] = tmp;

            mode = classify_x86_64_arg(&vtop->type, NULL, &size, &align, &reg_count);

            int arg_stored = 1;
            switch (vtop->type.t & VT_BTYPE) {
            case VT_STRUCT:
                if (mode == x86_64_mode_sse) {
                    if (sse_reg > 8)
                        sse_reg -= reg_count;
                    else
                        arg_stored = 0;
                } else if (mode == x86_64_mode_integer) {
                    if (gen_reg > REGN)
                        gen_reg -= reg_count;
                    else
                        arg_stored = 0;
                }

                if (arg_stored) {
                    /* allocate the necessary size on stack */
                    o(0x48);
                    oad(0xec81, size); /* sub $xxx, %rsp */
                    /* generate structure store */
                    r = get_reg(RC_INT);
                    orex(1, r, 0, 0x89); /* mov %rsp, r */
                    o(0xe0 + REG_VALUE(r));
                    vset(&vtop->type, r | VT_LVAL, 0);
                    vswap();
                    vstore();
                    args_size += size;
                }
                break;

            case VT_LDOUBLE:
                assert(0);
                break;

            case VT_FLOAT:
            case VT_DOUBLE:
                assert(mode == x86_64_mode_sse);
                if (sse_reg > 8) {
                    --sse_reg;
                    r = gv(RC_FLOAT);
                    o(0x50); /* push $rax */
                    /* movq %xmmN, (%rsp) */
                    o(0xd60f66);
                    o(0x04 + REG_VALUE(r)*8);
                    o(0x24);
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;

            default:
                assert(mode == x86_64_mode_integer);
                /* simple type */
                /* XXX: implicit cast ? */
                if (gen_reg > REGN) {
                    --gen_reg;
                    r = gv(RC_INT);
                    orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;
            }

            /* And swap the argument back to its original position. */
            tmp = vtop[0];
            vtop[0] = vtop[-i];
            vtop[-i] = tmp;

            if (arg_stored) {
                vrotb(i+1);
                assert((vtop->type.t == tmp.type.t) && (vtop->r == tmp.r));
                vpop();
                --nb_args;
                --run_end;
            } else {
                ++i;
            }
        }

        /* handle 16 byte aligned arguments at end of run */
        run_start = i = run_end;
        while (i < nb_args) {
            /* Rotate argument to top since it will always be popped */
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            if (align != 16)
                break;

            vrotb(i+1);

            if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
                gv(RC_ST0);
                oad(0xec8148, size); /* sub $xxx, %rsp */
                o(0x7cdb); /* fstpt 0(%rsp) */
                g(0x24);
                g(0x00);
                args_size += size;
            } else {
                //assert(mode == x86_64_mode_memory);

                /* allocate the necessary size on stack */
                o(0x48);
                oad(0xec81, size); /* sub $xxx, %rsp */
                /* generate structure store */
                r = get_reg(RC_INT);
                orex(1, r, 0, 0x89); /* mov %rsp, r */
                o(0xe0 + REG_VALUE(r));
                vset(&vtop->type, r | VT_LVAL, 0);
                vswap();
                vstore();
                args_size += size;
            }

            vpop();
            --nb_args;
        }
    }

    /* XXX This should be superfluous. */
    // save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            sse_reg -= reg_count;
            if (sse_reg + reg_count <= 8) {
                if (reg_count == 2) {
                    ex_rc = RC_XMM0 << (sse_reg + 1);
                    gv(RC_XMM0 << sse_reg);
                }else{
                    assert(reg_count == 1);
                    /* Load directly to register */
                    gv(RC_XMM0 << sse_reg);
                }
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            int d;
            gen_reg -= reg_count;
            if (gen_reg + reg_count <= REGN) {
                if (reg_count == 2) {
                    d = arg_regs[gen_reg+1];
                    ex_rc = reg_classes[d] & ~RC_MASK;
                    d = arg_regs[gen_reg];
                    gv(reg_classes[d] & ~RC_MASK);
                }else{
                    assert(reg_count == 1);
                    d = arg_regs[gen_reg];
                    gv(reg_classes[d] & ~RC_MASK);
                }
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here. */
    save_regs(0);
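
    /* SysV varargs convention: %al must hold an upper bound on the
       number of vector registers used for the call; the callee's
       prologue tests %al (see gfunc_prolog) before spilling the XMM
       save area. */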
    oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    pop_stack = loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (func_type->ref->c == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count <= 8) {
                    seen_reg_num += reg_count;
                } else {
                    seen_reg_num = 8;
                    goto stack_arg;
                }
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count <= 8) {
                    seen_sse_num += reg_count;
                } else {
                    seen_sse_num = 8;
                    goto stack_arg;
                }
                break;
            }
        }

        loc -= 16;
        /* movl $0x????????, -0x10(%rbp) */
        o(0xf045c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        o(0xf445c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        o(0xf845c7);
        gen_le32(seen_stack_size);

        o(0xc084); /* test %al,%al */
        o(0x74);   /* je */
        g(4*(8 - seen_sse_num) + 3);
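
        /* %al carries the caller's count of vector registers used (SysV
           varargs convention); when it is zero the je above skips over
           the movaps spills that follow. */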
        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            o(0x290f); /* movaps %xmm1-7,-XXX(%rbp) */
            gen_modrm(7 - i, VT_LOCAL, NULL, loc);
        }
        for (i = 0; i < (REGN - seen_reg_num); i++) {
            push_arg_reg(REGN-1 - i);
        }
    }

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
                sse_param_index += reg_count;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
                reg_param_index += reg_count;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148);   /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */
/* generate a jump to a label */
int gjmp(int t)
{
    return psym(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}
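
/* A short jump (0xeb rel8) is 2 bytes long and a near jump (0xe9 rel32)
   is 5, hence the rel8 computed against ind+2 and the rel32 against
   ind+5. */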
/* generate a test. set 'inv' to invert test. Stack entry is popped */
int gtst(int inv, int t)
{
    int v, *p;

    v = vtop->r & VT_VALMASK;
    if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100)
        {
            /* This was a float compare. If the parity flag is set
               the result was unordered. For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test. We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump. */
            vtop->c.i &= ~0x100;
            if (!inv == (vtop->c.i != TOK_NE))
                o(0x067a); /* jp +6 */
            else
            {
                g(0x0f);
                t = psym(0x8a, t); /* jp t */
            }
        }
        g(0x0f);
        t = psym((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            p = &vtop->c.i;
            while (*p != 0)
                p = (int *)(cur_text_section->data + *p);
            *p = t;
            t = vtop->c.i;
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    } else {
        if (is_float(vtop->type.t) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            vpushi(0);
            gen_op(TOK_NE);
        }
        if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            /* constant jmp optimization */
            if ((vtop->c.i != 0) != inv)
                t = gjmp(t);
        } else {
            v = gv(RC_INT);
            orex(0,v,v,0x85);
            o(0xc0 + REG_VALUE(v) * 9);
            g(0x0f);
            t = psym(0x85 ^ inv, t);
        }
    }
    vtop--;
    return t;
}
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, fc, c, ll, uu, cc, tt2;

    fr = vtop[0].r;
    fc = vtop->c.ul;
    ll = is64_type(vtop[-1].type.t);
    cc = (fr & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
    tt2 = (fr & (VT_LVAL | VT_LVAL_TYPE)) == VT_LVAL;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        vswap();
        r = gv(RC_INT);
        vswap();
        if (cc && (!ll || (int)vtop->c.ll == vtop->c.ll)) {
            /* constant case */
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 + REG_VALUE(r) + opc*8);
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 + REG_VALUE(r) + opc*8, c);
            }
        } else {
            if(!tt2)
                fr = gv(RC_INT);
            orex(ll, fr, r, 0x03 + opc*8);
            if(fr >= VT_CONST)
                gen_modrm(r, fr, vtop->sym, fc);
            else
                o(0xc0 + REG_VALUE(fr) + REG_VALUE(r)*8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop->r = VT_CMP;
            vtop->c.i = op;
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        opc = 5;
        vswap();
        r = gv(RC_INT);
        vswap();
        if(!tt2)
            fr = gv(RC_INT);
        if(r == TREG_RAX){
            if(fr != TREG_RDX)
                save_reg(TREG_RDX);
            orex(ll, fr, r, 0xf7);
            if(fr >= VT_CONST)
                gen_modrm(opc, fr, vtop->sym, fc);
            else
                o(0xc0 + REG_VALUE(fr) + opc*8);
        }else{
            orex(ll, fr, r, 0xaf0f); /* imul fr, r */
            if(fr >= VT_CONST)
                gen_modrm(r, fr, vtop->sym, fc);
            else
                o(0xc0 + REG_VALUE(fr) + REG_VALUE(r)*8);
        }
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if(c == 1){
                orex(ll, r, 0, 0xd1);
                o(0xc0 + REG_VALUE(r) + opc*8);
            }else{
                orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
                o(0xc0 + REG_VALUE(r) + opc*8);
                g(c & (ll ? 0x3f : 0x1f));
            }
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(0xc0 + REG_VALUE(r) + opc*8);
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        opc = 6;
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        opc = 7;
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        if(!tt2){
            gv2(RC_RAX, RC_INT2);
            fr = vtop[0].r;
        }else{
            vswap();
            gv(RC_RAX);
            vswap();
        }
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cdq RDX:RAX <- sign-extend of RAX. */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        if(fr >= VT_CONST)
            gen_modrm(opc, fr, vtop->sym, fc);
        else
            o(0xc0 + REG_VALUE(fr) + opc*8);
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop--;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}
void gen_opl(int op)
{
    gen_opi(op);
}

/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, fr, r;
    int float_type = (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    swapped = 0;
    fc = vtop->c.ul;
    ft = vtop->type.t;

    if ((ft & VT_BTYPE) == VT_LDOUBLE) {
        /* swap the stack if needed so that t1 is the register and t2 is
           the memory reference */
        /* must put at least one value in the floating point register */
        if ((vtop[-1].r & VT_LVAL) && (vtop[0].r & VT_LVAL)) {
            vswap();
            gv(float_type);
            vswap();
        }
        if (vtop[-1].r & VT_LVAL) {
            vswap();
            swapped = 1;
        }
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op;
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        vswap();
        gv(float_type);
        vswap();
        fr = vtop->r;
        r = vtop[-1].r;
        if (op >= TOK_ULT && op <= TOK_GT) {
            switch(op){
            case TOK_LE:
                op = TOK_ULE; /* setae */
                break;
            case TOK_LT:
                op = TOK_ULT;
                break;
            case TOK_GE:
                op = TOK_UGE;
                break;
            case TOK_GT:
                op = TOK_UGT; /* seta */
                break;
            }
            assert(!(vtop[-1].r & VT_LVAL));
            if ((ft & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            o(0x2e0f); /* ucomisd */
            if(fr >= VT_CONST)
                gen_modrm(r, fr, vtop->sym, fc);
            else
                o(0xc0 + REG_VALUE(fr) + REG_VALUE(r)*8);
            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op | 0x100;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            /* no memory reference possible for long double operations */
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            assert((ft & VT_BTYPE) != VT_LDOUBLE);
            assert(!(vtop[-1].r & VT_LVAL));
            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);
            if(fr >= VT_CONST)
                gen_modrm(r, fr, vtop->sym, fc);
            else
                o(0xc0 + REG_VALUE(fr) + REG_VALUE(r)*8);
            vtop--;
        }
    }
}
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    int ft, bt, tbt, r;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;
    r = gv(RC_INT);

    if (tbt == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        if ((ft & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + REG_VALUE(r)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((ft & (VT_BTYPE | VT_UNSIGNED)) == (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + REG_VALUE(r)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + REG_VALUE(r)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r_xmm;
        r_xmm = get_reg(RC_FLOAT);
        o(0xf2 + (tbt == VT_FLOAT));
        if ((ft & (VT_BTYPE | VT_UNSIGNED)) == (VT_INT | VT_UNSIGNED) || bt == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + REG_VALUE(r) + REG_VALUE(r_xmm)*8); /* cvtsi2sd or cvtsi2ss */
        vtop->r = r_xmm;
    }
}
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt, r;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if(bt == VT_LDOUBLE)
        r = get_reg(RC_FLOAT);
    else
        r = gv(RC_FLOAT);
    if (bt == VT_FLOAT) {
        if (tbt == VT_DOUBLE) {
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(r) + REG_VALUE(r) * 8);
        } else if (tbt == VT_LDOUBLE) {
            /* movss %xmm0-7,-0x10(%rsp) */
            o(0x110ff3);
            o(0xf02444 + REG_VALUE(r)*8);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        if (tbt == VT_FLOAT) {
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(r) + REG_VALUE(r) * 8);
        } else if (tbt == VT_LDOUBLE) {
            /* movsd %xmm0-7,-0x10(%rsp) */
            o(0x110ff2);
            o(0xf02444 + REG_VALUE(r)*8);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        gv(RC_ST0);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0-7 */
            o(0x100ff2);
            o(0xf02444 + REG_VALUE(r)*8);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0-7 */
            o(0x100ff3);
            o(0xf02444 + REG_VALUE(r)*8);
            vtop->r = r;
        }
    }
}
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, ll, r, r_xmm;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;

    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }
    r_xmm = gv(RC_FLOAT);
    if ((t & VT_BTYPE) == VT_INT)
        ll = 0;
    else
        ll = 1;
    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(ll, r, r_xmm, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(r_xmm) + (REG_VALUE(r) << 3));
    vtop->r = r;
}
/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}
/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}
/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    gfunc_call(1);
    vset(type, REG_IRET, 0);
#else
    int r;
    r = gv(RC_INT); /* allocation size */
    /* sub r,%rsp */
    o(0x2b48);
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
    /* and ~15, %rsp */
    o(0xf0e48348);
    /* mov %rsp, r */
    o(0x8948);
    o(0xe0 | REG_VALUE(r));
    vpop();
    vset(type, r, 0);
#endif
}
/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/