/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifdef TARGET_DEFS_ONLY
/* number of available registers */
#define NB_REGS     25
#define NB_ASM_REGS 8

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which does
   assumptions on it). */
#define RC_INT      0x0001 /* generic integer register */
#define RC_FLOAT    0x0002 /* generic float register */
#define RC_RAX      0x0004
#define RC_RCX      0x0008
#define RC_RDX      0x0010
#define RC_ST0      0x0020 /* only for long double */
#define RC_R8       0x0040
#define RC_R9       0x0080
#define RC_XMM0     0x0100
#define RC_XMM1     0x0200
#define RC_XMM2     0x0400
#define RC_XMM3     0x0800
#define RC_XMM4     0x1000
#define RC_XMM5     0x2000
#define RC_XMM6     0x4000
#define RC_XMM7     0x8000
#define RC_RSI      0x10000
#define RC_RDI      0x20000
#define RC_INT1     0x40000 /* function pointer */
#define RC_INT2     0x80000
#define RC_RBX      0x100000
#define RC_R10      0x200000
#define RC_R11      0x400000
#define RC_R12      0x800000
#define RC_R13      0x1000000
#define RC_R14      0x2000000
#define RC_R15      0x4000000
#define RC_IRET     RC_RAX  /* function return: integer register */
#define RC_LRET     RC_RDX  /* function return: second integer register */
#define RC_FRET     RC_XMM0 /* function return: float register */
#define RC_QRET     RC_XMM1 /* function return: second float register */
#define RC_MASK     (RC_INT|RC_INT1|RC_INT2|RC_FLOAT)
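/* Each entry of reg_classes[] below or's together every class its
   register belongs to.  A precise class pins one register, e.g. the
   shift code requests RC_RCX to force the count into %cl, while a
   generic RC_INT request may pick any integer register.  RC_INT1
   ("function pointer") contains only non-argument registers, so an
   indirect call target allocated there (see gcall_or_jmp()) survives
   argument setup; RC_INT2 excludes %rdx so the division code can ask
   for a second operand that cqo/div will not clobber. */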
/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_ST0 = 5,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    /* flag or'ed into a temporary register to request a reload through
       memory (GOT indirection) in load()/gen_modrm64() below; bit
       value as in mainline tcc */
    TREG_MEM = 0x20,
};
#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)
#define FLAG_GOT 0x01
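/* Example: TREG_R10 (= 10 = 0b1010) splits into REX_BASE = 1 (the REX
   extension bit) and REG_VALUE = 2 (the low three bits placed in the
   ModRM byte), so %r10 is addressed as register code 2 plus a REX
   prefix bit. */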
/* return registers for function */
#define REG_IRET TREG_RAX  /* single word int return register */
#define REG_LRET TREG_RDX  /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE 16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN 16

/******************************************************/
/* ELF defines */

#define EM_TCC_TARGET EM_X86_64

/* relocation type for 32 bit data relocation */
#define R_DATA_32   R_X86_64_32
#define R_DATA_PTR  R_X86_64_64
#define R_JMP_SLOT  R_X86_64_JUMP_SLOT
#define R_COPY      R_X86_64_COPY

#define ELF_START_ADDR 0x400000
#define ELF_PAGE_SIZE  0x200000

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#include "tcc.h"
#include <assert.h>
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT|RC_RAX|RC_INT2,
    /* ecx */ RC_INT|RC_RCX|RC_INT2,
    /* edx */ RC_INT|RC_RDX,
    /* ebx */ RC_INT|RC_INT1|RC_INT2|RC_RBX,
    /* esp (never allocated) */ 0,
    /* st0 */ RC_ST0,
    /* esi */ RC_RSI|RC_INT2,
    /* edi */ RC_RDI|RC_INT2,
    /* r8  */ RC_INT|RC_R8|RC_INT2,
    /* r9  */ RC_INT|RC_R9|RC_INT2,
    /* r10 */ RC_INT|RC_INT1|RC_INT2|RC_R10,
    /* r11 */ RC_INT|RC_INT1|RC_INT2|RC_R11,
    /* r12 */ RC_INT|RC_INT1|RC_INT2|RC_R12,
    /* r13 */ RC_INT|RC_INT1|RC_INT2|RC_R13,
    /* r14 */ RC_INT|RC_INT1|RC_INT2|RC_R14,
    /* r15 */ RC_INT|RC_INT1|RC_INT2|RC_R15,
    /* xmm0 */ RC_FLOAT|RC_XMM0,
    /* xmm1 */ RC_FLOAT|RC_XMM1,
    /* xmm2 */ RC_FLOAT|RC_XMM2,
    /* xmm3 */ RC_FLOAT|RC_XMM3,
    /* xmm4 */ RC_FLOAT|RC_XMM4,
    /* xmm5 */ RC_FLOAT|RC_XMM5,
    /* xmm6 */ RC_FLOAT|RC_XMM6,
    /* xmm7 */ RC_FLOAT|RC_XMM7,
};

static unsigned long func_sub_sp_offset;
static int func_ret_sub;
/* XXX: make it faster ? */
void g(int c)
{
    int ind1;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}

void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}
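/* Note on o(): multi-byte opcodes are given as little-endian integers,
   so o(0x8b48) emits 0x48 0x8b.  Emission stops once the remaining
   value is zero, so a trailing 0x00 byte cannot be produced by o();
   that is why gen_putz() below writes mov $0,%al as o(0xb0) followed
   by g(0x00). */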
void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}
void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
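/* orex() builds the REX prefix 0100WR0B: 'll' sets REX.W (64-bit
   operand size), 'r' supplies REX.B (extension of the ModRM r/m or
   opcode register), 'r2' supplies REX.R (extension of the ModRM reg
   field).  Example: orex(1, TREG_R10, TREG_RAX, 0x8b) emits 0x49 0x8b,
   i.e. REX.W+REX.B followed by mov. */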
/* output a symbol and patch all calls to it */
void gsym_addr(int t, int a)
{
    int n, *ptr;
    while (t) {
        ptr = (int *)(cur_text_section->data + t);
        n = *ptr; /* next value */
        *ptr = a - t - 4;
        t = n;
    }
}

void gsym(int t)
{
    gsym_addr(t, ind);
}

/* psym is used to put an instruction with a data field which is a
   reference to a symbol. It is in fact the same as oad ! */
#define psym oad
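/* Forward jumps are chained through their own displacement fields:
   each pending jump's 32-bit operand holds the offset of the next
   pending jump in the chain, with 0 terminating it.  gsym_addr() walks
   the chain and rewrites every operand as the real PC-relative
   displacement (a - t - 4, measured from the end of the field). */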
static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}

/* instruction + 4 bytes data. Return the address of the data */
ST_FUNC int oad(int c, int s)
{
    int ind1;

    o(c);
    ind1 = ind + 4;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    *(int *)(cur_text_section->data + ind) = s;
    s = ind;
    ind = ind1;
    return s;
}
/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_X86_64_32);
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_X86_64_64);
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_X86_64_PC32);
    gen_le32(c - 4);
}

/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifndef TCC_TARGET_PE
    Section *sr;
    ElfW(Rela) *rel;
    greloc(cur_text_section, sym, ind, R_X86_64_GOTPCREL);
    sr = cur_text_section->reloc;
    rel = (ElfW(Rela) *)(sr->data + sr->data_offset - sizeof(ElfW(Rela)));
    rel->r_addend = -4;
#else
    printf("picpic: %s %x %x | %02x %02x %02x\n", get_tok_str(sym->v, NULL), c, r,
           cur_text_section->data[ind-3],
           cur_text_section->data[ind-2],
           cur_text_section->data[ind-1]);
    greloc(cur_text_section, sym, ind, R_X86_64_PC32);
#endif
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}
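/* The -4 addend on the GOTPCREL entry accounts for the displacement
   being relative to the end of the 4-byte field (where RIP points),
   not its start; gen_addrpc32() achieves the same by emitting c - 4
   directly. */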
static void gen_modrm_impl(int op_reg, int fr, Sym *sym, int c, int flag)
{
    int r = fr & VT_VALMASK;
    op_reg = REG_VALUE(op_reg) << 3;
    if (r == VT_CONST) {
        /* constant memory reference */
        o(0x05 | op_reg);
        if (flag & FLAG_GOT) {
            gen_gotpcrel(fr, sym, c);
        } else {
            gen_addrpc32(fr, sym, c);
        }
    } else if (r == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if (c) {
        if (c == (char)c) {
            /* short reference */
            g(0x40 | op_reg | REG_VALUE(fr));
            if (r == TREG_RSP)
                g(0x24);
            g(c);
        } else {
            g(0x80 | op_reg | REG_VALUE(fr));
            if (r == TREG_RSP)
                g(0x24);
            gen_le32(c);
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(fr));
        if (r == TREG_RSP)
            g(0x24);
    }
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int flag = 0;
    if ((op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC))
        flag = FLAG_GOT;
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, flag);
}
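/* ModRM mode selection in gen_modrm_impl(): mod=00 with r/m=101
   (0x05) is RIP-relative with a 32-bit displacement, 0x45/0x85 are
   %rbp-relative with 8/32-bit displacement for locals, and for other
   bases mod=00/01/10 (0x00/0x40/0x80) pick the displacement width; an
   %rsp base (r/m=100) always needs the extra SIB byte 0x24. */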
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr, ll;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.ul;
    ll = is64_type(ft);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        } else {
            tr = r | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);
        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.ul = fc;
            fr = r;
            if (!(reg_classes[fr] & RC_INT))
                fr = get_reg(RC_INT);
            load(fr, &v1);
            fc = 0;
        }
        int b;
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x100ff3; /* movss */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x100ff2; /* movsd */
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT) || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR) || ((ft & VT_BTYPE) == VT_ENUM)
                   || ((ft & VT_BTYPE) == VT_FUNC));
            b = 0x8b;
        }
        orex(ll, fr, r, b);
        gen_modrm(r, fr, sv->sym, fc);
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1, 0, r, 0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1, 0, r, 0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1, 0, r, 0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else {
                orex(ll, r, 0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                if (ll)
                    gen_le64(sv->c.ull);
                else
                    gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1, 0, r, 0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            orex(0, r, 0, 0xb8 + REG_VALUE(r));
            if ((fc & ~0x100) == TOK_NE) {
                gen_le32(1); /* mov $1, r */
            } else {
                gen_le32(0); /* mov $0, r */
            }
            if (fc & 0x100) {
                fc &= ~0x100;
                /* This was a float compare.  If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE. */
                o(0x037a + (REX_BASE(r) << 8)); /* jp 3 */
            }
            orex(0, r, 0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0, r, 0, 0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0, r, 0, 0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if (reg_classes[r] & RC_FLOAT) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmm0 */
                    o(0x100ff2);
                    o(0xf02444 + REG_VALUE(r)*8);
                } else if (reg_classes[v] & RC_FLOAT) {
                    o(0x7e0ff3); /* movq v, r */
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                } else
                    assert(0);
            } else if (r == TREG_ST0) {
                assert(reg_classes[v] & RC_FLOAT);
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmm0,-0x10(%rsp) */
                o(0x110ff2);
                o(0xf02444 + REG_VALUE(v)*8);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                if (fc) {
                    orex(1, fr, r, 0x8d); /* lea xxx(%ebp), r */
                    gen_modrm(r, fr, sv->sym, fc);
                } else {
                    orex(ll, v, r, 0x8b);
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r) * 8); /* mov v, r */
                }
            }
        }
    }
}
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *sv)
{
    int fr, bt, ft, fc, ll, v;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.ul;
    fr = sv->r;
    bt = ft & VT_BTYPE;
    ll = is64_type(ft);
    v = fr & VT_VALMASK;

//#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
//    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
//        o(0x1d8b4c);
//        gen_gotpcrel(TREG_R11, v->sym, v->c.ul);
//        pic = is64_type(bt) ? 0x49 : 0x41;
//    }
//#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        orex(0, fr, r, 0x110ff3); /* movss */
    } else if (bt == VT_DOUBLE) {
        orex(0, fr, r, 0x110ff2); /* movsd */
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        orex(0, fr, r, 0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(ll, fr, r, 0x88);
        else {
            orex(ll, fr, r, 0x89);
        }
    }
    if (v == VT_CONST || v == VT_LOCAL || (fr & VT_LVAL)) {
        gen_modrm(r, fr, sv->sym, fc);
    } else if (v != r) {
        /* XXX: do we ever really get here? */
        abort();
        o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8); /* mov r, fr */
    }
}
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        /* constant case */
        if (vtop->r & VT_SYM) {
            /* relocation case */
            greloc(cur_text_section, vtop->sym,
                   ind + 1, R_X86_64_PLT32);
        } else {
            /* put an empty PC32 relocation */
            put_elf_reloc(symtab_section, cur_text_section,
                          ind + 1, R_X86_64_PC32, 0);
        }
        oad(0xe8 + is_jmp, vtop->c.ul - 4); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = get_reg(RC_INT1);
        load(r, vtop);
        orex(0, r, 0, 0xff); /* REX call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}
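/* Encoding note: 0xe8 is direct call and 0xe9 direct jmp (hence
   0xe8 + is_jmp), with the PC32/PLT32 relocation applied to the 4-byte
   displacement at ind + 1.  The indirect form is opcode 0xff with
   ModRM reg field 2 for call (0xd0 + reg) and 4 for jmp
   (0xd0 + (1 << 4) + reg). */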
void struct_copy(SValue *d, SValue *s, SValue *c)
{
    if (!c->c.i)
        return;
    save_reg(TREG_RCX);
    load(TREG_RCX, c);
    load(TREG_RDI, d);
    load(TREG_RSI, s);
    o(0xa4f3); /* rep movsb */
}

void gen_putz(SValue *d, int size)
{
    if (!size)
        return;
    save_reg(TREG_RAX);
    o(0xb0); /* mov $0, %al */
    g(0x00);
    save_reg(TREG_RCX);
    o(0xb8 + REG_VALUE(TREG_RCX)); /* mov $size, %ecx */
    gen_le32(size);
    load(TREG_RDI, d);
    o(0xaaf3); /* rep stosb */
}
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gen_offs_sp(int b, int r, int off)
{
    if (r & 0x100)
        o(b);
    else
        orex(1, 0, r, b);
    if (!off) {
        o(0x2404 | (REG_VALUE(r) << 3));
    } else if (off == (char)off) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(off);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(off);
    }
}
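/* gen_offs_sp() addresses %rsp-relative stack slots: ModRM 0x04, 0x44
   or 0x84 (no, 8-bit, or 32-bit displacement) followed by the
   mandatory SIB byte 0x24 for an %rsp base.  Passing bit 0x100 in 'r'
   suppresses the REX prefix for opcodes (x87, prefixed SSE) whose
   register operand is encoded elsewhere. */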
static int func_scratch;
static int r_loc;
#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align)
{
    int size, align;
    *ret_align = 1; // Never have to re-align return values for x86-64
    size = type_size(vt, &align);
    ret->ref = NULL;
    if (size > 8) {
        return 0;
    } else if (size > 4) {
        ret->t = VT_LLONG;
        return 1;
    } else if (size > 2) {
        ret->t = VT_INT;
        return 1;
    } else if (size > 1) {
        ret->t = VT_SHORT;
        return 1;
    } else {
        ret->t = VT_BYTE;
        return 1;
    }
}

static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}
void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (size <= 8)
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (size > 8) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                gv(RC_XMM0); /* only use one float register */
                if (arg >= REGN) {
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (arg << 3));
                    d = arg_prepare_reg(arg);
                    /* mov %xmm0, %rxx */
                    o(0x66);
                    orex(1, d, 0, 0x7e0f);
                    o(0xc0 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }
                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1, d, r, 0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);

    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);
    vtop--;
}
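/* Win64 notes for the code above: args_size always reserves at least
   REGN * PTR_SIZE bytes (the 32-byte register save area), aggregates
   larger than 8 bytes are passed by reference to a copy in the scratch
   area, and long doubles travel through the x87 stack into the scratch
   area as well. */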
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = func_scratch = r_loc = 0;
    pop_stack = loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    func_var = (sym->c == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
    if (size > 8) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (size > 8) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL | VT_REF, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_type->ref->c == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    v = (func_scratch - loc + 15) & -16;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, -4); /* call __chkstk, (does the stackframe too) */
        greloc(cur_text_section, sym, ind-4, R_X86_64_PC32);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */
        gen_le32(v);
    }

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}

#else
typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}
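/* classify_x86_64_merge() is the field-merge rule used on struct
   members: equal classes stay, none is the identity, memory dominates
   everything, integer dominates the remaining FP classes, and mixing
   x87 with sse degrades to memory; only pure-sse aggregates remain
   sse. */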
static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_QLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
    case VT_ENUM: return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_QFLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        // Detect union
        if (f->next && (f->c == f->next->c))
            return x86_64_mode_memory;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }

    assert(0);
}
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
            ret_t = ty->t;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    if (size > 4)
                        ret_t = VT_LLONG;
                    else if (size > 2)
                        ret_t = VT_INT;
                    else if (size > 1)
                        ret_t = VT_SHORT;
                    else
                        ret_t = VT_BYTE;
                }
                ret_t |= (ty->t & VT_UNSIGNED);
                break;
            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;
            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default:
                ret_t = ty->t;
                break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
            }
        }
    }

    if (ret) {
        ret->ref = ty->ref;
        ret->t = ret_t;
    }

    return mode;
}
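/* Worked example: struct { long a; double b; } is 16 bytes, so the
   size check above does not force it to memory; its fields merge as
   integer + sse = integer, so it is passed or returned as VT_QLONG in
   two integer registers.  (This classifies the whole struct with one
   mode, a simplification of the full ABI, which classifies each
   eightbyte separately.) */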
ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_ld_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_x87: return __va_ld_reg;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}

#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    int size, align, args_size, s, e, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int gen_reg, sse_reg;
    CType type;

    /* fetch cpu flag before the following sub will change the value */
    if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
        gv(RC_INT);
    /* calculate the number of integer/float register arguments */
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse)
            nb_sse_args += reg_count;
        else if (mode == x86_64_mode_integer)
            nb_reg_args += reg_count;
    }

    args_size = 0;
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_x87:
            if ((vtop[-i].type.t & VT_BTYPE) == VT_STRUCT)
                goto stack_arg1;
            else
                args_size = (args_size + 15) & ~15;
            /* fall through */
        case x86_64_mode_memory:
        stack_arg1:
            args_size += size;
            break;
        case x86_64_mode_sse:
            sse_reg -= reg_count;
            if (sse_reg + reg_count > 8)
                goto stack_arg1;
            break;
        case x86_64_mode_integer:
            gen_reg -= reg_count;
            if (gen_reg + reg_count > REGN)
                goto stack_arg1;
            break;
        default: break; /* nothing to be done for x86_64_mode_none */
        }
    }

    args_size = (args_size + 15) & ~15;
    if (func_scratch < args_size)
        func_scratch = args_size;

    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    for(s = e = 0; s < nb_args; s = e) {
        int run_gen, run_sse, st_size;
        run_gen = gen_reg;
        run_sse = sse_reg;
        st_size = 0;
        for(i = s; i < nb_args; i++) {
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            switch (mode) {
            case x86_64_mode_x87:
                if ((vtop[-i].type.t & VT_BTYPE) == VT_STRUCT) {
                    goto stack_arg2;
                } else {
                    ++i;
                    goto doing;
                }
            case x86_64_mode_memory:
            stack_arg2:
                st_size += size;
                break;
            case x86_64_mode_sse:
                sse_reg -= reg_count;
                if (sse_reg + reg_count > 8)
                    goto stack_arg2;
                break;
            case x86_64_mode_integer:
                gen_reg -= reg_count;
                if (gen_reg + reg_count > REGN)
                    goto stack_arg2;
                break;
            default: break; /* nothing to be done for x86_64_mode_none */
            }
        }
    doing:
        e = i;
        st_size = -st_size & 15; // 16 - (size & 15)
        if (st_size)
            args_size -= st_size;

        gen_reg = run_gen;
        sse_reg = run_sse;
        for(i = s; i < e; i++) {
            SValue tmp;
            /* Swap argument to top, it will possibly be changed here,
               and might use more temps. All arguments must remain on the
               stack, so that get_reg can correctly evict some of them onto
               stack. We could also use a vrott(nb_args) at the end
               of this loop, but this seems faster. */
            if (i != 0) {
                tmp = vtop[0];
                vtop[0] = vtop[-i];
                vtop[-i] = tmp;
            }

            mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
            switch (mode) {
            case x86_64_mode_x87:
                /* must keep TREG_ST0 unique: only one value can live in
                   the x87 stack at a time */
                if ((vtop->type.t & VT_BTYPE) == VT_STRUCT) {
                    vdup();
                    vtop->type = type;
                    gv(RC_ST0);
                    args_size -= size;
                    gen_offs_sp(0xdb, 0x107, args_size);
                    vtop--; /* release TREG_ST0 */
                } else {
                    gv(RC_ST0);
                    args_size -= size;
                    gen_offs_sp(0xdb, 0x107, args_size);
                    vtop->r = VT_CONST; /* release TREG_ST0 */
                }
                break;
            case x86_64_mode_memory:
                args_size -= size;
                vset(&char_pointer_type, TREG_RSP, args_size); /* generate memcpy to the RSP slot */
                vpushv(&vtop[-1]);
                vtop->type = char_pointer_type;
                gaddrof();
                vpushi(size);
                struct_copy(&vtop[-2], &vtop[-1], &vtop[0]);
                vtop -= 3;
                break;
            case x86_64_mode_sse:
                sse_reg -= reg_count;
                if (sse_reg + reg_count > 8) {
                    args_size -= size;
                    goto gen_code;
                }
                break;
            case x86_64_mode_integer:
                gen_reg -= reg_count;
                if (gen_reg + reg_count > REGN) {
                    args_size -= size;
                gen_code:
                    vset(&type, TREG_RSP | VT_LVAL, args_size);
                    vpushv(&vtop[-1]);
                    vtop->type = type;
                    vstore();
                    vtop--;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_none */
            }
            if (i != 0) {
                tmp = vtop[0];
                vtop[0] = vtop[-i];
                vtop[-i] = tmp;
            }
        }
        run_gen = gen_reg;
        run_sse = sse_reg;
    }

    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    for(i = 0; i < nb_args; i++) {
        int d;
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            sse_reg -= reg_count;
            if (sse_reg + reg_count <= 8) {
                if (reg_count == 2) {
                    ex_rc = RC_XMM0 << (sse_reg + 1);
                    gv(RC_XMM0 << sse_reg);
                } else {
                    assert(reg_count == 1);
                    /* Load directly to register */
                    gv(RC_XMM0 << sse_reg);
                }
            }
        } else if (mode == x86_64_mode_integer) {
            gen_reg -= reg_count;
            if (gen_reg + reg_count <= REGN) {
                if (reg_count == 2) {
                    d = arg_regs[gen_reg+1];
                    ex_rc = reg_classes[d] & ~RC_MASK;
                    d = arg_regs[gen_reg];
                    gv(reg_classes[d] & ~RC_MASK);
                } else {
                    assert(reg_count == 1);
                    d = arg_regs[gen_reg];
                    gv(reg_classes[d] & ~RC_MASK);
                }
            }
        }
        vpop();
    }
    save_regs(0);
    oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    vtop--;
}
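/* The mov into %eax just before the call implements the SysV variadic
   convention: %al must hold an upper bound on the number of SSE
   registers used for arguments, which a variadic callee's prolog tests
   to decide how much of %xmm0-%xmm7 to spill (see gfunc_prolog
   below). */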
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}
/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    pop_stack = loc = 0;
    func_scratch = r_loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (func_type->ref->c == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count <= REGN) {
                    seen_reg_num += reg_count;
                } else {
                    seen_reg_num = 8;
                    goto stack_arg;
                }
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count <= 8) {
                    seen_sse_num += reg_count;
                } else {
                    seen_sse_num = 8;
                    goto stack_arg;
                }
                break;
            }
        }

        loc -= 16;
        /* movl $0x????????, -0x10(%rbp) */
        o(0xf045c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        o(0xf445c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        o(0xf845c7);
        gen_le32(seen_stack_size);

        o(0xc084); /* test %al,%al */
        o(0x74);   /* je */
        g(4*(8 - seen_sse_num) + 3);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            o(0x290f); /* movaps %xmm0-7,-XXX(%rbp) */
            gen_modrm(7 - i, VT_LOCAL, NULL, loc);
        }
        for (i = 0; i < (REGN - seen_reg_num); i++) {
            push_arg_reg(REGN-1 - i);
        }
    }

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
                sse_param_index += reg_count;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
                reg_param_index += reg_count;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }
}
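/* The three 32-bit stores at the top of the ellipsis block record the
   register-save state for va_start: the gp offset (seen_reg_num * 8),
   the sse offset (48 + seen_sse_num * 16, past the six 8-byte gp
   slots), and the offset of the first stack-passed argument.  The
   exact slot layout is assumed to match this compiler's stdarg.h, as
   with classify_x86_64_va_arg() above; the test %al,%al / je then uses
   the SSE count passed in %al by the caller to skip dead xmm spills. */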
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (func_scratch - loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */

/* generate a jump to a label */
int gjmp(int t)
{
    return psym(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}
/* generate a test. set 'inv' to invert test. Stack entry is popped */
int gtst(int inv, int t)
{
    int v, *p;

    v = vtop->r & VT_VALMASK;
    if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100) {
            /* This was a float compare.  If the parity flag is set
               the result was unordered.  For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test.  We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump. */
            vtop->c.i &= ~0x100;
            if (!inv == (vtop->c.i != TOK_NE)) {
                o(0x067a); /* jp +6 */
            } else {
                g(0x0f);
                t = psym(0x8a, t); /* jp t */
            }
        }
        g(0x0f);
        t = psym((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            p = &vtop->c.i;
            while (*p != 0)
                p = (int *)(cur_text_section->data + *p);
            *p = t;
            t = vtop->c.i;
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    } else {
        if (is_float(vtop->type.t) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            vpushi(0);
            gen_op(TOK_NE);
        }
        if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            /* constant jmp optimization */
            if ((vtop->c.i != 0) != inv)
                t = gjmp(t);
        } else {
            v = gv(RC_INT);
            orex(0, v, v, 0x85); /* test v, v */
            o(0xc0 + REG_VALUE(v) * 9);
            g(0x0f);
            t = psym(0x85 ^ inv, t);
        }
    }
    vtop--;
    return t;
}
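/* Encoding trick used above: the comparison tokens are chosen so that
   tok - 16 is the second byte of the matching 0x0f 0x8x conditional
   jump (e.g. TOK_ULT 0x92 -> 0x82 = jb), and the token itself is the
   second byte of the matching 0x0f 0x9x setcc used by load(); xor-ing
   with 'inv' (0 or 1) flips the condition to its complement. */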
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, fc, c, ll, uu, cc, tt2;

    fr = vtop[0].r;
    fc = vtop->c.ul;
    ll = is64_type(vtop[-1].type.t);
    cc = (fr & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
    tt2 = (fr & (VT_LVAL | VT_LVAL_TYPE)) == VT_LVAL;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        vswap();
        r = gv(RC_INT);
        vswap();
        if (cc && (!ll || (int)vtop->c.ll == vtop->c.ll)) {
            /* constant case */
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 + REG_VALUE(r) + opc*8);
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 + REG_VALUE(r) + opc*8, c);
            }
        } else {
            if (!tt2)
                fr = gv(RC_INT);
            orex(ll, fr, r, 0x03 + opc*8);
            if (fr >= VT_CONST)
                gen_modrm(r, fr, vtop->sym, fc);
            else
                o(0xc0 + REG_VALUE(fr) + REG_VALUE(r)*8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop->r = VT_CMP;
            vtop->c.i = op;
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        opc = 5;
        vswap();
        r = gv(RC_INT);
        vswap();
        if (!tt2)
            fr = gv(RC_INT);
        if (r == TREG_RAX) {
            if (fr != TREG_RDX)
                save_reg(TREG_RDX);
            orex(ll, fr, r, 0xf7); /* imul fr */
            if (fr >= VT_CONST)
                gen_modrm(opc, fr, vtop->sym, fc);
            else
                o(0xc0 + REG_VALUE(fr) + opc*8);
        } else {
            orex(ll, fr, r, 0xaf0f); /* imul fr, r */
            if (fr >= VT_CONST)
                gen_modrm(r, fr, vtop->sym, fc);
            else
                o(0xc0 + REG_VALUE(fr) + REG_VALUE(r)*8);
        }
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == 1) {
                orex(ll, r, 0, 0xd1);
                o(0xc0 + REG_VALUE(r) + opc*8);
            } else {
                orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
                o(0xc0 + REG_VALUE(r) + opc*8);
                g(c & (ll ? 0x3f : 0x1f));
            }
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(0xc0 + REG_VALUE(r) + opc*8);
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        opc = 6;
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        opc = 7;
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        if (!tt2) {
            gv2(RC_RAX, RC_INT2);
            fr = vtop[0].r;
        } else {
            vswap();
            gv(RC_RAX);
            vswap();
        }
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cdq/cqo, RDX:RAX <- sign-extend of RAX */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        if (fr >= VT_CONST)
            gen_modrm(opc, fr, vtop->sym, fc);
        else
            o(0xc0 + REG_VALUE(fr) + opc*8);
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop--;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}

void gen_opl(int op)
{
    gen_opi(op);
}
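/* The gen_op8 path relies on the regular x86 ALU encoding: for
   add/or/adc/sbb/and/sub/xor/cmp the reg,r/m opcode is 0x03 + opc*8,
   and the immediate forms are 0x81 (imm32) and 0x83 (sign-extended
   imm8) with opc in the ModRM reg field.  The default case maps every
   comparison operator onto cmp (opc = 7), leaving the result in the
   flags as VT_CMP. */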
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, fr, r;
    int float_type = (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    swapped = 0;
    fc = vtop->c.ul;
    ft = vtop->type.t;

    if ((ft & VT_BTYPE) == VT_LDOUBLE) {
        /* swap the stack if needed so that t1 is the register and t2 is
           the memory reference */
        /* must put at least one value in the floating point register */
        if ((vtop[-1].r & VT_LVAL) && (vtop[0].r & VT_LVAL)) {
            vswap();
            gv(float_type);
            vswap();
        }
        if (vtop[-1].r & VT_LVAL) {
            vswap();
            swapped = 1;
        }
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fc80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op;
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        vswap();
        gv(float_type);
        vswap();
        fr = vtop->r;
        r = vtop[-1].r;
        if (op >= TOK_ULT && op <= TOK_GT) {
            switch(op) {
            case TOK_LE:
                op = TOK_ULE; /* setbe */
                break;
            case TOK_LT:
                op = TOK_ULT;
                break;
            case TOK_GE:
                op = TOK_UGE;
                break;
            case TOK_GT:
                op = TOK_UGT; /* seta */
                break;
            }
            assert(!(vtop[-1].r & VT_LVAL));
            if ((ft & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            o(0x2e0f); /* ucomisd */
            if (fr >= VT_CONST)
                gen_modrm(r, fr, vtop->sym, fc);
            else
                o(0xc0 + REG_VALUE(fr) + REG_VALUE(r)*8);
            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op | 0x100;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            /* no memory reference possible for long double operations */
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            assert((ft & VT_BTYPE) != VT_LDOUBLE);
            assert(!(vtop[-1].r & VT_LVAL));
            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);
            if (fr >= VT_CONST)
                gen_modrm(r, fr, vtop->sym, fc);
            else
                o(0xc0 + REG_VALUE(fr) + REG_VALUE(r)*8);
            vtop--;
        }
    }
}
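/* ucomiss/ucomisd set ZF, CF and PF; PF = 1 means unordered (a NaN
   operand).  The 0x100 bit or'ed into vtop->c.i records that the
   VT_CMP value came from a float compare, so load() and gtst() know to
   special-case the parity flag. */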
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    int ft, bt, tbt, r;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;
    r = gv(RC_INT);

    if (tbt == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        if ((ft & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + REG_VALUE(r)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((ft & (VT_BTYPE | VT_UNSIGNED)) == (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + REG_VALUE(r)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + REG_VALUE(r)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r_xmm;
        r_xmm = get_reg(RC_FLOAT);
        o(0xf2 + (tbt == VT_FLOAT));
        if ((ft & (VT_BTYPE | VT_UNSIGNED)) == (VT_INT | VT_UNSIGNED) || bt == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + REG_VALUE(r) + REG_VALUE(r_xmm)*8); /* cvtsi2sd or cvtsi2ss */
        vtop->r = r_xmm;
    }
}
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt, r;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_LDOUBLE)
        r = get_reg(RC_FLOAT);
    else
        r = gv(RC_FLOAT);
    if (bt == VT_FLOAT) {
        if (tbt == VT_DOUBLE) {
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(r) + REG_VALUE(r) * 8);
        } else if (tbt == VT_LDOUBLE) {
            /* movss %xmm0-7,-0x10(%rsp) */
            o(0x110ff3);
            o(0xf02444 + REG_VALUE(r)*8);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        if (tbt == VT_FLOAT) {
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(r) + REG_VALUE(r) * 8);
        } else if (tbt == VT_LDOUBLE) {
            /* movsd %xmm0-7,-0x10(%rsp) */
            o(0x110ff2);
            o(0xf02444 + REG_VALUE(r)*8);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        gv(RC_ST0);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0-7 */
            o(0x100ff2);
            o(0xf02444 + REG_VALUE(r)*8);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0-7 */
            o(0x100ff3);
            o(0xf02444 + REG_VALUE(r)*8);
            vtop->r = r;
        }
    }
}
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, ll, r, r_xmm;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;

    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }
    r_xmm = gv(RC_FLOAT);
    if ((t & VT_BTYPE) == VT_INT)
        ll = 0;
    else
        ll = 1;
    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(ll, r, r_xmm, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(r_xmm) + (REG_VALUE(r) << 3));
    vtop->r = r;
}

/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}
/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp) */
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    gfunc_call(1);
    vset(type, REG_IRET, 0);
#else
    int r;
    r = gv(RC_INT); /* allocation size */
    /* sub r,%rsp */
    o(0x2b48);
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
    /* and ~15, %rsp */
    o(0xf0e48348);
    /* mov %rsp, r */
    o(0x8948);
    o(0xe0 | REG_VALUE(r));
    vpop();
    vset(type, r, 0);
#endif
}

/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/