tinycc.git / x86_64-gen.c
commit: Align on 4n bytes when copying fct args on stack
blob 1fa8dd53cdb0387ed610cb0e19ca9d39089aca3f
1 /*
2 * x86-64 code generator for TCC
4 * Copyright (c) 2008 Shinichiro Hamaji
6 * Based on i386-gen.c by Fabrice Bellard
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 #ifdef TARGET_DEFS_ONLY
25 /* number of available registers */
26 #define NB_REGS 5
27 #define NB_ASM_REGS 8
29 /* a register can belong to several classes. The classes must be
30 sorted from more general to more precise (see gv2() code which makes
31 assumptions about it). */
32 #define RC_INT 0x0001 /* generic integer register */
33 #define RC_FLOAT 0x0002 /* generic float register */
34 #define RC_RAX 0x0004
35 #define RC_RCX 0x0008
36 #define RC_RDX 0x0010
37 #define RC_R8 0x0100
38 #define RC_R9 0x0200
39 #define RC_R10 0x0400
40 #define RC_R11 0x0800
41 #define RC_XMM0 0x0020
42 #define RC_ST0 0x0040 /* only for long double */
43 #define RC_IRET RC_RAX /* function return: integer register */
44 #define RC_LRET RC_RDX /* function return: second integer register */
45 #define RC_FRET RC_XMM0 /* function return: float register */
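/* A register may carry several class bits: reg_classes[] below marks
   %rax, for instance, as RC_INT | RC_RAX, so it satisfies both a request
   for "any integer register" (RC_INT) and a request for "%rax
   specifically" (RC_RAX).  The RC_IRET/RC_LRET/RC_FRET aliases above
   simply name the classes used for function return values. */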
47 /* pretty names for the registers */
48 enum {
49 TREG_RAX = 0,
50 TREG_RCX = 1,
51 TREG_RDX = 2,
52 TREG_XMM0 = 3,
53 TREG_ST0 = 4,
55 TREG_RSI = 6,
56 TREG_RDI = 7,
57 TREG_R8 = 8,
58 TREG_R9 = 9,
60 TREG_R10 = 10,
61 TREG_R11 = 11,
63 TREG_MEM = 0x10,
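/* TREG_MEM is not a real register: it is or'ed into a register value
   (e.g. "r | TREG_MEM" in load() below) to mark that the register holds
   an address to be accessed indirectly, as in the GOT-indirect path;
   gen_modrm_impl() tests for values >= TREG_MEM accordingly. */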
66 #define REX_BASE(reg) (((reg) >> 3) & 1)
67 #define REG_VALUE(reg) ((reg) & 7)
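/* Example: TREG_R8 is 8, so REX_BASE(TREG_R8) is 1 and REG_VALUE(TREG_R8)
   is 0, i.e. %r8 is encoded as the REX.B bit plus register field 0, while
   plain %rax gives REX_BASE 0 and REG_VALUE 0. */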
69 /* return registers for function */
70 #define REG_IRET TREG_RAX /* single word int return register */
71 #define REG_LRET TREG_RDX /* second word return register (for long long) */
72 #define REG_FRET TREG_XMM0 /* float return register */
74 /* defined if function parameters must be evaluated in reverse order */
75 #define INVERT_FUNC_PARAMS
77 /* pointer size, in bytes */
78 #define PTR_SIZE 8
80 /* long double size and alignment, in bytes */
81 #define LDOUBLE_SIZE 16
82 #define LDOUBLE_ALIGN 8
83 /* maximum alignment (for aligned attribute support) */
84 #define MAX_ALIGN 8
86 ST_FUNC void gen_opl(int op);
87 ST_FUNC void gen_le64(int64_t c);
89 /******************************************************/
90 /* ELF defines */
92 #define EM_TCC_TARGET EM_X86_64
94 /* relocation type for 32 bit data relocation */
95 #define R_DATA_32 R_X86_64_32
96 #define R_DATA_PTR R_X86_64_64
97 #define R_JMP_SLOT R_X86_64_JUMP_SLOT
98 #define R_COPY R_X86_64_COPY
100 #define ELF_START_ADDR 0x08048000
101 #define ELF_PAGE_SIZE 0x1000
103 /******************************************************/
104 #else /* ! TARGET_DEFS_ONLY */
105 /******************************************************/
106 #include "tcc.h"
107 #include <assert.h>
109 ST_DATA const int reg_classes[] = {
110 /* eax */ RC_INT | RC_RAX,
111 /* ecx */ RC_INT | RC_RCX,
112 /* edx */ RC_INT | RC_RDX,
113 /* xmm0 */ RC_FLOAT | RC_XMM0,
114 /* st0 */ RC_ST0,
118 RC_INT | RC_R8,
119 RC_INT | RC_R9,
120 RC_INT | RC_R10,
121 RC_INT | RC_R11
124 static unsigned long func_sub_sp_offset;
125 static int func_ret_sub;
127 /* XXX: make it faster ? */
128 void g(int c)
130 int ind1;
131 ind1 = ind + 1;
132 if (ind1 > cur_text_section->data_allocated)
133 section_realloc(cur_text_section, ind1);
134 cur_text_section->data[ind] = c;
135 ind = ind1;
138 void o(unsigned int c)
140 while (c) {
141 g(c);
142 c = c >> 8;
146 void gen_le16(int v)
148 g(v);
149 g(v >> 8);
152 void gen_le32(int c)
154 g(c);
155 g(c >> 8);
156 g(c >> 16);
157 g(c >> 24);
160 void gen_le64(int64_t c)
162 g(c);
163 g(c >> 8);
164 g(c >> 16);
165 g(c >> 24);
166 g(c >> 32);
167 g(c >> 40);
168 g(c >> 48);
169 g(c >> 56);
172 void orex(int ll, int r, int r2, int b)
174 if ((r & VT_VALMASK) >= VT_CONST)
175 r = 0;
176 if ((r2 & VT_VALMASK) >= VT_CONST)
177 r2 = 0;
178 if (ll || REX_BASE(r) || REX_BASE(r2))
179 o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
180 o(b);
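/* orex() example: orex(1, TREG_R8, TREG_RDX, 0x89) emits the REX prefix
   0x49 (REX.W for 64 bit operand size plus REX.B for %r8) followed by the
   0x89 mov opcode; with ll == 0 and no extended register no REX prefix is
   emitted at all. */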
183 /* output a symbol and patch all calls to it */
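/* The 32 bit data fields of the pending forward jumps form a linked
   list: each field holds the offset of the previously recorded field
   (0 terminates the chain).  gsym_addr() walks that list and patches
   every field with the pc-relative displacement to address 'a'. */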
184 void gsym_addr(int t, int a)
186 int n, *ptr;
187 while (t) {
188 ptr = (int *)(cur_text_section->data + t);
189 n = *ptr; /* next value */
190 *ptr = a - t - 4;
191 t = n;
195 void gsym(int t)
197 gsym_addr(t, ind);
200 /* psym is used to put an instruction with a data field which is a
201 reference to a symbol. It is in fact the same as oad! */
202 #define psym oad
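/* e.g. psym(0xe9, t) emits a jmp opcode whose 4 byte operand field
   initially stores 't', the head of the unresolved-jump chain that
   gsym()/gsym_addr() will patch later. */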
204 static int is64_type(int t)
206 return ((t & VT_BTYPE) == VT_PTR ||
207 (t & VT_BTYPE) == VT_FUNC ||
208 (t & VT_BTYPE) == VT_LLONG);
211 static int is_sse_float(int t) {
212 int bt;
213 bt = t & VT_BTYPE;
214 return bt == VT_DOUBLE || bt == VT_FLOAT;
218 /* instruction + 4 bytes data. Return the address of the data */
219 ST_FUNC int oad(int c, int s)
221 int ind1;
223 o(c);
224 ind1 = ind + 4;
225 if (ind1 > cur_text_section->data_allocated)
226 section_realloc(cur_text_section, ind1);
227 *(int *)(cur_text_section->data + ind) = s;
228 s = ind;
229 ind = ind1;
230 return s;
233 ST_FUNC void gen_addr32(int r, Sym *sym, int c)
235 if (r & VT_SYM)
236 greloc(cur_text_section, sym, ind, R_X86_64_32);
237 gen_le32(c);
240 /* output constant with relocation if 'r & VT_SYM' is true */
241 ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
243 if (r & VT_SYM)
244 greloc(cur_text_section, sym, ind, R_X86_64_64);
245 gen_le64(c);
248 /* output constant with relocation if 'r & VT_SYM' is true */
249 ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
251 if (r & VT_SYM)
252 greloc(cur_text_section, sym, ind, R_X86_64_PC32);
253 gen_le32(c-4);
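/* The -4 above: R_X86_64_PC32 is resolved as S + A - P with P the
   address of the 32 bit field itself, while the CPU adds the
   displacement to the address of the next instruction; storing c - 4 as
   the implicit addend makes the two agree when the field is the last
   4 bytes of the instruction. */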
256 /* output got address with relocation */
257 static void gen_gotpcrel(int r, Sym *sym, int c)
259 #ifndef TCC_TARGET_PE
260 Section *sr;
261 ElfW(Rela) *rel;
262 greloc(cur_text_section, sym, ind, R_X86_64_GOTPCREL);
263 sr = cur_text_section->reloc;
264 rel = (ElfW(Rela) *)(sr->data + sr->data_offset - sizeof(ElfW(Rela)));
265 rel->r_addend = -4;
266 #else
267 printf("picpic: %s %x %x | %02x %02x %02x\n", get_tok_str(sym->v, NULL), c, r,
268 cur_text_section->data[ind-3],
269 cur_text_section->data[ind-2],
270 cur_text_section->data[ind-1]
272 greloc(cur_text_section, sym, ind, R_X86_64_PC32);
273 #endif
274 gen_le32(0);
275 if (c) {
276 /* we use add c, %xxx for displacement */
277 orex(1, r, 0, 0x81);
278 o(0xc0 + REG_VALUE(r));
279 gen_le32(c);
283 static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
285 op_reg = REG_VALUE(op_reg) << 3;
286 if ((r & VT_VALMASK) == VT_CONST) {
287 /* constant memory reference */
288 o(0x05 | op_reg);
289 if (is_got) {
290 gen_gotpcrel(r, sym, c);
291 } else {
292 gen_addrpc32(r, sym, c);
294 } else if ((r & VT_VALMASK) == VT_LOCAL) {
295 /* currently, we use only %rbp as the base */
296 if (c == (char)c) {
297 /* short reference */
298 o(0x45 | op_reg);
299 g(c);
300 } else {
301 oad(0x85 | op_reg, c);
303 } else if ((r & VT_VALMASK) >= TREG_MEM) {
304 if (c) {
305 g(0x80 | op_reg | REG_VALUE(r));
306 gen_le32(c);
307 } else {
308 g(0x00 | op_reg | REG_VALUE(r));
310 } else {
311 g(0x00 | op_reg | REG_VALUE(r));
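/* The bytes above are standard ModRM encodings: 0x05 selects
   rip-relative disp32 addressing, 0x45/0x85 select disp8/disp32 off
   %rbp, and 0x80/0x00 select register-indirect with/without a 32 bit
   displacement; op_reg (already shifted left by 3) supplies the /digit
   or source register field. */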
315 /* generate a modrm reference. 'op_reg' contains the additional 3
316 opcode bits */
317 static void gen_modrm(int op_reg, int r, Sym *sym, int c)
319 gen_modrm_impl(op_reg, r, sym, c, 0);
322 /* generate a modrm reference. 'op_reg' contains the additional 3
323 opcode bits */
324 static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
326 int is_got;
327 is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
328 orex(1, r, op_reg, opcode);
329 gen_modrm_impl(op_reg, r, sym, c, is_got);
333 /* load 'r' from value 'sv' */
334 void load(int r, SValue *sv)
336 int v, t, ft, fc, fr;
337 SValue v1;
339 #ifdef TCC_TARGET_PE
340 SValue v2;
341 sv = pe_getimport(sv, &v2);
342 #endif
344 fr = sv->r;
345 ft = sv->type.t;
346 fc = sv->c.ul;
348 #ifndef TCC_TARGET_PE
349 /* we use indirect access via got */
350 if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
351 (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
352 /* use the result register as a temporary register */
353 int tr = r | TREG_MEM;
354 if (is_float(ft)) {
355 /* we cannot use float registers as temporary registers */
356 tr = get_reg(RC_INT) | TREG_MEM;
358 gen_modrm64(0x8b, tr, fr, sv->sym, 0);
360 /* load from the temporary register */
361 fr = tr | VT_LVAL;
363 #endif
365 v = fr & VT_VALMASK;
366 if (fr & VT_LVAL) {
367 int b, ll;
368 if (v == VT_LLOCAL) {
369 v1.type.t = VT_PTR;
370 v1.r = VT_LOCAL | VT_LVAL;
371 v1.c.ul = fc;
372 fr = r;
373 if (!(reg_classes[fr] & RC_INT))
374 fr = get_reg(RC_INT);
375 load(fr, &v1);
377 ll = 0;
378 if ((ft & VT_BTYPE) == VT_FLOAT) {
379 b = 0x6e0f66, r = 0; /* movd */
380 } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
381 b = 0x7e0ff3, r = 0; /* movq */
382 } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
383 b = 0xdb, r = 5; /* fldt */
384 } else if ((ft & VT_TYPE) == VT_BYTE) {
385 b = 0xbe0f; /* movsbl */
386 } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
387 b = 0xb60f; /* movzbl */
388 } else if ((ft & VT_TYPE) == VT_SHORT) {
389 b = 0xbf0f; /* movswl */
390 } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
391 b = 0xb70f; /* movzwl */
392 } else {
393 ll = is64_type(ft);
394 b = 0x8b;
396 if (ll) {
397 gen_modrm64(b, r, fr, sv->sym, fc);
398 } else {
399 orex(ll, fr, r, b);
400 gen_modrm(r, fr, sv->sym, fc);
402 } else {
403 if (v == VT_CONST) {
404 if (fr & VT_SYM) {
405 #ifdef TCC_TARGET_PE
406 orex(1,0,r,0x8d);
407 o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
408 gen_addrpc32(fr, sv->sym, fc);
409 #else
410 if (sv->sym->type.t & VT_STATIC) {
411 orex(1,0,r,0x8d);
412 o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
413 gen_addrpc32(fr, sv->sym, fc);
414 } else {
415 orex(1,0,r,0x8b);
416 o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
417 gen_gotpcrel(r, sv->sym, fc);
419 #endif
420 } else if (is64_type(ft)) {
421 orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
422 gen_le64(sv->c.ull);
423 } else {
424 orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
425 gen_le32(fc);
427 } else if (v == VT_LOCAL) {
428 orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
429 gen_modrm(r, VT_LOCAL, sv->sym, fc);
430 } else if (v == VT_CMP) {
431 orex(0,r,0,0);
432 if ((fc & ~0x100) != TOK_NE)
433 oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
434 else
435 oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
436 if (fc & 0x100)
438 /* This was a float compare. If the parity bit is
439 set the result was unordered, meaning false for everything
440 except TOK_NE, and true for TOK_NE. */
441 fc &= ~0x100;
442 o(0x037a + (REX_BASE(r) << 8));
444 orex(0,r,0, 0x0f); /* setxx %br */
445 o(fc);
446 o(0xc0 + REG_VALUE(r));
447 } else if (v == VT_JMP || v == VT_JMPI) {
448 t = v & 1;
449 orex(0,r,0,0);
450 oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
451 o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
452 gsym(fc);
453 orex(0,r,0,0);
454 oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
455 } else if (v != r) {
456 if (r == TREG_XMM0) {
457 assert(v == TREG_ST0);
458 /* gen_cvt_ftof(VT_DOUBLE); */
459 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
460 /* movsd -0x10(%rsp),%xmm0 */
461 o(0x44100ff2);
462 o(0xf024);
463 } else if (r == TREG_ST0) {
464 assert(v == TREG_XMM0);
465 /* gen_cvt_ftof(VT_LDOUBLE); */
466 /* movsd %xmm0,-0x10(%rsp) */
467 o(0x44110ff2);
468 o(0xf024);
469 o(0xf02444dd); /* fldl -0x10(%rsp) */
470 } else {
471 orex(1,r,v, 0x89);
472 o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
478 /* store register 'r' in lvalue 'v' */
479 void store(int r, SValue *v)
481 int fr, bt, ft, fc;
482 int op64 = 0;
483 /* store the REX prefix in this variable when PIC is enabled */
484 int pic = 0;
486 #ifdef TCC_TARGET_PE
487 SValue v2;
488 v = pe_getimport(v, &v2);
489 #endif
491 ft = v->type.t;
492 fc = v->c.ul;
493 fr = v->r & VT_VALMASK;
494 bt = ft & VT_BTYPE;
496 #ifndef TCC_TARGET_PE
497 /* we need to access the variable via got */
498 if (fr == VT_CONST && (v->r & VT_SYM)) {
499 /* mov xx(%rip), %r11 */
500 o(0x1d8b4c);
501 gen_gotpcrel(TREG_R11, v->sym, v->c.ul);
502 pic = is64_type(bt) ? 0x49 : 0x41;
504 #endif
506 /* XXX: incorrect if float reg to reg */
507 if (bt == VT_FLOAT) {
508 o(0x66);
509 o(pic);
510 o(0x7e0f); /* movd */
511 r = 0;
512 } else if (bt == VT_DOUBLE) {
513 o(0x66);
514 o(pic);
515 o(0xd60f); /* movq */
516 r = 0;
517 } else if (bt == VT_LDOUBLE) {
518 o(0xc0d9); /* fld %st(0) */
519 o(pic);
520 o(0xdb); /* fstpt */
521 r = 7;
522 } else {
523 if (bt == VT_SHORT)
524 o(0x66);
525 o(pic);
526 if (bt == VT_BYTE || bt == VT_BOOL)
527 orex(0, 0, r, 0x88);
528 else if (is64_type(bt))
529 op64 = 0x89;
530 else
531 orex(0, 0, r, 0x89);
533 if (pic) {
534 /* xxx r, (%r11) where xxx is mov, movq, fld, etc. */
535 if (op64)
536 o(op64);
537 o(3 + (r << 3));
538 } else if (op64) {
539 if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
540 gen_modrm64(op64, r, v->r, v->sym, fc);
541 } else if (fr != r) {
542 /* XXX: do we ever really get here? */
543 abort();
544 o(0xc0 + fr + r * 8); /* mov r, fr */
546 } else {
547 if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
548 gen_modrm(r, v->r, v->sym, fc);
549 } else if (fr != r) {
550 /* XXX: do we ever really get here? */
551 abort();
552 o(0xc0 + fr + r * 8); /* mov r, fr */
557 /* 'is_jmp' is '1' if it is a jump */
558 static void gcall_or_jmp(int is_jmp)
560 int r;
561 if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
562 /* constant case */
563 if (vtop->r & VT_SYM) {
564 /* relocation case */
565 greloc(cur_text_section, vtop->sym,
566 ind + 1, R_X86_64_PC32);
567 } else {
568 /* put an empty PC32 relocation */
569 put_elf_reloc(symtab_section, cur_text_section,
570 ind + 1, R_X86_64_PC32, 0);
572 oad(0xe8 + is_jmp, vtop->c.ul - 4); /* call/jmp im */
573 } else {
574 /* otherwise, indirect call */
575 r = TREG_R11;
576 load(r, vtop);
577 o(0x41); /* REX */
578 o(0xff); /* call/jmp *r */
579 o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
583 #ifdef TCC_TARGET_PE
585 #define REGN 4
586 static const uint8_t arg_regs[] = {
587 TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
590 static int func_scratch;
592 /* Generate function call. The function address is pushed first, then
593 all the parameters in call order. This function pops all the
594 parameters and the function address. */
596 void gen_offs_sp(int b, int r, int d)
598 orex(1,0,r & 0x100 ? 0 : r, b);
599 if (d == (char)d) {
600 o(0x2444 | (REG_VALUE(r) << 3));
601 g(d);
602 } else {
603 o(0x2484 | (REG_VALUE(r) << 3));
604 gen_le32(d);
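/* gen_offs_sp() emits "<op> reg, d(%rsp)": 0x44/0x84 is the ModRM byte
   (mod = 01/10, rm = 100, meaning a SIB byte follows), 0x24 is the SIB
   byte selecting %rsp as the base, and REG_VALUE(r) << 3 fills the reg
   field. */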
608 void gfunc_call(int nb_args)
610 int size, align, r, args_size, i, d, j, bt, struct_size;
611 int nb_reg_args, gen_reg;
613 nb_reg_args = nb_args;
614 args_size = (nb_reg_args < REGN ? REGN : nb_reg_args) * PTR_SIZE;
616 /* for struct arguments, we need to call memcpy, and that call would
617 clobber the register-passed arguments we are preparing.
618 So we process the arguments that are passed on the stack first. */
619 struct_size = args_size;
620 for(i = 0; i < nb_args; i++) {
621 SValue *sv = &vtop[-i];
622 bt = (sv->type.t & VT_BTYPE);
623 if (bt == VT_STRUCT) {
624 size = type_size(&sv->type, &align);
625 /* round the size up to the stack alignment */
626 size = (size + 15) & ~15;
627 /* generate structure store */
628 r = get_reg(RC_INT);
629 gen_offs_sp(0x8d, r, struct_size);
630 struct_size += size;
632 /* generate memcpy call */
633 vset(&sv->type, r | VT_LVAL, 0);
634 vpushv(sv);
635 vstore();
636 --vtop;
638 } else if (bt == VT_LDOUBLE) {
640 gv(RC_ST0);
641 gen_offs_sp(0xdb, 0x107, struct_size);
642 struct_size += 16;
647 if (func_scratch < struct_size)
648 func_scratch = struct_size;
649 #if 1
650 for (i = 0; i < REGN; ++i)
651 save_reg(arg_regs[i]);
652 save_reg(TREG_RAX);
653 #endif
654 gen_reg = nb_reg_args;
655 struct_size = args_size;
657 for(i = 0; i < nb_args; i++) {
658 bt = (vtop->type.t & VT_BTYPE);
660 if (bt == VT_STRUCT || bt == VT_LDOUBLE) {
661 if (bt == VT_LDOUBLE)
662 size = 16;
663 else
664 size = type_size(&vtop->type, &align);
665 /* round the size up to the stack alignment */
666 size = (size + 15) & ~15;
667 j = --gen_reg;
668 if (j >= REGN) {
669 d = TREG_RAX;
670 gen_offs_sp(0x8d, d, struct_size);
671 gen_offs_sp(0x89, d, j*8);
672 } else {
673 d = arg_regs[j];
674 gen_offs_sp(0x8d, d, struct_size);
676 struct_size += size;
678 } else if (is_sse_float(vtop->type.t)) {
679 gv(RC_FLOAT); /* only one float register */
680 j = --gen_reg;
681 if (j >= REGN) {
682 /* movq %xmm0, j*8(%rsp) */
683 gen_offs_sp(0xd60f66, 0x100, j*8);
684 } else {
685 /* movaps %xmm0, %xmmN */
686 o(0x280f);
687 o(0xc0 + (j << 3));
688 d = arg_regs[j];
689 /* mov %xmm0, %rxx */
690 o(0x66);
691 orex(1,d,0, 0x7e0f);
692 o(0xc0 + REG_VALUE(d));
694 } else {
695 j = --gen_reg;
696 if (j >= REGN) {
697 r = gv(RC_INT);
698 gen_offs_sp(0x89, r, j*8);
699 } else {
700 d = arg_regs[j];
701 if (d < NB_REGS) {
702 gv(reg_classes[d] & ~RC_INT);
703 } else {
704 r = gv(RC_INT);
705 if (d != r) {
706 orex(1,d,r, 0x89);
707 o(0xc0 + REG_VALUE(d) + REG_VALUE(r) * 8);
713 vtop--;
715 save_regs(0);
716 gcall_or_jmp(0);
717 vtop--;
721 #define FUNC_PROLOG_SIZE 11
723 /* generate function prolog of type 't' */
724 void gfunc_prolog(CType *func_type)
726 int addr, reg_param_index, bt;
727 Sym *sym;
728 CType *type;
730 func_ret_sub = 0;
731 func_scratch = 0;
732 loc = 0;
734 addr = PTR_SIZE * 2;
735 ind += FUNC_PROLOG_SIZE;
736 func_sub_sp_offset = ind;
737 reg_param_index = 0;
739 sym = func_type->ref;
741 /* if the function returns a structure, then add an
742 implicit pointer parameter */
743 func_vt = sym->type;
744 if ((func_vt.t & VT_BTYPE) == VT_STRUCT) {
745 gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
746 reg_param_index++;
747 addr += PTR_SIZE;
750 /* define parameters */
751 while ((sym = sym->next) != NULL) {
752 type = &sym->type;
753 bt = type->t & VT_BTYPE;
754 if (reg_param_index < REGN) {
755 /* save arguments passed by register */
756 gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
758 if (bt == VT_STRUCT || bt == VT_LDOUBLE) {
759 sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL | VT_REF, addr);
760 } else {
761 sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);
763 reg_param_index++;
764 addr += PTR_SIZE;
767 while (reg_param_index < REGN) {
768 if (func_type->ref->c == FUNC_ELLIPSIS)
769 gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
770 reg_param_index++;
771 addr += PTR_SIZE;
775 /* generate function epilog */
776 void gfunc_epilog(void)
778 int v, saved_ind;
780 o(0xc9); /* leave */
781 if (func_ret_sub == 0) {
782 o(0xc3); /* ret */
783 } else {
784 o(0xc2); /* ret n */
785 g(func_ret_sub);
786 g(func_ret_sub >> 8);
789 saved_ind = ind;
790 ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
791 /* align the local frame size to 16 bytes and reserve room for the locals */
792 v = (func_scratch + -loc + 15) & -16;
794 if (v >= 4096) {
795 Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
796 oad(0xb8, v); /* mov stacksize, %eax */
797 oad(0xe8, -4); /* call __chkstk, (does the stackframe too) */
798 greloc(cur_text_section, sym, ind-4, R_X86_64_PC32);
799 o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
800 } else {
801 o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
802 o(0xec8148); /* sub rsp, stacksize */
803 gen_le32(v);
806 cur_text_section->data_offset = saved_ind;
807 pe_add_unwind_data(ind, saved_ind, v);
808 ind = cur_text_section->data_offset;
811 #else
813 static void gadd_sp(int val)
815 if (val == (char)val) {
816 o(0xc48348);
817 g(val);
818 } else {
819 oad(0xc48148, val); /* add $xxx, %rsp */
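/* 48 83 c4 ib is "add $imm8, %rsp" and 48 81 c4 id is "add $imm32, %rsp";
   note that o() emits its argument low byte first, so the constants above
   read right to left in instruction order. */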
823 #define REGN 6
824 static const uint8_t arg_regs[REGN] = {
825 TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
828 /* Generate function call. The function address is pushed first, then
829 all the parameters in call order. This function pops all the
830 parameters and the function address. */
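/* Argument classification below follows the System V AMD64 calling
   convention as implemented here: up to six integer or pointer arguments
   go in rdi, rsi, rdx, rcx, r8, r9, up to eight float/double arguments go
   in xmm0-xmm7, and structs, long doubles and any overflow arguments are
   pushed on the stack.  Before the call, %eax is loaded with the number
   of SSE registers used, which variadic callees need. */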
831 void gfunc_call(int nb_args)
833 int size, align, r, args_size, i;
834 int nb_reg_args = 0;
835 int nb_sse_args = 0;
836 int sse_reg, gen_reg;
838 /* calculate the number of integer/float arguments */
839 args_size = 0;
840 for(i = 0; i < nb_args; i++) {
841 if ((vtop[-i].type.t & VT_BTYPE) == VT_STRUCT) {
842 args_size += type_size(&vtop[-i].type, &align);
843 args_size = (args_size + 7) & ~7;
844 } else if ((vtop[-i].type.t & VT_BTYPE) == VT_LDOUBLE) {
845 args_size += 16;
846 } else if (is_sse_float(vtop[-i].type.t)) {
847 nb_sse_args++;
848 if (nb_sse_args > 8) args_size += 8;
849 } else {
850 nb_reg_args++;
851 if (nb_reg_args > REGN) args_size += 8;
855 /* for struct arguments, we need to call memcpy, and that call would
856 clobber the register-passed arguments we are preparing.
857 So we process the arguments that are passed on the stack first. */
858 gen_reg = nb_reg_args;
859 sse_reg = nb_sse_args;
861 /* adjust the stack so that it stays 16 byte aligned (SSE boundary) */
862 if (args_size &= 15) {
863 /* fetch the cpu flags before the following sub changes them */
864 if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
865 gv(RC_INT);
867 args_size = 16 - args_size;
868 o(0x48);
869 oad(0xec81, args_size); /* sub $xxx, %rsp */
872 for(i = 0; i < nb_args; i++) {
873 /* Swap argument to top, it will possibly be changed here,
874 and might use more temps. All arguments must remain on the
875 stack, so that get_reg can correctly evict some of them onto
876 stack. We could also use a vrott(nb_args) at the end
877 of this loop, but this seems faster. */
878 SValue tmp = vtop[0];
879 vtop[0] = vtop[-i];
880 vtop[-i] = tmp;
881 if ((vtop->type.t & VT_BTYPE) == VT_STRUCT) {
882 size = type_size(&vtop->type, &align);
883 /* round the size up to the stack alignment */
884 size = (size + 7) & ~7;
885 /* allocate the necessary size on stack */
886 o(0x48);
887 oad(0xec81, size); /* sub $xxx, %rsp */
888 /* generate structure store */
889 r = get_reg(RC_INT);
890 orex(1, r, 0, 0x89); /* mov %rsp, r */
891 o(0xe0 + REG_VALUE(r));
892 vset(&vtop->type, r | VT_LVAL, 0);
893 vswap();
894 vstore();
895 args_size += size;
896 } else if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
897 gv(RC_ST0);
898 size = LDOUBLE_SIZE;
899 oad(0xec8148, size); /* sub $xxx, %rsp */
900 o(0x7cdb); /* fstpt 0(%rsp) */
901 g(0x24);
902 g(0x00);
903 args_size += size;
904 } else if (is_sse_float(vtop->type.t)) {
905 int j = --sse_reg;
906 if (j >= 8) {
907 gv(RC_FLOAT);
908 o(0x50); /* push $rax */
909 /* movq %xmm0, (%rsp) */
910 o(0x04d60f66);
911 o(0x24);
912 args_size += 8;
914 } else {
915 int j = --gen_reg;
916 /* simple type */
917 /* XXX: implicit cast ? */
918 if (j >= REGN) {
919 r = gv(RC_INT);
920 orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
921 args_size += 8;
925 /* And swap the argument back to its original position. */
926 tmp = vtop[0];
927 vtop[0] = vtop[-i];
928 vtop[-i] = tmp;
931 /* XXX This should be superfluous. */
932 save_regs(0); /* save used temporary registers */
934 /* then, we prepare the register-passed arguments.
935 Note that we cannot set RDX and RCX in this loop because gv()
936 may clobber them; use R10 and R11 instead and copy them into
937 place just before the call. */
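/* e.g. the third and fourth integer arguments are first moved to %r10
   and %r11 in the loop below (j == 2 or 3 selects d = j + 8) and only
   copied into %rdx and %rcx right before the call. */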
938 gen_reg = nb_reg_args;
939 sse_reg = nb_sse_args;
940 for(i = 0; i < nb_args; i++) {
941 if ((vtop->type.t & VT_BTYPE) == VT_STRUCT ||
942 (vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
943 } else if (is_sse_float(vtop->type.t)) {
944 int j = --sse_reg;
945 if (j < 8) {
946 gv(RC_FLOAT); /* only one float register */
947 /* movaps %xmm0, %xmmN */
948 o(0x280f);
949 o(0xc0 + (sse_reg << 3));
951 } else {
952 int j = --gen_reg;
953 /* simple type */
954 /* XXX: implicit cast ? */
955 if (j < REGN) {
956 int d = arg_regs[j];
957 r = gv(RC_INT);
958 if (j == 2 || j == 3)
959 /* j=2: r10, j=3: r11 */
960 d = j + 8;
961 orex(1,d,r,0x89); /* mov */
962 o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
965 vtop--;
968 /* We shouldn't have many operands on the stack anymore, but the
969 call address itself is still there, and it might be in %eax
970 (or edx/ecx) currently, which the below writes would clobber.
971 So evict all remaining operands here. */
972 save_regs(0);
974 /* Copy R10 and R11 into RDX and RCX, respectively */
975 if (nb_reg_args > 2) {
976 o(0xd2894c); /* mov %r10, %rdx */
977 if (nb_reg_args > 3) {
978 o(0xd9894c); /* mov %r11, %rcx */
982 oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
983 gcall_or_jmp(0);
984 if (args_size)
985 gadd_sp(args_size);
986 vtop--;
990 #define FUNC_PROLOG_SIZE 11
992 static void push_arg_reg(int i) {
993 loc -= 8;
994 gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
997 /* generate function prolog of type 't' */
998 void gfunc_prolog(CType *func_type)
1000 int i, addr, align, size;
1001 int param_index, param_addr, reg_param_index, sse_param_index;
1002 Sym *sym;
1003 CType *type;
1005 sym = func_type->ref;
1006 addr = PTR_SIZE * 2;
1007 loc = 0;
1008 ind += FUNC_PROLOG_SIZE;
1009 func_sub_sp_offset = ind;
1010 func_ret_sub = 0;
1012 if (func_type->ref->c == FUNC_ELLIPSIS) {
1013 int seen_reg_num, seen_sse_num, seen_stack_size;
1014 seen_reg_num = seen_sse_num = 0;
1015 /* frame pointer and return address */
1016 seen_stack_size = PTR_SIZE * 2;
1017 /* count the number of seen parameters */
1018 sym = func_type->ref;
1019 while ((sym = sym->next) != NULL) {
1020 type = &sym->type;
1021 if (is_sse_float(type->t)) {
1022 if (seen_sse_num < 8) {
1023 seen_sse_num++;
1024 } else {
1025 seen_stack_size += 8;
1027 } else if ((type->t & VT_BTYPE) == VT_STRUCT) {
1028 size = type_size(type, &align);
1029 size = (size + 7) & ~7;
1030 seen_stack_size += size;
1031 } else if ((type->t & VT_BTYPE) == VT_LDOUBLE) {
1032 seen_stack_size += LDOUBLE_SIZE;
1033 } else {
1034 if (seen_reg_num < REGN) {
1035 seen_reg_num++;
1036 } else {
1037 seen_stack_size += 8;
1042 loc -= 16;
1043 /* movl $0x????????, -0x10(%rbp) */
1044 o(0xf045c7);
1045 gen_le32(seen_reg_num * 8);
1046 /* movl $0x????????, -0xc(%rbp) */
1047 o(0xf445c7);
1048 gen_le32(seen_sse_num * 16 + 48);
1049 /* movl $0x????????, -0x8(%rbp) */
1050 o(0xf845c7);
1051 gen_le32(seen_stack_size);
1053 /* save all register passing arguments */
1054 for (i = 0; i < 8; i++) {
1055 loc -= 16;
1056 o(0xd60f66); /* movq */
1057 gen_modrm(7 - i, VT_LOCAL, NULL, loc);
1058 /* movq $0, loc+8(%rbp) */
1059 o(0x85c748);
1060 gen_le32(loc + 8);
1061 gen_le32(0);
1063 for (i = 0; i < REGN; i++) {
1064 push_arg_reg(REGN-1-i);
1068 sym = func_type->ref;
1069 param_index = 0;
1070 reg_param_index = 0;
1071 sse_param_index = 0;
1073 /* if the function returns a structure, then add an
1074 implicit pointer parameter */
1075 func_vt = sym->type;
1076 if ((func_vt.t & VT_BTYPE) == VT_STRUCT) {
1077 push_arg_reg(reg_param_index);
1078 param_addr = loc;
1080 func_vc = loc;
1081 param_index++;
1082 reg_param_index++;
1084 /* define parameters */
1085 while ((sym = sym->next) != NULL) {
1086 type = &sym->type;
1087 size = type_size(type, &align);
1088 size = (size + 7) & ~7;
1089 if (is_sse_float(type->t)) {
1090 if (sse_param_index < 8) {
1091 /* save arguments passed by register */
1092 loc -= 8;
1093 o(0xd60f66); /* movq */
1094 gen_modrm(sse_param_index, VT_LOCAL, NULL, loc);
1095 param_addr = loc;
1096 } else {
1097 param_addr = addr;
1098 addr += size;
1100 sse_param_index++;
1102 } else if ((type->t & VT_BTYPE) == VT_STRUCT ||
1103 (type->t & VT_BTYPE) == VT_LDOUBLE) {
1104 param_addr = addr;
1105 addr += size;
1106 } else {
1107 if (reg_param_index < REGN) {
1108 /* save arguments passed by register */
1109 push_arg_reg(reg_param_index);
1110 param_addr = loc;
1111 } else {
1112 param_addr = addr;
1113 addr += 8;
1115 reg_param_index++;
1117 sym_push(sym->v & ~SYM_FIELD, type,
1118 VT_LOCAL | VT_LVAL, param_addr);
1119 param_index++;
1123 /* generate function epilog */
1124 void gfunc_epilog(void)
1126 int v, saved_ind;
1128 o(0xc9); /* leave */
1129 if (func_ret_sub == 0) {
1130 o(0xc3); /* ret */
1131 } else {
1132 o(0xc2); /* ret n */
1133 g(func_ret_sub);
1134 g(func_ret_sub >> 8);
1136 /* align the local frame size to 16 bytes and reserve room for the locals */
1137 v = (-loc + 15) & -16;
1138 saved_ind = ind;
1139 ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
1140 o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
1141 o(0xec8148); /* sub rsp, stacksize */
1142 gen_le32(v);
1143 ind = saved_ind;
1146 #endif /* not PE */
1148 /* generate a jump to a label */
1149 int gjmp(int t)
1151 return psym(0xe9, t);
1154 /* generate a jump to a fixed address */
1155 void gjmp_addr(int a)
1157 int r;
1158 r = a - ind - 2;
1159 if (r == (char)r) {
1160 g(0xeb);
1161 g(r);
1162 } else {
1163 oad(0xe9, a - ind - 5);
1167 /* generate a test. set 'inv' to invert test. Stack entry is popped */
1168 int gtst(int inv, int t)
1170 int v, *p;
1172 v = vtop->r & VT_VALMASK;
1173 if (v == VT_CMP) {
1174 /* fast case : can jump directly since flags are set */
1175 if (vtop->c.i & 0x100)
1177 /* This was a float compare. If the parity flag is set
1178 the result was unordered. For anything except != this
1179 means false and we don't jump (anding both conditions).
1180 For != this means true (oring both).
1181 Take care about inverting the test. We need to jump
1182 to our target if the result was unordered and test wasn't NE,
1183 otherwise if unordered we don't want to jump. */
1184 vtop->c.i &= ~0x100;
1185 if (!inv == (vtop->c.i != TOK_NE))
1186 o(0x067a); /* jp +6 */
1187 else
1189 g(0x0f);
1190 t = psym(0x8a, t); /* jp t */
1193 g(0x0f);
1194 t = psym((vtop->c.i - 16) ^ inv, t);
1195 } else if (v == VT_JMP || v == VT_JMPI) {
1196 /* && or || optimization */
1197 if ((v & 1) == inv) {
1198 /* insert vtop->c jump list in t */
1199 p = &vtop->c.i;
1200 while (*p != 0)
1201 p = (int *)(cur_text_section->data + *p);
1202 *p = t;
1203 t = vtop->c.i;
1204 } else {
1205 t = gjmp(t);
1206 gsym(vtop->c.i);
1208 } else {
1209 if (is_float(vtop->type.t) ||
1210 (vtop->type.t & VT_BTYPE) == VT_LLONG) {
1211 vpushi(0);
1212 gen_op(TOK_NE);
1214 if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
1215 /* constant jmp optimization */
1216 if ((vtop->c.i != 0) != inv)
1217 t = gjmp(t);
1218 } else {
1219 v = gv(RC_INT);
1220 orex(0,v,v,0x85);
1221 o(0xc0 + REG_VALUE(v) * 9);
1222 g(0x0f);
1223 t = psym(0x85 ^ inv, t);
1226 vtop--;
1227 return t;
1230 /* generate an integer binary operation */
1231 void gen_opi(int op)
1233 int r, fr, opc, c;
1234 int ll, uu, cc;
1236 ll = is64_type(vtop[-1].type.t);
1237 uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
1238 cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
1240 switch(op) {
1241 case '+':
1242 case TOK_ADDC1: /* add with carry generation */
1243 opc = 0;
1244 gen_op8:
1245 if (cc && (!ll || (int)vtop->c.ll == vtop->c.ll)) {
1246 /* constant case */
1247 vswap();
1248 r = gv(RC_INT);
1249 vswap();
1250 c = vtop->c.i;
1251 if (c == (char)c) {
1252 /* XXX: generate inc and dec for smaller code ? */
1253 orex(ll, r, 0, 0x83);
1254 o(0xc0 | (opc << 3) | REG_VALUE(r));
1255 g(c);
1256 } else {
1257 orex(ll, r, 0, 0x81);
1258 oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
1260 } else {
1261 gv2(RC_INT, RC_INT);
1262 r = vtop[-1].r;
1263 fr = vtop[0].r;
1264 orex(ll, r, fr, (opc << 3) | 0x01);
1265 o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
1267 vtop--;
1268 if (op >= TOK_ULT && op <= TOK_GT) {
1269 vtop->r = VT_CMP;
1270 vtop->c.i = op;
1272 break;
1273 case '-':
1274 case TOK_SUBC1: /* sub with carry generation */
1275 opc = 5;
1276 goto gen_op8;
1277 case TOK_ADDC2: /* add with carry use */
1278 opc = 2;
1279 goto gen_op8;
1280 case TOK_SUBC2: /* sub with carry use */
1281 opc = 3;
1282 goto gen_op8;
1283 case '&':
1284 opc = 4;
1285 goto gen_op8;
1286 case '^':
1287 opc = 6;
1288 goto gen_op8;
1289 case '|':
1290 opc = 1;
1291 goto gen_op8;
1292 case '*':
1293 gv2(RC_INT, RC_INT);
1294 r = vtop[-1].r;
1295 fr = vtop[0].r;
1296 orex(ll, fr, r, 0xaf0f); /* imul fr, r */
1297 o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
1298 vtop--;
1299 break;
1300 case TOK_SHL:
1301 opc = 4;
1302 goto gen_shift;
1303 case TOK_SHR:
1304 opc = 5;
1305 goto gen_shift;
1306 case TOK_SAR:
1307 opc = 7;
1308 gen_shift:
1309 opc = 0xc0 | (opc << 3);
1310 if (cc) {
1311 /* constant case */
1312 vswap();
1313 r = gv(RC_INT);
1314 vswap();
1315 orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
1316 o(opc | REG_VALUE(r));
1317 g(vtop->c.i & (ll ? 63 : 31));
1318 } else {
1319 /* we generate the shift in ecx */
1320 gv2(RC_INT, RC_RCX);
1321 r = vtop[-1].r;
1322 orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
1323 o(opc | REG_VALUE(r));
1325 vtop--;
1326 break;
1327 case TOK_UDIV:
1328 case TOK_UMOD:
1329 uu = 1;
1330 goto divmod;
1331 case '/':
1332 case '%':
1333 case TOK_PDIV:
1334 uu = 0;
1335 divmod:
1336 /* first operand must be in eax */
1337 /* XXX: need better constraint for second operand */
1338 gv2(RC_RAX, RC_RCX);
1339 r = vtop[-1].r;
1340 fr = vtop[0].r;
1341 vtop--;
1342 save_reg(TREG_RDX);
1343 orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
1344 orex(ll, fr, 0, 0xf7); /* div fr, %eax */
1345 o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
1346 if (op == '%' || op == TOK_UMOD)
1347 r = TREG_RDX;
1348 else
1349 r = TREG_RAX;
1350 vtop->r = r;
1351 break;
1352 default:
1353 opc = 7;
1354 goto gen_op8;
1358 void gen_opl(int op)
1360 gen_opi(op);
1363 /* generate a floating point operation 'v = t1 op t2' instruction. The
1364 two operands are guaranteed to have the same floating point type */
1365 /* XXX: need to use ST1 too */
1366 void gen_opf(int op)
1368 int a, ft, fc, swapped, r;
1369 int float_type =
1370 (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;
1372 /* convert constants to memory references */
1373 if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
1374 vswap();
1375 gv(float_type);
1376 vswap();
1378 if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
1379 gv(float_type);
1381 /* must put at least one value in the floating point register */
1382 if ((vtop[-1].r & VT_LVAL) &&
1383 (vtop[0].r & VT_LVAL)) {
1384 vswap();
1385 gv(float_type);
1386 vswap();
1388 swapped = 0;
1389 /* swap the stack if needed so that t1 is the register and t2 is
1390 the memory reference */
1391 if (vtop[-1].r & VT_LVAL) {
1392 vswap();
1393 swapped = 1;
1395 if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
1396 if (op >= TOK_ULT && op <= TOK_GT) {
1397 /* load the second operand onto the fp stack */
1398 load(TREG_ST0, vtop);
1399 save_reg(TREG_RAX); /* eax is used by FP comparison code */
1400 if (op == TOK_GE || op == TOK_GT)
1401 swapped = !swapped;
1402 else if (op == TOK_EQ || op == TOK_NE)
1403 swapped = 0;
1404 if (swapped)
1405 o(0xc9d9); /* fxch %st(1) */
1406 o(0xe9da); /* fucompp */
1407 o(0xe0df); /* fnstsw %ax */
1408 if (op == TOK_EQ) {
1409 o(0x45e480); /* and $0x45, %ah */
1410 o(0x40fc80); /* cmp $0x40, %ah */
1411 } else if (op == TOK_NE) {
1412 o(0x45e480); /* and $0x45, %ah */
1413 o(0x40f480); /* xor $0x40, %ah */
1414 op = TOK_NE;
1415 } else if (op == TOK_GE || op == TOK_LE) {
1416 o(0x05c4f6); /* test $0x05, %ah */
1417 op = TOK_EQ;
1418 } else {
1419 o(0x45c4f6); /* test $0x45, %ah */
1420 op = TOK_EQ;
1422 vtop--;
1423 vtop->r = VT_CMP;
1424 vtop->c.i = op;
1425 } else {
1426 /* no memory reference possible for long double operations */
1427 load(TREG_ST0, vtop);
1428 swapped = !swapped;
1430 switch(op) {
1431 default:
1432 case '+':
1433 a = 0;
1434 break;
1435 case '-':
1436 a = 4;
1437 if (swapped)
1438 a++;
1439 break;
1440 case '*':
1441 a = 1;
1442 break;
1443 case '/':
1444 a = 6;
1445 if (swapped)
1446 a++;
1447 break;
1449 ft = vtop->type.t;
1450 fc = vtop->c.ul;
1451 o(0xde); /* fxxxp %st, %st(1) */
1452 o(0xc1 + (a << 3));
1453 vtop--;
1455 } else {
1456 if (op >= TOK_ULT && op <= TOK_GT) {
1457 /* if saved lvalue, then we must reload it */
1458 r = vtop->r;
1459 fc = vtop->c.ul;
1460 if ((r & VT_VALMASK) == VT_LLOCAL) {
1461 SValue v1;
1462 r = get_reg(RC_INT);
1463 v1.type.t = VT_PTR;
1464 v1.r = VT_LOCAL | VT_LVAL;
1465 v1.c.ul = fc;
1466 load(r, &v1);
1467 fc = 0;
1470 if (op == TOK_EQ || op == TOK_NE) {
1471 swapped = 0;
1472 } else {
1473 if (op == TOK_LE || op == TOK_LT)
1474 swapped = !swapped;
1475 if (op == TOK_LE || op == TOK_GE) {
1476 op = 0x93; /* setae */
1477 } else {
1478 op = 0x97; /* seta */
1482 if (swapped) {
1483 o(0x7e0ff3); /* movq */
1484 gen_modrm(1, r, vtop->sym, fc);
1486 if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE) {
1487 o(0x66);
1489 o(0x2e0f); /* ucomisd %xmm0, %xmm1 */
1490 o(0xc8);
1491 } else {
1492 if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE) {
1493 o(0x66);
1495 o(0x2e0f); /* ucomisd */
1496 gen_modrm(0, r, vtop->sym, fc);
1499 vtop--;
1500 vtop->r = VT_CMP;
1501 vtop->c.i = op | 0x100;
1502 } else {
1503 /* no memory reference possible for long double operations */
1504 if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
1505 load(TREG_XMM0, vtop);
1506 swapped = !swapped;
1508 switch(op) {
1509 default:
1510 case '+':
1511 a = 0;
1512 break;
1513 case '-':
1514 a = 4;
1515 break;
1516 case '*':
1517 a = 1;
1518 break;
1519 case '/':
1520 a = 6;
1521 break;
1523 ft = vtop->type.t;
1524 fc = vtop->c.ul;
1525 if ((ft & VT_BTYPE) == VT_LDOUBLE) {
1526 o(0xde); /* fxxxp %st, %st(1) */
1527 o(0xc1 + (a << 3));
1528 } else {
1529 /* if saved lvalue, then we must reload it */
1530 r = vtop->r;
1531 if ((r & VT_VALMASK) == VT_LLOCAL) {
1532 SValue v1;
1533 r = get_reg(RC_INT);
1534 v1.type.t = VT_PTR;
1535 v1.r = VT_LOCAL | VT_LVAL;
1536 v1.c.ul = fc;
1537 load(r, &v1);
1538 fc = 0;
1540 if (swapped) {
1541 /* movq %xmm0,%xmm1 */
1542 o(0x7e0ff3);
1543 o(0xc8);
1544 load(TREG_XMM0, vtop);
1545 /* subsd %xmm1,%xmm0 (f2 0f 5c c1) */
1546 if ((ft & VT_BTYPE) == VT_DOUBLE) {
1547 o(0xf2);
1548 } else {
1549 o(0xf3);
1551 o(0x0f);
1552 o(0x58 + a);
1553 o(0xc1);
1554 } else {
1555 if ((ft & VT_BTYPE) == VT_DOUBLE) {
1556 o(0xf2);
1557 } else {
1558 o(0xf3);
1560 o(0x0f);
1561 o(0x58 + a);
1562 gen_modrm(0, r, vtop->sym, fc);
1565 vtop--;
1570 /* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
1571 and 'long long' cases. */
1572 void gen_cvt_itof(int t)
1574 if ((t & VT_BTYPE) == VT_LDOUBLE) {
1575 save_reg(TREG_ST0);
1576 gv(RC_INT);
1577 if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
1578 /* signed long long to float/double/long double (unsigned case
1579 is handled generically) */
1580 o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
1581 o(0x242cdf); /* fildll (%rsp) */
1582 o(0x08c48348); /* add $8, %rsp */
1583 } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
1584 (VT_INT | VT_UNSIGNED)) {
1585 /* unsigned int to float/double/long double */
1586 o(0x6a); /* push $0 */
1587 g(0x00);
1588 o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
1589 o(0x242cdf); /* fildll (%rsp) */
1590 o(0x10c48348); /* add $16, %rsp */
1591 } else {
1592 /* int to float/double/long double */
1593 o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
1594 o(0x2404db); /* fildl (%rsp) */
1595 o(0x08c48348); /* add $8, %rsp */
1597 vtop->r = TREG_ST0;
1598 } else {
1599 save_reg(TREG_XMM0);
1600 gv(RC_INT);
1601 o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT));
1602 if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
1603 (VT_INT | VT_UNSIGNED) ||
1604 (vtop->type.t & VT_BTYPE) == VT_LLONG) {
1605 o(0x48); /* REX */
1607 o(0x2a0f);
1608 o(0xc0 + (vtop->r & VT_VALMASK)); /* cvtsi2sd */
1609 vtop->r = TREG_XMM0;
1613 /* convert from one floating point type to another */
1614 void gen_cvt_ftof(int t)
1616 int ft, bt, tbt;
1618 ft = vtop->type.t;
1619 bt = ft & VT_BTYPE;
1620 tbt = t & VT_BTYPE;
1622 if (bt == VT_FLOAT) {
1623 gv(RC_FLOAT);
1624 if (tbt == VT_DOUBLE) {
1625 o(0xc0140f); /* unpcklps */
1626 o(0xc05a0f); /* cvtps2pd */
1627 } else if (tbt == VT_LDOUBLE) {
1628 /* movss %xmm0,-0x10(%rsp) */
1629 o(0x44110ff3);
1630 o(0xf024);
1631 o(0xf02444d9); /* flds -0x10(%rsp) */
1632 vtop->r = TREG_ST0;
1634 } else if (bt == VT_DOUBLE) {
1635 gv(RC_FLOAT);
1636 if (tbt == VT_FLOAT) {
1637 o(0xc0140f66); /* unpcklpd */
1638 o(0xc05a0f66); /* cvtpd2ps */
1639 } else if (tbt == VT_LDOUBLE) {
1640 /* movsd %xmm0,-0x10(%rsp) */
1641 o(0x44110ff2);
1642 o(0xf024);
1643 o(0xf02444dd); /* fldl -0x10(%rsp) */
1644 vtop->r = TREG_ST0;
1646 } else {
1647 gv(RC_ST0);
1648 if (tbt == VT_DOUBLE) {
1649 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
1650 /* movsd -0x10(%rsp),%xmm0 */
1651 o(0x44100ff2);
1652 o(0xf024);
1653 vtop->r = TREG_XMM0;
1654 } else if (tbt == VT_FLOAT) {
1655 o(0xf0245cd9); /* fstps -0x10(%rsp) */
1656 /* movss -0x10(%rsp),%xmm0 */
1657 o(0x44100ff3);
1658 o(0xf024);
1659 vtop->r = TREG_XMM0;
1664 /* convert fp to int 't' type */
1665 void gen_cvt_ftoi(int t)
1667 int ft, bt, size, r;
1668 ft = vtop->type.t;
1669 bt = ft & VT_BTYPE;
1670 if (bt == VT_LDOUBLE) {
1671 gen_cvt_ftof(VT_DOUBLE);
1672 bt = VT_DOUBLE;
1675 gv(RC_FLOAT);
1676 if (t != VT_INT)
1677 size = 8;
1678 else
1679 size = 4;
1681 r = get_reg(RC_INT);
1682 if (bt == VT_FLOAT) {
1683 o(0xf3);
1684 } else if (bt == VT_DOUBLE) {
1685 o(0xf2);
1686 } else {
1687 assert(0);
1689 orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
1690 o(0xc0 + (REG_VALUE(r) << 3));
1691 vtop->r = r;
1694 /* computed goto support */
1695 void ggoto(void)
1697 gcall_or_jmp(1);
1698 vtop--;
1701 /* end of x86-64 code generator */
1702 /*************************************************************/
1703 #endif /* ! TARGET_DEFS_ONLY */
1704 /******************************************************/