/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS         25
#define NB_ASM_REGS     8
/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0080 /* only for long double */
#define RC_R8      0x0100
#define RC_R9      0x0200
#define RC_R10     0x0400
#define RC_R11     0x0800
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_LRET    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */
/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20
};
#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)
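
/* Illustrative note (added commentary, not in the original source):
   registers 8..15 need the REX extension bit, so e.g. for TREG_R10
   REX_BASE(10) == 1 and REG_VALUE(10) == 2, i.e. r10 is encoded as
   register slot 2 plus the matching REX prefix bit. */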
/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_LRET TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN     16
/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#include "tcc.h"
#include <assert.h>
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0,
    0,
    0,
    0,
    0,
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};

static unsigned long func_sub_sp_offset;
static int func_ret_sub;
/* XXX: make it faster ? */
ST_FUNC void g(int c)
{
    int ind1;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}

ST_FUNC void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}

ST_FUNC void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

ST_FUNC void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

ST_FUNC void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}
static void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
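
/* Example of how orex() composes a REX prefix (illustrative commentary,
   not in the original source): emitting "mov %rax, %r8" as opcode 89 /r
   with rm = r8 and reg = rax is
       orex(1, TREG_R8, TREG_RAX, 0x89);                    => 0x49 0x89
       o(0xc0 + REG_VALUE(TREG_R8) + REG_VALUE(TREG_RAX)*8); => 0xc0
   i.e. the byte sequence 49 89 c0, where 0x49 = 0x40|REX.W|REX.B. */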
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
{
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a - t - 4);
        t = n;
    }
}

void gsym(int t)
{
    gsym_addr(t, ind);
}
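
/* How the patch chain works (illustrative commentary, not in the
   original source): each unresolved forward jump stores the offset of
   the previous unresolved jump in its own 4-byte displacement field,
   forming a linked list threaded through the code buffer.  gsym_addr()
   walks that list and overwrites each field with the final PC-relative
   displacement "a - t - 4". */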
/* psym is used to put an instruction with a data field which is a
   reference to a symbol. It is in fact the same as oad ! */
#define psym oad

static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}

/* instruction + 4 bytes data. Return the address of the data */
ST_FUNC int oad(int c, int s)
{
    int ind1;

    o(c);
    ind1 = ind + 4;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    write32le(cur_text_section->data + ind, s);
    s = ind;
    ind = ind1;
    return s;
}
ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_32, c), c=0;
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
    gen_le32(c-4);
}

/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifndef TCC_TARGET_PE
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
#else
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
        get_tok_str(sym->v, NULL), c, r,
        cur_text_section->data[ind-3],
        cur_text_section->data[ind-2],
        cur_text_section->data[ind-1]
        );
    greloc(cur_text_section, sym, ind, R_X86_64_PC32);
#endif
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        o(0x05 | op_reg);
        if (is_got) {
            gen_gotpcrel(r, sym, c);
        } else {
            gen_addrpc32(r, sym, c);
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}
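
/* Worked example (illustrative commentary, not in the original source):
   loading a stack slot with
       gen_modrm64(0x8b, TREG_RAX, VT_LOCAL, NULL, -8);
   emits 48 8b 45 f8, i.e. "mov -0x8(%rbp),%rax": REX.W from orex(),
   opcode 0x8b, then the short %rbp-relative form 0x45 with an 8-bit
   displacement. */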
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.i;

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.i = fc;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        ll = 0;
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT) || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR) || ((ft & VT_BTYPE) == VT_ENUM)
                   || ((ft & VT_BTYPE) == VT_FUNC));
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.i);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%rbp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            orex(0,r,0,0);
            if ((fc & ~0x100) != TOK_NE)
                oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            else
                oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
            if (fc & 0x100) {
                /* This was a float compare.  If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE.  */
                fc &= ~0x100;
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(1,r,v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}
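
/* Worked example (illustrative commentary, not in the original source):
   for an int local at -4(%rbp), i.e. sv->r == (VT_LOCAL | VT_LVAL) and
   sv->c.i == -4, load(TREG_RAX, sv) takes the lvalue path with b = 0x8b
   and ll = 0, and emits 8b 45 fc, i.e. "mov -0x4(%rbp),%eax". */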
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    ft = v->type.t;
    fc = v->c.i;
    fr = v->r & VT_VALMASK;
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: can we really reach this case? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: can we really reach this case? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}
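
/* Worked example (illustrative commentary, not in the original source):
   storing an int from %eax back to the same local, store(TREG_RAX, v)
   with v->r == (VT_LOCAL | VT_LVAL) and v->c.i == -4 takes the final
   orex(0, 0, r, 0x89)/gen_modrm() path and emits 89 45 fc, i.e.
   "mov %eax,-0x4(%rbp)". */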
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) || (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant case */
        if (vtop->r & VT_SYM) {
            /* relocation case */
#ifdef TCC_TARGET_PE
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
#else
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
#endif
        } else {
            /* put an empty PC32 relocation */
            put_elf_reloca(symtab_section, cur_text_section,
                           ind + 1, R_X86_64_PC32, 0, (int)(vtop->c.i-4));
        }
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}
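
/* Worked example (illustrative commentary, not in the original source):
   in the indirect case the target is first loaded into %r11, then
   o(0x41); o(0xff); o(0xd0 + REG_VALUE(TREG_R11)) emits 41 ff d3,
   i.e. "call *%r11"; with is_jmp == 1 the last byte becomes 0xe3,
   i.e. "jmp *%r11". */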
#if defined(CONFIG_TCC_BCHECK)
#ifndef TCC_TARGET_PE
static addr_t func_bound_offset;
static unsigned long func_bound_ind;
#endif

static void gen_static_call(int v)
{
    Sym *sym = external_global_sym(v, &func_old_type, 0);
    oad(0xe8, 0);
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
}

/* generate a bounded pointer addition */
ST_FUNC void gen_bounded_ptr_add(void)
{
    /* save all temporary registers */
    save_regs(0);

    /* prepare fast x86_64 function call */
    gv(RC_RAX);
    o(0xc68948); // mov  %rax,%rsi ## second arg in %rsi, this must be size
    vtop--;

    gv(RC_RAX);
    o(0xc78948); // mov  %rax,%rdi ## first arg in %rdi, this must be ptr
    vtop--;

    /* do a fast function call */
    gen_static_call(TOK___bound_ptr_add);

    /* returned pointer is in rax */
    vtop++;
    vtop->r = TREG_RAX | VT_BOUNDED;

    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
}

/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
ST_FUNC void gen_bounded_ptr_deref(void)
{
    addr_t func;
    int size, align;
    ElfW(Rela) *rel;
    Sym *sym;

    size = 0;
    /* XXX: put that code in generic part of tcc */
    if (!is_float(vtop->type.t)) {
        if (vtop->r & VT_LVAL_BYTE)
            size = 1;
        else if (vtop->r & VT_LVAL_SHORT)
            size = 2;
    }
    if (!size)
        size = type_size(&vtop->type, &align);
    switch(size) {
    case  1: func = TOK___bound_ptr_indir1; break;
    case  2: func = TOK___bound_ptr_indir2; break;
    case  4: func = TOK___bound_ptr_indir4; break;
    case  8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
    default:
        tcc_error("unhandled size when dereferencing bounded pointer");
        func = 0;
        break;
    }

    sym = external_global_sym(func, &func_old_type, 0);
    if (!sym->c)
        put_extern_sym(sym, NULL, 0, 0);

    /* patch relocation */
    /* XXX: find a better solution ? */

    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
}
#endif
#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}
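
/* Mapping used above (illustrative commentary, not in the original
   source): on Win64 the four argument slots are rcx, rdx, r8, r9;
   arg_prepare_reg() temporarily redirects slots 0 and 1 to r10 and r11,
   which gfunc_call() copies into rcx/rdx just before the call. */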
static int func_scratch;

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align;
    *regsize = 8;
    *ret_align = 1; // Never have to re-align return values for x86-64
    size = type_size(vt, &align);
    ret->ref = NULL;
    if (size > 8) {
        return 0;
    } else if (size > 4) {
        ret->t = VT_LLONG;
        return 1;
    } else if (size > 2) {
        ret->t = VT_INT;
        return 1;
    } else if (size > 1) {
        ret->t = VT_SHORT;
        return 1;
    } else {
        ret->t = VT_BYTE;
        return 1;
    }
}
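
/* Worked example (illustrative commentary, not in the original source):
   under this Win64 convention a struct of size 8 or less is returned in
   a register (e.g. a 6-byte struct is widened to VT_LLONG and comes
   back in %rax), while a 12-byte struct returns 0 here and is handed
   back through a hidden pointer argument instead. */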
static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}
void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (size <= 8)
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size); /* fstpt struct_size(%rsp) */
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (size > 8) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                gv(RC_XMM0); /* only use one float register */
                if (arg >= REGN) {
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (arg << 3));
                    d = arg_prepare_reg(arg);
                    /* mov %xmm0, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);

    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);
    /* other compilers don't clear the upper bits when returning char/short */
    bt = vtop->type.ref->type.t & (VT_BTYPE | VT_UNSIGNED);
    if (bt == (VT_BYTE | VT_UNSIGNED))
        o(0xc0b60f);  /* movzbl %al, %eax */
    else if (bt == VT_BYTE)
        o(0xc0be0f); /* movsbl %al, %eax */
    else if (bt == VT_SHORT)
        o(0x98); /* cwtl */
    else if (bt == (VT_SHORT | VT_UNSIGNED))
        o(0xc0b70f);  /* movzwl %ax, %eax */
#if 0 /* handled in gen_cast() */
    else if (bt == VT_INT)
        o(0x9848); /* cltq */
    else if (bt == (VT_INT | VT_UNSIGNED))
        o(0xc089); /* mov %eax,%eax */
#endif
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    func_var = (sym->c == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
    if (size > 8) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (size > 8) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL | VT_REF, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_type->ref->c == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    v = (func_scratch + -loc + 15) & -16;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */
        gen_le32(v);
    }

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}
#else

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348); /* add $xxx, %rsp (imm8 form) */
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}
typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;
static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}
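
/* Merge examples (illustrative commentary, not in the original source):
       integer + sse  -> integer (the eightbyte must go to a GP register)
       sse + sse      -> sse
       x87 + sse      -> memory
       memory + any   -> memory
   matching the fall-through order of the tests above. */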
static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
    case VT_ENUM: return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }
    assert(0);
    return 0;
}
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_LLONG : VT_INT;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}
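
/* Classification examples (illustrative commentary, not in the original
   source): struct { long a; long b; } is integer class with reg_count 2
   (VT_QLONG, two GP registers); struct { double a; double b; } is sse
   with reg_count 2 (VT_QFLOAT, two XMM registers); and
   struct { double a; long b; } merges to integer class (VT_QLONG) under
   this simplified classifier, which folds the whole struct into a single
   class rather than classifying each eightbyte separately as the full
   SysV ABI does.  Anything larger than 16 bytes goes to memory. */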
ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}
#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return arg_regs[idx];
}
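
/* Mapping used above (illustrative commentary, not in the original
   source): the SysV integer argument slots are rdi, rsi, rdx, rcx, r8,
   r9.  Slots 2 and 3 are prepared in r10 and r11 (idx + 8) because gv()
   may clobber rdx/rcx while evaluating later arguments; gfunc_call()
   moves them into rdx/rcx right before the call. */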
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, run_start, run_end, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;

    /* calculate the number of integer/float register arguments */
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse)
            nb_sse_args += reg_count;
        else if (mode == x86_64_mode_integer)
            nb_reg_args += reg_count;
    }

    /* arguments are collected in runs. Each run is a collection of 8-byte aligned arguments
       and ended by a 16-byte aligned argument. This is because, from the point of view of
       the callee, argument alignment is computed from the bottom up. */
    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    run_start = 0;
    args_size = 0;
    while (run_start != nb_args) {
        int run_gen_reg = gen_reg, run_sse_reg = sse_reg;

        run_end = nb_args;
        stack_adjust = 0;
        for(i = run_start; (i < nb_args) && (run_end == nb_args); i++) {
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            switch (mode) {
            case x86_64_mode_memory:
            case x86_64_mode_x87:
            stack_arg:
                if (align == 16)
                    run_end = i;
                else
                    stack_adjust += size;
                break;

            case x86_64_mode_sse:
                sse_reg -= reg_count;
                if (sse_reg + reg_count > 8) goto stack_arg;
                break;

            case x86_64_mode_integer:
                gen_reg -= reg_count;
                if (gen_reg + reg_count > REGN) goto stack_arg;
                break;
            default: break; /* nothing to be done for x86_64_mode_none */
            }
        }

        gen_reg = run_gen_reg;
        sse_reg = run_sse_reg;

        /* adjust stack to align SSE boundary */
        if (stack_adjust &= 15) {
            /* fetch cpu flags before the following sub changes them */
            if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
                gv(RC_INT);

            stack_adjust = 16 - stack_adjust;
            o(0x48);
            oad(0xec81, stack_adjust); /* sub $xxx, %rsp */
            args_size += stack_adjust;
        }

        for(i = run_start; i < run_end;) {
            /* Swap argument to top, it will possibly be changed here,
               and might use more temps. At the end of the loop we keep
               it on the stack and swap it back to its original position
               if it is a register. */
            SValue tmp = vtop[0];
            int arg_stored = 1;

            vtop[0] = vtop[-i];
            vtop[-i] = tmp;
            mode = classify_x86_64_arg(&vtop->type, NULL, &size, &align, &reg_count);

            switch (vtop->type.t & VT_BTYPE) {
            case VT_STRUCT:
                if (mode == x86_64_mode_sse) {
                    if (sse_reg > 8)
                        sse_reg -= reg_count;
                    else
                        arg_stored = 0;
                } else if (mode == x86_64_mode_integer) {
                    if (gen_reg > REGN)
                        gen_reg -= reg_count;
                    else
                        arg_stored = 0;
                }

                if (arg_stored) {
                    /* allocate the necessary size on stack */
                    o(0x48);
                    oad(0xec81, size); /* sub $xxx, %rsp */
                    /* generate structure store */
                    r = get_reg(RC_INT);
                    orex(1, r, 0, 0x89); /* mov %rsp, r */
                    o(0xe0 + REG_VALUE(r));
                    vset(&vtop->type, r | VT_LVAL, 0);
                    vswap();
                    vstore();
                    args_size += size;
                }
                break;

            case VT_LDOUBLE:
                assert(0);
                break;

            case VT_FLOAT:
            case VT_DOUBLE:
                assert(mode == x86_64_mode_sse);
                if (sse_reg > 8) {
                    --sse_reg;
                    r = gv(RC_FLOAT);
                    o(0x50); /* push %rax */
                    /* movq %xmmN, (%rsp) */
                    o(0xd60f66);
                    o(0x04 + REG_VALUE(r)*8);
                    o(0x24);
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;

            default:
                assert(mode == x86_64_mode_integer);
                /* simple type */
                /* XXX: implicit cast ? */
                if (gen_reg > REGN) {
                    --gen_reg;
                    r = gv(RC_INT);
                    orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;
            }

            /* And swap the argument back to its original position.  */
            tmp = vtop[0];
            vtop[0] = vtop[-i];
            vtop[-i] = tmp;

            if (arg_stored) {
                vrotb(i+1);
                assert((vtop->type.t == tmp.type.t) && (vtop->r == tmp.r));
                vpop();
                --nb_args;
                --run_end;
            } else {
                ++i;
            }
        }

        /* handle 16 byte aligned arguments at end of run */
        run_start = i = run_end;
        while (i < nb_args) {
            /* Rotate argument to top since it will always be popped */
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            if (align != 16)
                break;

            vrotb(i+1);

            if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
                gv(RC_ST0);
                oad(0xec8148, size); /* sub $xxx, %rsp */
                o(0x7cdb); /* fstpt 0(%rsp) */
                g(0x24);
                g(0x00);
                args_size += size;
            } else {
                assert(mode == x86_64_mode_memory);

                /* allocate the necessary size on stack */
                o(0x48);
                oad(0xec81, size); /* sub $xxx, %rsp */
                /* generate structure store */
                r = get_reg(RC_INT);
                orex(1, r, 0, 0x89); /* mov %rsp, r */
                o(0xe0 + REG_VALUE(r));
                vset(&vtop->type, r | VT_LVAL, 0);
                vswap();
                vstore();
                args_size += size;
            }

            vpop();
            --nb_args;
        }
    }

    /* XXX This should be superfluous.  */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            int d;
            gen_reg -= reg_count;
            r = gv(RC_INT);
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here.  */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    if (vtop->type.ref->c != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}
/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (func_type->ref->c == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count <= 8) {
                    seen_reg_num += reg_count;
                } else {
                    seen_reg_num = 8;
                    goto stack_arg;
                }
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count <= 8) {
                    seen_sse_num += reg_count;
                } else {
                    seen_sse_num = 8;
                    goto stack_arg;
                }
                break;
            }
        }

        loc -= 16;
        /* movl $0x????????, -0x10(%rbp) */
        o(0xf045c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        o(0xf445c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        o(0xf845c7);
        gen_le32(seen_stack_size);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            o(0xd60f66); /* movq */
            gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }

#ifdef CONFIG_TCC_BCHECK
    /* leave some room for bound checking code */
    if (tcc_state->do_bounds_check) {
        func_bound_offset = lbounds_section->data_offset;
        func_bound_ind = ind;
        oad(0xb8, 0); /* lbound section pointer */
        o(0xc78948);  /* mov %rax,%rdi ## first arg in %rdi, this must be ptr */
        oad(0xb8, 0); /* call to function */
    }
#endif
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check
        && func_bound_offset != lbounds_section->data_offset)
    {
        addr_t saved_ind;
        addr_t *bounds_ptr;
        Sym *sym_data;

        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
        *bounds_ptr = 0;

        /* generate bound local allocation */
        sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        saved_ind = ind;
        ind = func_bound_ind;
        greloc(cur_text_section, sym_data, ind + 1, R_386_32);
        ind = ind + 5 + 3;
        gen_static_call(TOK___bound_local_new);
        ind = saved_ind;

        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloc(cur_text_section, sym_data, ind + 1, R_386_32);
        oad(0xb8, 0); /* mov xxx, %rax */
        o(0xc78948); /* mov %rax,%rdi # first arg in %rdi, this must be ptr */
        gen_static_call(TOK___bound_local_delete);
        o(0x585a); /* restore returned value, if any */
    }
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */
/* generate a jump to a label */
int gjmp(int t)
{
    return psym(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}
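
/* Worked example (illustrative commentary, not in the original source):
   a jump to 10 bytes before the current output position has
   r = a - ind - 2 = -12, which fits a signed char, so the two-byte
   short form eb f4 is emitted; longer jumps fall back to the five-byte
   e9 rel32 form. */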
ST_FUNC void gtst_addr(int inv, int a)
{
    inv ^= (vtop--)->c.i;
    a -= ind + 2;
    if (a == (char)a) {
        g(inv - 32);
        g(a);
    } else {
        g(0x0f);
        oad(inv - 16, a - 4);
    }
}
/* generate a test. set 'inv' to invert test. Stack entry is popped */
ST_FUNC int gtst(int inv, int t)
{
    int v = vtop->r & VT_VALMASK;
    if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100) {
            /* This was a float compare.  If the parity flag is set
               the result was unordered.  For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test.  We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump.  */
            vtop->c.i &= ~0x100;
            if (inv == (vtop->c.i == TOK_NE))
                o(0x067a);  /* jp +6 */
            else {
                g(0x0f);
                t = psym(0x8a, t); /* jp t */
            }
        }
        g(0x0f);
        t = psym((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            uint32_t n1, n = vtop->c.i;
            if (n) {
                while ((n1 = read32le(cur_text_section->data + n)))
                    n = n1;
                write32le(cur_text_section->data + n, t);
                t = vtop->c.i;
            }
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    }
    vtop--;
    return t;
}
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop->r = VT_CMP;
            vtop->c.i = op;
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}
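
/* Worked example (illustrative commentary, not in the original source):
   adding the constant 5 to a long long held in %rax goes through the
   "constant case" of gen_op8 with ll = 1 and emits 48 83 c0 05, i.e.
   "add $0x5,%rax"; the same addition on a plain int omits the REX.W
   prefix: 83 c0 05. */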
void gen_opl(int op)
{
    gen_opi(op);
}
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int float_type =
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op;
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.i;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op | 0x100;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
        }
    }
}
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}
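
/* Worked example (illustrative commentary, not in the original source):
   converting a double in %xmm0 to a long long in %rax emits
   f2 48 0f 2c c0, i.e. "cvttsd2si %xmm0,%rax"; for a plain int target
   the REX.W byte is dropped and the destination is the 32-bit
   register. */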
/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}
/* Save the stack pointer at 'addr' in the local frame */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    gfunc_call(1);
#else
    int r;
    r = gv(RC_INT); /* allocation size */
    /* sub r,%rsp */
    o(0x2b48);
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
    /* and ~15, %rsp */
    o(0xf0e48348);
    vpop();
#endif
}
/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/