/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS         25
#define NB_ASM_REGS     8

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which does
   assumptions on it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0080 /* only for long double */
#define RC_R8      0x0100
#define RC_R9      0x0200
#define RC_R10     0x0400
#define RC_R11     0x0800
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_LRET    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */

/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20
};
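
/* REX_BASE() extracts bit 3 of a register number (set for r8-r15),
   which becomes the REX extension bit, and REG_VALUE() the low three
   bits that go into the ModRM byte.  The TREG_XMMn values 16-23 have
   that bit clear on purpose, so they encode as xmm0-xmm7. */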
#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)

/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_LRET TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN     16

/******************************************************/
/* ELF defines */

#define EM_TCC_TARGET EM_X86_64

/* relocation type for 32 bit data relocation */
#define R_DATA_32   R_X86_64_32
#define R_DATA_PTR  R_X86_64_64
#define R_JMP_SLOT  R_X86_64_JUMP_SLOT
#define R_GLOB_DAT  R_X86_64_GLOB_DAT
#define R_COPY      R_X86_64_COPY

#define ELF_START_ADDR 0x400000
#define ELF_PAGE_SIZE  0x200000

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#include "tcc.h"
#include <assert.h>

ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0,
    0,
    0,
    0,
    0,
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};

static unsigned long func_sub_sp_offset;
static int func_ret_sub;

/* XXX: make it faster ? */
ST_FUNC void g(int c)
{
    int ind1;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}
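
/* o() emits a multi-byte opcode least-significant byte first and stops
   at the first zero byte, so opcodes are written byte-reversed in this
   file: o(0xc48348) emits 48 83 c4, i.e. "add $imm8, %rsp". */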
ST_FUNC void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}

ST_FUNC void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

ST_FUNC void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

ST_FUNC void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}
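
/* A REX prefix byte is 0100WRXB: W=1 selects 64-bit operand size, R
   extends the ModRM reg field and B the ModRM r/m field.  orex() emits
   it only when needed ('ll' supplies W, 'r' supplies B, 'r2' supplies
   R), then emits the opcode byte(s) 'b'. */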
static void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
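
/* Forward jump targets are kept as a linked list threaded through the
   not-yet-patched 32-bit displacement fields themselves: each slot
   holds the offset of the next one, and gsym_addr() walks the chain,
   rewriting every slot with the final PC-relative displacement. */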
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
{
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a - t - 4);
        t = n;
    }
}

void gsym(int t)
{
    gsym_addr(t, ind);
}

/* psym is used to put an instruction with a data field which is a
   reference to a symbol. It is in fact the same as oad ! */
#define psym oad

static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}

/* instruction + 4 bytes data. Return the address of the data */
ST_FUNC int oad(int c, int s)
{
    int ind1;

    o(c);
    ind1 = ind + 4;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    write32le(cur_text_section->data + ind, s);
    s = ind;
    ind = ind1;
    return s;
}

ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_32, c), c=0;
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
    gen_le32(c-4);
}

/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifndef TCC_TARGET_PE
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
#else
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
              get_tok_str(sym->v, NULL), c, r,
              cur_text_section->data[ind-3],
              cur_text_section->data[ind-2],
              cur_text_section->data[ind-1]
              );
    greloc(cur_text_section, sym, ind, R_X86_64_PC32);
#endif
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}
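
/* A ModRM byte is mod<<6 | reg<<3 | rm.  With mod=00 and rm=101 (the
   0x05 pattern below), x86-64 encodes a RIP-relative disp32 operand,
   which is how the VT_CONST case addresses globals. */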
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        o(0x05 | op_reg);
        if (is_got) {
            gen_gotpcrel(r, sym, c);
        } else {
            gen_addrpc32(r, sym, c);
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}

/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.i;

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.i = fc;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        ll = 0;
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT) || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR) || ((ft & VT_BTYPE) == VT_ENUM)
                   || ((ft & VT_BTYPE) == VT_FUNC));
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.i);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            orex(0,r,0,0);
            if ((fc & ~0x100) != TOK_NE)
                oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            else
                oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
            if (fc & 0x100)
            {
                /* This was a float compare.  If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE. */
                fc &= ~0x100;
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(1,r,v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}

/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    ft = v->type.t;
    fc = v->c.i;
    fr = v->r & VT_VALMASK;
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: can we really get here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: can we really get here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}
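
/* Direct calls use opcode 0xe8 (call rel32) and direct jumps 0xe9
   (jmp rel32), hence the 0xe8 + is_jmp below.  The indirect path loads
   the target into R11, a caller-saved scratch register that is not
   used for argument passing in either calling convention. */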
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) || (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant case */
        if (vtop->r & VT_SYM) {
            /* relocation case */
#ifdef TCC_TARGET_PE
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
#else
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
#endif
        } else {
            /* put an empty PC32 relocation */
            put_elf_reloca(symtab_section, cur_text_section,
                           ind + 1, R_X86_64_PC32, 0, (int)(vtop->c.i-4));
        }
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}

#if defined(CONFIG_TCC_BCHECK)
#ifndef TCC_TARGET_PE
static addr_t func_bound_offset;
static unsigned long func_bound_ind;
#endif

static void gen_static_call(int v)
{
    Sym *sym = external_global_sym(v, &func_old_type, 0);
    oad(0xe8, 0);
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
}

/* generate a bounded pointer addition */
ST_FUNC void gen_bounded_ptr_add(void)
{
    /* save all temporary registers */
    save_regs(0);

    /* prepare fast x86_64 function call */
    gv(RC_RAX);
    o(0xc68948); // mov %rax,%rsi ## second arg in %rsi, this must be size
    vtop--;

    gv(RC_RAX);
    o(0xc78948); // mov %rax,%rdi ## first arg in %rdi, this must be ptr
    vtop--;

    /* do a fast function call */
    gen_static_call(TOK___bound_ptr_add);

    /* returned pointer is in rax */
    vtop++;
    vtop->r = TREG_RAX | VT_BOUNDED;

    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
}

/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
ST_FUNC void gen_bounded_ptr_deref(void)
{
    addr_t func;
    int size, align;
    ElfW(Rela) *rel;
    Sym *sym;

    size = 0;
    /* XXX: put that code in generic part of tcc */
    if (!is_float(vtop->type.t)) {
        if (vtop->r & VT_LVAL_BYTE)
            size = 1;
        else if (vtop->r & VT_LVAL_SHORT)
            size = 2;
    }
    if (!size)
        size = type_size(&vtop->type, &align);
    switch(size) {
    case  1: func = TOK___bound_ptr_indir1; break;
    case  2: func = TOK___bound_ptr_indir2; break;
    case  4: func = TOK___bound_ptr_indir4; break;
    case  8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
    default:
        tcc_error("unhandled size when dereferencing bounded pointer");
        func = 0;
        break;
    }

    sym = external_global_sym(func, &func_old_type, 0);
    if (!sym->c)
        put_extern_sym(sym, NULL, 0, 0);

    /* patch relocation */
    /* XXX: find a better solution ? */

    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
}
#endif

#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}

static int func_scratch;

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align;
    *regsize = 8;
    *ret_align = 1; // Never have to re-align return values for x86-64
    size = type_size(vt, &align);
    ret->ref = NULL;
    if (size > 8) {
        return 0;
    } else if (size > 4) {
        ret->t = VT_LLONG;
        return 1;
    } else if (size > 2) {
        ret->t = VT_INT;
        return 1;
    } else if (size > 1) {
        ret->t = VT_SHORT;
        return 1;
    } else {
        ret->t = VT_BYTE;
        return 1;
    }
}

static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}
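
/* Note: the Win64 convention passes aggregates by value only when
   their size is 1, 2, 4 or 8 bytes; anything larger travels by
   reference, so the size > 8 paths in gfunc_call() below copy the
   object into the caller's scratch area and pass a pointer instead. */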
void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (size <= 8)
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (size > 8) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                gv(RC_XMM0); /* only use one float register */
                if (arg >= REGN) {
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (arg << 3));
                    d = arg_prepare_reg(arg);
                    /* mov %xmm0, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);

    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);
    /* other compilers don't clear the upper bits when returning char/short */
    bt = vtop->type.ref->type.t & (VT_BTYPE | VT_UNSIGNED);
    if (bt == (VT_BYTE | VT_UNSIGNED))
        o(0xc0b60f); /* movzbl %al, %eax */
    else if (bt == VT_BYTE)
        o(0xc0be0f); /* movsbl %al, %eax */
    else if (bt == VT_SHORT)
        o(0x98); /* cwtl */
    else if (bt == (VT_SHORT | VT_UNSIGNED))
        o(0xc0b70f); /* movzwl %ax, %eax */
#if 0 /* handled in gen_cast() */
    else if (bt == VT_INT)
        o(0x9848); /* cltq */
    else if (bt == (VT_INT | VT_UNSIGNED))
        o(0xc089); /* mov %eax,%eax */
#endif
    vtop--;
}

#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    func_var = (sym->c == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
    if (size > 8) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (size > 8) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL | VT_REF, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_type->ref->c == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
}

/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    v = (func_scratch + -loc + 15) & -16;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */
        gen_le32(v);
    }

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}

#else

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}
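
/* The code below follows the System V AMD64 ABI: each argument is
   classified as INTEGER, SSE, X87 or MEMORY.  INTEGER arguments go in
   RDI, RSI, RDX, RCX, R8, R9; SSE arguments in XMM0-XMM7; everything
   classified MEMORY (including any aggregate larger than 16 bytes) is
   passed on the stack. */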
typedef enum X86_64_Mode {
  x86_64_mode_none,
  x86_64_mode_memory,
  x86_64_mode_integer,
  x86_64_mode_sse,
  x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}

static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
    case VT_ENUM: return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }
    assert(0);
    return 0;
}
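
/* Note: the full SysV algorithm classifies each eightbyte of a struct
   separately (e.g. struct { double x; long y; } would go in one XMM
   and one GP register), while classify_x86_64_inner() above merges all
   fields into a single class for the whole argument, so such a mixed
   struct ends up entirely in GP registers here.  This is a
   simplification that can differ from what other compilers do. */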
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_LLONG : VT_INT;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}

ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}

#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return arg_regs[idx];
}

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, run_start, run_end, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;

    /* calculate the number of integer/float register arguments */
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse)
            nb_sse_args += reg_count;
        else if (mode == x86_64_mode_integer)
            nb_reg_args += reg_count;
    }

    /* arguments are collected in runs. Each run is a collection of 8-byte aligned arguments
       and ended by a 16-byte aligned argument. This is because, from the point of view of
       the callee, argument alignment is computed from the bottom up. */
    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    run_start = 0;
    args_size = 0;
    while (run_start != nb_args) {
        int run_gen_reg = gen_reg, run_sse_reg = sse_reg;

        run_end = nb_args;
        stack_adjust = 0;
        for(i = run_start; (i < nb_args) && (run_end == nb_args); i++) {
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            switch (mode) {
            case x86_64_mode_memory:
            case x86_64_mode_x87:
            stack_arg:
                if (align == 16)
                    run_end = i;
                else
                    stack_adjust += size;
                break;

            case x86_64_mode_sse:
                sse_reg -= reg_count;
                if (sse_reg + reg_count > 8) goto stack_arg;
                break;

            case x86_64_mode_integer:
                gen_reg -= reg_count;
                if (gen_reg + reg_count > REGN) goto stack_arg;
                break;
            default: break; /* nothing to be done for x86_64_mode_none */
            }
        }

        gen_reg = run_gen_reg;
        sse_reg = run_sse_reg;

        /* adjust stack to align SSE boundary */
        if (stack_adjust &= 15) {
            /* fetch cpu flag before the following sub will change the value */
            if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
                gv(RC_INT);

            stack_adjust = 16 - stack_adjust;
            o(0x48);
            oad(0xec81, stack_adjust); /* sub $xxx, %rsp */
            args_size += stack_adjust;
        }

        for(i = run_start; i < run_end;) {
            /* Swap argument to top, it will possibly be changed here,
               and might use more temps. At the end of the loop we keep
               it on the stack and swap it back to its original position
               if it is a register. */
            SValue tmp = vtop[0];
            int arg_stored = 1;

            vtop[0] = vtop[-i];
            vtop[-i] = tmp;
            mode = classify_x86_64_arg(&vtop->type, NULL, &size, &align, &reg_count);

            switch (vtop->type.t & VT_BTYPE) {
            case VT_STRUCT:
                if (mode == x86_64_mode_sse) {
                    if (sse_reg > 8)
                        sse_reg -= reg_count;
                    else
                        arg_stored = 0;
                } else if (mode == x86_64_mode_integer) {
                    if (gen_reg > REGN)
                        gen_reg -= reg_count;
                    else
                        arg_stored = 0;
                }

                if (arg_stored) {
                    /* allocate the necessary size on stack */
                    o(0x48);
                    oad(0xec81, size); /* sub $xxx, %rsp */
                    /* generate structure store */
                    r = get_reg(RC_INT);
                    orex(1, r, 0, 0x89); /* mov %rsp, r */
                    o(0xe0 + REG_VALUE(r));
                    vset(&vtop->type, r | VT_LVAL, 0);
                    vswap();
                    vstore();
                    args_size += size;
                }
                break;

            case VT_LDOUBLE:
                assert(0);
                break;

            case VT_FLOAT:
            case VT_DOUBLE:
                assert(mode == x86_64_mode_sse);
                if (sse_reg > 8) {
                    --sse_reg;
                    r = gv(RC_FLOAT);
                    o(0x50); /* push %rax */
                    /* movq %xmmN, (%rsp) */
                    o(0xd60f66);
                    o(0x04 + REG_VALUE(r)*8);
                    o(0x24);
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;

            default:
                assert(mode == x86_64_mode_integer);
                /* simple type */
                /* XXX: implicit cast ? */
                if (gen_reg > REGN) {
                    --gen_reg;
                    r = gv(RC_INT);
                    orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;
            }

            /* And swap the argument back to its original position. */
            tmp = vtop[0];
            vtop[0] = vtop[-i];
            vtop[-i] = tmp;

            if (arg_stored) {
                vrotb(i+1);
                assert((vtop->type.t == tmp.type.t) && (vtop->r == tmp.r));
                vpop();
                --nb_args;
                --run_end;
            } else {
                ++i;
            }
        }

        /* handle 16 byte aligned arguments at end of run */
        run_start = i = run_end;
        while (i < nb_args) {
            /* Rotate argument to top since it will always be popped */
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            if (align != 16)
                break;

            vrotb(i+1);

            if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
                gv(RC_ST0);
                oad(0xec8148, size); /* sub $xxx, %rsp */
                o(0x7cdb); /* fstpt 0(%rsp) */
                g(0x24);
                g(0x00);
                args_size += size;
            } else {
                assert(mode == x86_64_mode_memory);

                /* allocate the necessary size on stack */
                o(0x48);
                oad(0xec81, size); /* sub $xxx, %rsp */
                /* generate structure store */
                r = get_reg(RC_INT);
                orex(1, r, 0, 0x89); /* mov %rsp, r */
                o(0xe0 + REG_VALUE(r));
                vset(&vtop->type, r | VT_LVAL, 0);
                vswap();
                vstore();
                args_size += size;
            }

            vpop();
            --nb_args;
        }
    }

    /* XXX This should be superfluous. */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            int d;
            gen_reg -= reg_count;
            r = gv(RC_INT);
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here. */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    if (vtop->type.ref->c != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}
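
/* The prolog is emitted retroactively: gfunc_prolog() only reserves
   FUNC_PROLOG_SIZE bytes, and gfunc_epilog() later rewinds 'ind' to
   that spot and writes "push %rbp; mov %rsp,%rbp; sub $n,%rsp"
   (1 + 3 + 7 = 11 bytes) once the final frame size is known. */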
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (func_type->ref->c == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count <= 8) {
                    seen_reg_num += reg_count;
                } else {
                    seen_reg_num = 8;
                    goto stack_arg;
                }
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count <= 8) {
                    seen_sse_num += reg_count;
                } else {
                    seen_sse_num = 8;
                    goto stack_arg;
                }
                break;
            }
        }

        loc -= 16;
        /* movl $0x????????, -0x10(%rbp) */
        o(0xf045c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        o(0xf445c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        o(0xf845c7);
        gen_le32(seen_stack_size);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            o(0xd60f66); /* movq */
            gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }

#ifdef CONFIG_TCC_BCHECK
    /* leave some room for bound checking code */
    if (tcc_state->do_bounds_check) {
        func_bound_offset = lbounds_section->data_offset;
        func_bound_ind = ind;
        oad(0xb8, 0); /* lbound section pointer */
        o(0xc78948);  /* mov %rax,%rdi ## first arg in %rdi, this must be ptr */
        oad(0xb8, 0); /* call to function */
    }
#endif
}

/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check
        && func_bound_offset != lbounds_section->data_offset)
    {
        addr_t saved_ind;
        addr_t *bounds_ptr;
        Sym *sym_data;

        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
        *bounds_ptr = 0;

        /* generate bound local allocation */
        sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        saved_ind = ind;
        ind = func_bound_ind;
        greloc(cur_text_section, sym_data, ind + 1, R_386_32);
        ind = ind + 5 + 3;
        gen_static_call(TOK___bound_local_new);
        ind = saved_ind;

        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloc(cur_text_section, sym_data, ind + 1, R_386_32);
        oad(0xb8, 0); /* mov xxx, %rax */
        o(0xc78948); /* mov %rax,%rdi # first arg in %rdi, this must be ptr */
        gen_static_call(TOK___bound_local_delete);
        o(0x585a); /* restore returned value, if any */
    }
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */

/* generate a jump to a label */
int gjmp(int t)
{
    return psym(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}

ST_FUNC void gtst_addr(int inv, int a)
{
    inv ^= (vtop--)->c.i;
    a -= ind + 2;
    if (a == (char)a) {
        g(inv - 32);
        g(a);
    } else {
        g(0x0f);
        oad(inv - 16, a - 4);
    }
}

/* generate a test. set 'inv' to invert test. Stack entry is popped */
ST_FUNC int gtst(int inv, int t)
{
    int v = vtop->r & VT_VALMASK;
    if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100)
        {
            /* This was a float compare.  If the parity flag is set
               the result was unordered.  For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test.  We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump. */
            vtop->c.i &= ~0x100;
            if (inv == (vtop->c.i == TOK_NE))
                o(0x067a); /* jp +6 */
            else
            {
                g(0x0f);
                t = psym(0x8a, t); /* jp t */
            }
        }
        g(0x0f);
        t = psym((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            uint32_t n1, n = vtop->c.i;
            if (n) {
                while ((n1 = read32le(cur_text_section->data + n)))
                    n = n1;
                write32le(cur_text_section->data + n, t);
                t = vtop->c.i;
            }
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    }
    vtop--;
    return t;
}
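
/* In gen_opi() below, 'opc' is the x86 ALU opcode extension placed in
   the ModRM reg field: 0=add, 1=or, 2=adc, 3=sbb, 4=and, 5=sub,
   6=xor, 7=cmp.  The same numbering selects among the 0x83/0x81
   immediate forms and the (opc << 3) | 0x01 register forms. */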
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop->r = VT_CMP;
            vtop->c.i = op;
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}

void gen_opl(int op)
{
    gen_opi(op);
}

/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int float_type =
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fc80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op;
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.i;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op | 0x100;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
        }
    }
}
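
/* gen_cvt_itof() relies on the SSE conversion instructions: prefix
   0xf2 selects cvtsi2sd and 0xf3 cvtsi2ss (opcode 0f 2a), and a REX.W
   prefix (0x48) widens the source operand to 64 bits, which is also
   how an unsigned 32-bit int is converted without losing its upper
   range. */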
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}

/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}

/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}

/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    gfunc_call(1);
#else
    int r;
    r = gv(RC_INT); /* allocation size */
    /* sub r,%rsp */
    o(0x2b48);
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
    /* and ~15, %rsp */
    o(0xf0e48348);
    vpop();
#endif
}


/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/