/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS 25
#define NB_ASM_REGS 8
/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which makes
   assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0080 /* only for long double */
#define RC_R8      0x0100
#define RC_R9      0x0200
#define RC_R10     0x0400
#define RC_R11     0x0800
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX /* function return: integer register */
#define RC_LRET    RC_RDX /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */
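/* Added commentary (not in the original source): a value's class word both
   pins it to one register and places it in a broader class, so gv(RC_INT)
   accepts any general-purpose register while gv(RC_RAX) forces %rax.
   Membership is a simple mask test; a sketch under these defines:

       int class_satisfies(int reg_class_word, int wanted)
       {
           return (reg_class_word & wanted) != 0;
       }

       class_satisfies(RC_INT | RC_RAX, RC_INT)   evaluates to nonzero
       class_satisfies(RC_INT | RC_RAX, RC_RCX)   evaluates to 0
*/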
/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20
};
#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)
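/* Added commentary: registers 8..15 need a REX prefix bit, and only the
   low 3 bits of the register number fit in the ModRM byte. For TREG_R9
   (value 9):

       REX_BASE(9)  = (9 >> 3) & 1 = 1   (goes into the REX prefix)
       REG_VALUE(9) = 9 & 7        = 1   (goes into ModRM/opcode bits)
*/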
/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_LRET TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN     16

/******************************************************/
/* ELF defines */

#define EM_TCC_TARGET EM_X86_64

/* relocation type for 32 bit data relocation */
#define R_DATA_32   R_X86_64_32
#define R_DATA_PTR  R_X86_64_64
#define R_JMP_SLOT  R_X86_64_JUMP_SLOT
#define R_COPY      R_X86_64_COPY

#define ELF_START_ADDR 0x400000
#define ELF_PAGE_SIZE  0x200000

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#include "tcc.h"
#include <assert.h>
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0,
    0,
    0,
    0,
    0,
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};
static unsigned long func_sub_sp_offset;
static int func_ret_sub;

/* XXX: make it faster ? */
ST_FUNC void g(int c)
{
    int ind1;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}

ST_FUNC void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}
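/* Added commentary on the encoding convention used throughout this file:
   o() emits the low byte first and shifts, so multi-byte opcodes are
   written as little-endian integer literals. For example o(0x8b48) emits
   the bytes 0x48 0x8b (REX.W + mov). Because the loop stops at the first
   zero byte, a literal 0x00 byte must be emitted with g(0x00) instead. */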
ST_FUNC void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

ST_FUNC void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

ST_FUNC void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}
static void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
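/* Worked example for orex() (added commentary): "mov %rax, %r8" needs
   REX.W for the 64-bit operand size and REX.B for the extended base
   register, so orex(1, TREG_R8, TREG_RAX, 0x89) emits

       0x40 | REX_BASE(8) | (REX_BASE(0) << 2) | (1 << 3) = 0x49

   followed by the 0x89 opcode; the caller then emits the ModRM byte,
   here o(0xc0 + REG_VALUE(TREG_R8) + REG_VALUE(TREG_RAX) * 8) = 0xc0. */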
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
{
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a - t - 4);
        t = n;
    }
}

void gsym(int t)
{
    gsym_addr(t, ind);
}
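/* Added commentary: forward jumps to a not-yet-known label form a linked
   list threaded through the code buffer itself. Each pending jump's
   4-byte displacement slot holds the offset of the previous pending jump,
   with 0 terminating the list:

       t3 -> t2 -> t1 -> 0        (stored in the 32-bit slots)
       gsym_addr(t3, a);          patches every slot to a - t - 4

   The -4 accounts for the displacement being relative to the end of the
   4-byte field. */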
/* psym is used to put an instruction with a data field which is a
   reference to a symbol. It is in fact the same as oad! */
#define psym oad

static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}

/* instruction + 4 bytes data. Return the address of the data */
ST_FUNC int oad(int c, int s)
{
    int ind1;

    o(c);
    ind1 = ind + 4;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    write32le(cur_text_section->data + ind, s);
    s = ind;
    ind = ind1;
    return s;
}
ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_32, c), c=0;
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
    gen_le32(c-4);
}

/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifndef TCC_TARGET_PE
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
#else
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
              get_tok_str(sym->v, NULL), c, r,
              cur_text_section->data[ind-3],
              cur_text_section->data[ind-2],
              cur_text_section->data[ind-1]
              );
    greloc(cur_text_section, sym, ind, R_X86_64_PC32);
#endif
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        o(0x05 | op_reg);
        if (is_got) {
            gen_gotpcrel(r, sym, c);
        } else {
            gen_addrpc32(r, sym, c);
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}
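/* ModRM refresher for the magic values above (added commentary): a ModRM
   byte is mod(2 bits) | reg(3 bits) | rm(3 bits), and op_reg has already
   been shifted into the reg field:

       0x05 | op_reg   mod=00, rm=101  ->  RIP-relative disp32
       0x45 | op_reg   mod=01, rm=101  ->  disp8(%rbp)
       0x85 | op_reg   mod=10, rm=101  ->  disp32(%rbp)

   so, e.g., an 0x8b opcode followed by gen_modrm(r, VT_LOCAL, NULL, -8)
   assembles to "mov -0x8(%rbp), r". */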
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.i;

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif
    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.i = fc;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        ll = 0;
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT) || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR) || ((ft & VT_BTYPE) == VT_ENUM)
                   || ((ft & VT_BTYPE) == VT_FUNC));
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.i);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            orex(0,r,0,0);
            if ((fc & ~0x100) != TOK_NE)
                oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            else
                oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
            if (fc & 0x100) {
                /* This was a float compare. If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE. */
                fc &= ~0x100;
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(1,r,v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    ft = v->type.t;
    fc = v->c.i;
    fr = v->r & VT_VALMASK;
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: do we ever really get here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: do we ever really get here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) || (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant case */
        if (vtop->r & VT_SYM) {
            /* relocation case */
#ifdef TCC_TARGET_PE
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
#else
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
#endif
        } else {
            /* put an empty PC32 relocation */
            put_elf_reloca(symtab_section, cur_text_section,
                           ind + 1, R_X86_64_PC32, 0, (int)(vtop->c.i-4));
        }
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}
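/* Added commentary on the -4 bias above: a call/jmp rel32 displacement is
   relative to the end of its 4-byte immediate, so both the relocation
   addend and the inline constant are pre-biased by -4. A direct call to a
   hypothetical known offset 'target' in the same section would be, with
   the e8 opcode sitting at 'ind' and the immediate ending at ind + 5:

       oad(0xe8, target - (ind + 5));
*/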
#if defined(CONFIG_TCC_BCHECK)
#ifndef TCC_TARGET_PE
static addr_t func_bound_offset;
static unsigned long func_bound_ind;
#endif

static void gen_static_call(int v)
{
    Sym *sym = external_global_sym(v, &func_old_type, 0);
    oad(0xe8, 0);
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
}

/* generate a bounded pointer addition */
ST_FUNC void gen_bounded_ptr_add(void)
{
    /* save all temporary registers */
    save_regs(0);

    /* prepare fast x86_64 function call */
    gv(RC_RAX);
    o(0xc68948); // mov  %rax,%rsi ## second arg in %rsi, this must be size
    vtop--;

    gv(RC_RAX);
    o(0xc78948); // mov  %rax,%rdi ## first arg in %rdi, this must be ptr
    vtop--;

    /* do a fast function call */
    gen_static_call(TOK___bound_ptr_add);

    /* returned pointer is in rax */
    vtop++;
    vtop->r = TREG_RAX | VT_BOUNDED;

    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
}

/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
ST_FUNC void gen_bounded_ptr_deref(void)
{
    addr_t func;
    int size, align;
    ElfW(Rela) *rel;
    Sym *sym;

    size = 0;
    /* XXX: put that code in generic part of tcc */
    if (!is_float(vtop->type.t)) {
        if (vtop->r & VT_LVAL_BYTE)
            size = 1;
        else if (vtop->r & VT_LVAL_SHORT)
            size = 2;
    }
    if (!size)
        size = type_size(&vtop->type, &align);
    switch(size) {
    case  1: func = TOK___bound_ptr_indir1; break;
    case  2: func = TOK___bound_ptr_indir2; break;
    case  4: func = TOK___bound_ptr_indir4; break;
    case  8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
    default:
        tcc_error("unhandled size when dereferencing bounded pointer");
        func = 0;
        break;
    }

    sym = external_global_sym(func, &func_old_type, 0);
    if (!sym->c)
        put_extern_sym(sym, NULL, 0, 0);

    /* patch relocation */
    /* XXX: find a better solution ? */

    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
}
#endif
#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}
static int func_scratch;

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align;
    *regsize = 8;
    *ret_align = 1; // Never have to re-align return values for x86-64
    size = type_size(vt, &align);
    ret->ref = NULL;
    if (size > 8) {
        return 0;
    } else if (size > 4) {
        ret->t = VT_LLONG;
        return 1;
    } else if (size > 2) {
        ret->t = VT_INT;
        return 1;
    } else if (size > 1) {
        ret->t = VT_SHORT;
        return 1;
    } else {
        ret->t = VT_BYTE;
        return 1;
    }
}
static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}
void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (size <= 8)
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (size > 8) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                gv(RC_XMM0); /* only use one float register */
                if (arg >= REGN) {
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (arg << 3));
                    d = arg_prepare_reg(arg);
                    /* mov %xmm0, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);

    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    func_var = (sym->c == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
    if (size > 8) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (size > 8) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL | VT_REF, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_type->ref->c == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    v = (func_scratch + -loc + 15) & -16;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */
        gen_le32(v);
    }

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}
#else

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}
typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}
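/* Added commentary: the merge collapses toward memory, with integer
   winning over sse/x87. Note that TCC classifies a whole struct as one
   unit here, a simplification of the SysV per-eightbyte rules:

       struct { float x, y; }        sse + sse     -> x86_64_mode_sse
       struct { double d; long l; }  sse + integer -> x86_64_mode_integer
       long double merged with any other non-none class -> x86_64_mode_memory
*/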
static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
    case VT_ENUM: return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }
    assert(0);
    return x86_64_mode_none; /* not reached */
}
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_LLONG : VT_INT;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none*/
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}
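/* Example outputs (added commentary, following the code above):

       struct { long a, b; }      size 16 -> x86_64_mode_integer,
           *reg_count = 2, ret->t = VT_QLONG (a pair of GP registers)
       struct { long a, b, c; }   size 24 -> x86_64_mode_memory
           (passed on the stack; returned via a hidden pointer)
*/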
ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}
#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return arg_regs[idx];
}
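/* Added commentary: gv() may hand out RDX/RCX (argument registers 2 and
   3) while later arguments are still being evaluated, so those two slots
   are staged in R10/R11, which the register allocator never allocates,
   and copied into place just before the call:

       arg_prepare_reg(0) = TREG_RDI     arg_prepare_reg(2) = 10 (%r10)
       arg_prepare_reg(1) = TREG_RSI     arg_prepare_reg(3) = 11 (%r11)
*/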
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, run_start, run_end, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;

    /* calculate the number of integer/float register arguments */
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse)
            nb_sse_args += reg_count;
        else if (mode == x86_64_mode_integer)
            nb_reg_args += reg_count;
    }

    /* arguments are collected in runs. Each run is a collection of 8-byte aligned arguments
       and ended by a 16-byte aligned argument. This is because, from the point of view of
       the callee, argument alignment is computed from the bottom up. */
    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    run_start = 0;
    args_size = 0;
    while (run_start != nb_args) {
        int run_gen_reg = gen_reg, run_sse_reg = sse_reg;

        run_end = nb_args;
        stack_adjust = 0;
        for(i = run_start; (i < nb_args) && (run_end == nb_args); i++) {
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            switch (mode) {
            case x86_64_mode_memory:
            case x86_64_mode_x87:
            stack_arg:
                if (align == 16)
                    run_end = i;
                else
                    stack_adjust += size;
                break;

            case x86_64_mode_sse:
                sse_reg -= reg_count;
                if (sse_reg + reg_count > 8) goto stack_arg;
                break;

            case x86_64_mode_integer:
                gen_reg -= reg_count;
                if (gen_reg + reg_count > REGN) goto stack_arg;
                break;
            default: break; /* nothing to be done for x86_64_mode_none */
            }
        }

        gen_reg = run_gen_reg;
        sse_reg = run_sse_reg;

        /* adjust stack to align SSE boundary */
        if (stack_adjust &= 15) {
            /* fetch cpu flag before the following sub will change the value */
            if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
                gv(RC_INT);

            stack_adjust = 16 - stack_adjust;
            o(0x48);
            oad(0xec81, stack_adjust); /* sub $xxx, %rsp */
            args_size += stack_adjust;
        }
        for(i = run_start; i < run_end;) {
            /* Swap argument to top, it will possibly be changed here,
               and might use more temps. At the end of the loop we keep
               it on the stack and swap it back to its original position
               if it is a register. */
            SValue tmp = vtop[0];
            int arg_stored = 1;

            vtop[0] = vtop[-i];
            vtop[-i] = tmp;
            mode = classify_x86_64_arg(&vtop->type, NULL, &size, &align, &reg_count);

            switch (vtop->type.t & VT_BTYPE) {
            case VT_STRUCT:
                if (mode == x86_64_mode_sse) {
                    if (sse_reg > 8)
                        sse_reg -= reg_count;
                    else
                        arg_stored = 0;
                } else if (mode == x86_64_mode_integer) {
                    if (gen_reg > REGN)
                        gen_reg -= reg_count;
                    else
                        arg_stored = 0;
                }

                if (arg_stored) {
                    /* allocate the necessary size on stack */
                    o(0x48);
                    oad(0xec81, size); /* sub $xxx, %rsp */
                    /* generate structure store */
                    r = get_reg(RC_INT);
                    orex(1, r, 0, 0x89); /* mov %rsp, r */
                    o(0xe0 + REG_VALUE(r));
                    vset(&vtop->type, r | VT_LVAL, 0);
                    vswap();
                    vstore();
                    args_size += size;
                }
                break;

            case VT_LDOUBLE:
                assert(0);
                break;

            case VT_FLOAT:
            case VT_DOUBLE:
                assert(mode == x86_64_mode_sse);
                if (sse_reg > 8) {
                    --sse_reg;
                    r = gv(RC_FLOAT);
                    o(0x50); /* push $rax */
                    /* movq %xmmN, (%rsp) */
                    o(0xd60f66);
                    o(0x04 + REG_VALUE(r)*8);
                    o(0x24);
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;

            default:
                assert(mode == x86_64_mode_integer);
                /* simple type */
                /* XXX: implicit cast ? */
                if (gen_reg > REGN) {
                    --gen_reg;
                    r = gv(RC_INT);
                    orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;
            }

            /* And swap the argument back to its original position. */
            tmp = vtop[0];
            vtop[0] = vtop[-i];
            vtop[-i] = tmp;

            if (arg_stored) {
                vrotb(i+1);
                assert((vtop->type.t == tmp.type.t) && (vtop->r == tmp.r));
                vpop();
                --nb_args;
                --run_end;
            } else {
                ++i;
            }
        }
        /* handle 16 byte aligned arguments at end of run */
        run_start = i = run_end;
        while (i < nb_args) {
            /* Rotate argument to top since it will always be popped */
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            if (align != 16)
                break;

            vrotb(i+1);

            if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
                gv(RC_ST0);
                oad(0xec8148, size); /* sub $xxx, %rsp */
                o(0x7cdb); /* fstpt 0(%rsp) */
                g(0x24);
                g(0x00);
                args_size += size;
            } else {
                assert(mode == x86_64_mode_memory);

                /* allocate the necessary size on stack */
                o(0x48);
                oad(0xec81, size); /* sub $xxx, %rsp */
                /* generate structure store */
                r = get_reg(RC_INT);
                orex(1, r, 0, 0x89); /* mov %rsp, r */
                o(0xe0 + REG_VALUE(r));
                vset(&vtop->type, r | VT_LVAL, 0);
                vswap();
                vstore();
                args_size += size;
            }

            vpop();
            --nb_args;
        }
    }
    /* XXX This should be superfluous. */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            int d;
            gen_reg -= reg_count;
            r = gv(RC_INT);
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here. */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    if (vtop->type.ref->c != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}
/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (func_type->ref->c == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count <= 8) {
                    seen_reg_num += reg_count;
                } else {
                    seen_reg_num = 8;
                    goto stack_arg;
                }
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count <= 8) {
                    seen_sse_num += reg_count;
                } else {
                    seen_sse_num = 8;
                    goto stack_arg;
                }
                break;
            }
        }

        loc -= 16;
        /* movl $0x????????, -0x10(%rbp) */
        o(0xf045c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        o(0xf445c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        o(0xf845c7);
        gen_le32(seen_stack_size);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            o(0xd60f66); /* movq */
            gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }
    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }

#ifdef CONFIG_TCC_BCHECK
    /* leave some room for bound checking code */
    if (tcc_state->do_bounds_check) {
        func_bound_offset = lbounds_section->data_offset;
        func_bound_ind = ind;
        oad(0xb8, 0); /* lbound section pointer */
        o(0xc78948); /* mov %rax,%rdi ## first arg in %rdi, this must be ptr */
        oad(0xb8, 0); /* call to function */
    }
#endif
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check
        && func_bound_offset != lbounds_section->data_offset)
    {
        addr_t saved_ind;
        addr_t *bounds_ptr;
        Sym *sym_data;

        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
        *bounds_ptr = 0;

        /* generate bound local allocation */
        sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        saved_ind = ind;
        ind = func_bound_ind;
        greloc(cur_text_section, sym_data, ind + 1, R_386_32);
        ind = ind + 5 + 3;
        gen_static_call(TOK___bound_local_new);
        ind = saved_ind;

        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloc(cur_text_section, sym_data, ind + 1, R_386_32);
        oad(0xb8, 0); /* mov xxx, %rax */
        o(0xc78948); /* mov %rax,%rdi ## first arg in %rdi, this must be ptr */
        gen_static_call(TOK___bound_local_delete);
        o(0x585a); /* restore returned value, if any */
    }
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */
/* generate a jump to a label */
int gjmp(int t)
{
    return psym(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}
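/* Encoding note (added commentary): the displacement is always measured
   from the end of the instruction, hence the two different biases:

       eb XX             jmp rel8   (2 bytes, disp = a - ind - 2)
       e9 XX XX XX XX    jmp rel32  (5 bytes, disp = a - ind - 5)
*/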
ST_FUNC void gtst_addr(int inv, int a)
{
    inv ^= (vtop--)->c.i;
    a -= ind + 2;
    if (a == (char)a) {
        g(inv - 32);
        g(a);
    } else {
        g(0x0f);
        oad(inv - 16, a - 4);
    }
}
/* generate a test. set 'inv' to invert test. Stack entry is popped */
ST_FUNC int gtst(int inv, int t)
{
    int v = vtop->r & VT_VALMASK;
    if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100) {
            /* This was a float compare. If the parity flag is set
               the result was unordered. For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test. We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump. */
            vtop->c.i &= ~0x100;
            if (inv == (vtop->c.i == TOK_NE))
                o(0x067a); /* jp +6 */
            else {
                g(0x0f);
                t = psym(0x8a, t); /* jp t */
            }
        }
        g(0x0f);
        t = psym((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            uint32_t n1, n = vtop->c.i;
            if (n) {
                while ((n1 = read32le(cur_text_section->data + n)))
                    n = n1;
                write32le(cur_text_section->data + n, t);
                t = vtop->c.i;
            }
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    }
    vtop--;
    return t;
}
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop->r = VT_CMP;
            vtop->c.i = op;
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}
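/* Worked encoding example for the constant case in gen_opi() (added
   commentary): "add $1, %rax" fits the sign-extended imm8 form, so the
   0x83 opcode is chosen over 0x81:

       orex(1, TREG_RAX, 0, 0x83);           -> 48 83
       o(0xc0 | (0 << 3) | REG_VALUE(0));    -> c0   (opc 0 = add, rm = rax)
       g(1);                                 -> 01   i.e. 48 83 c0 01

   A constant that does not fit in a signed byte takes the 0x81 opcode
   with a 4-byte immediate via oad() instead. */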
void gen_opl(int op)
{
    gen_opi(op);
}
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int float_type =
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op;
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.i;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op | 0x100;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
        }
    }
}
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}
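/* Added commentary: cvttss2si/cvttsd2si truncate toward zero, matching
   C's float-to-integer conversion (e.g. (int)-1.7 == -1). With size == 8
   the REX.W form produces a 64-bit result; converting a double to a
   64-bit integer in %rax emits:

       f2 48 0f 2c c0    cvttsd2si %xmm0, %rax
*/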
/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}
/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}
/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    gfunc_call(1);
#else
    int r;
    r = gv(RC_INT); /* allocation size */
    /* sub r,%rsp */
    o(0x2b48);
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
    /* and ~15, %rsp */
    o(0xf0e48348);
    vpop();
#endif
}
/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/