/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS         25
#define NB_ASM_REGS     16
#define CONFIG_TCC_ASM
/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about this ordering). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0080 /* only for long double */
#define RC_R8      0x0100
#define RC_R9      0x0200
#define RC_R10     0x0400
#define RC_R11     0x0800
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_LRET    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */
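
/* For illustration: gv(RC_INT) materializes vtop in any general integer
   register, while a precise class pins the value to one specific
   register -- the shift code further down uses gv2(RC_INT, RC_RCX) so
   that the shift count ends up in %cl. */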

/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20
};
#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)
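
/* Example: TREG_R10 is 10 (0b1010), so REX_BASE(10) == 1 (the extension
   bit that goes into the REX prefix) and REG_VALUE(10) == 2 (the same
   3-bit ModRM encoding as %rdx). */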

/* return registers for function */
#define REG_IRET TREG_RAX  /* single word int return register */
#define REG_LRET TREG_RDX  /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN     16

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#include "tcc.h"
#include <assert.h>

ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0,
    0,
    0,
    0,
    0,
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};

static unsigned long func_sub_sp_offset;
static int func_ret_sub;

/* XXX: make it faster ? */
ST_FUNC void g(int c)
{
    int ind1;
    if (nocode_wanted)
        return;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}

ST_FUNC void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}
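
/* Note: o() emits the low byte first and stops at the first zero byte,
   so multi-byte opcodes are written as little-endian integer constants:
   o(0x8b48) emits 0x48 0x8b (REX.W + mov).  A literal 0x00 byte must be
   emitted with g(0x00) instead. */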

ST_FUNC void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

ST_FUNC void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

ST_FUNC void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}

static void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}

/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
{
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a - t - 4);
        t = n;
    }
}

void gsym(int t)
{
    gsym_addr(t, ind);
}
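
/* How the jump list works, as a sketch: each forward jump emitted with
   an unknown target stores the offset of the previous pending jump in
   its own 32-bit displacement field, threading a linked list through
   the code buffer.  gsym_addr() walks that list and overwrites each
   displacement with the real PC-relative value (a - t - 4, measured
   from the end of the 4-byte field). */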

static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}

/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)
{
    int t;
    if (nocode_wanted)
        return s;
    o(c);
    t = ind;
    gen_le32(s);
    return t;
}

/* generate jmp to a label */
#define gjmp2(instr,lbl) oad(instr,lbl)

ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
    gen_le32(c-4);
}

/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifdef TCC_TARGET_PE
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
        get_tok_str(sym->v, NULL), c, r,
        cur_text_section->data[ind-3],
        cur_text_section->data[ind-2],
        cur_text_section->data[ind-1]
        );
#endif
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}
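
/* Sketch of the ELF code shape this produces for a non-static global:
       mov x@GOTPCREL(%rip), %reg   ; fetch &x from the GOT
       add $c, %reg                 ; optional displacement
   The -4 addend compensates for the 4-byte displacement field that sits
   between the relocation offset and the end of the instruction. */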

static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        if (!(r & VT_SYM)) {
            /* Absolute memory reference */
            o(0x04 | op_reg); /* [sib] | destreg */
            oad(0x25, c);     /* disp32 */
        } else {
            o(0x05 | op_reg); /* (%rip)+disp32 | destreg */
            if (is_got) {
                gen_gotpcrel(r, sym, c);
            } else {
                gen_addrpc32(r, sym, c);
            }
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}
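
/* Encoding examples for the VT_LOCAL case above: with c == 8 this emits
   ModRM 0x45 plus an 8-bit displacement, i.e. 8(%rbp); a displacement
   that does not fit in a signed char uses ModRM 0x85 plus disp32. */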

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}

/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.i;
    if (fc != sv->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in load");

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.i = fc;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        if (fc != sv->c.i) {
            /* If the addend doesn't fit into a 32bit signed
               we must use a 64bit move.  We've checked above
               that this doesn't have a sym associated. */
            v1.type.t = VT_LLONG;
            v1.r = VT_CONST;
            v1.c.i = sv->c.i;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
            fc = 0;
        }
        ll = 0;
        /* Like GCC we can load from small enough properly sized
           structs and unions as well.
           XXX maybe move to generic operand handling, but should
           occur only with asm, so tccasm.c might also be a better place */
        if ((ft & VT_BTYPE) == VT_STRUCT) {
            int align;
            switch (type_size(&sv->type, &align)) {
                case 1: ft = VT_BYTE; break;
                case 2: ft = VT_SHORT; break;
                case 4: ft = VT_INT; break;
                case 8: ft = VT_LLONG; break;
                default:
                    tcc_error("invalid aggregate type for register load");
                    break;
            }
        }
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT)
                   || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR)
                   || ((ft & VT_BTYPE) == VT_FUNC)
                );
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.i);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            orex(0,r,0,0);
            if ((fc & ~0x100) != TOK_NE)
                oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            else
                oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
            if (fc & 0x100) {
                /* This was a float compare.  If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE. */
                fc &= ~0x100;
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(1,r,v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}

/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    fr = v->r & VT_VALMASK;
    ft = v->type.t;
    fc = v->c.i;
    if (fc != v->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in store");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: do we ever get here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: do we ever get here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}

/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) && (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant symbolic case -> simple relocation */
        greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}
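
/* The two shapes emitted above, for reference:
       e8 xx xx xx xx    call rel32  (direct, patched via relocation)
       41 ff d3          call *%r11  (indirect)
       41 ff e3          jmp  *%r11  (is_jmp adds 0x10 to the ModRM byte)
*/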

#if defined(CONFIG_TCC_BCHECK)
#ifndef TCC_TARGET_PE
static addr_t func_bound_offset;
static unsigned long func_bound_ind;
#endif

static void gen_static_call(int v)
{
    Sym *sym = external_global_sym(v, &func_old_type, 0);
    oad(0xe8, 0);
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
}

/* generate a bounded pointer addition */
ST_FUNC void gen_bounded_ptr_add(void)
{
    /* save all temporary registers */
    save_regs(0);

    /* prepare fast x86_64 function call */
    gv(RC_RAX);
    o(0xc68948); // mov  %rax,%rsi ## second arg in %rsi, this must be size
    vtop--;

    gv(RC_RAX);
    o(0xc78948); // mov  %rax,%rdi ## first arg in %rdi, this must be ptr
    vtop--;

    /* do a fast function call */
    gen_static_call(TOK___bound_ptr_add);

    /* returned pointer is in rax */
    vtop++;
    vtop->r = TREG_RAX | VT_BOUNDED;

    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
}

/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
ST_FUNC void gen_bounded_ptr_deref(void)
{
    addr_t func;
    int size, align;
    ElfW(Rela) *rel;
    Sym *sym;

    size = 0;
    /* XXX: put that code in generic part of tcc */
    if (!is_float(vtop->type.t)) {
        if (vtop->r & VT_LVAL_BYTE)
            size = 1;
        else if (vtop->r & VT_LVAL_SHORT)
            size = 2;
    }
    if (!size)
        size = type_size(&vtop->type, &align);
    switch(size) {
    case  1: func = TOK___bound_ptr_indir1; break;
    case  2: func = TOK___bound_ptr_indir2; break;
    case  4: func = TOK___bound_ptr_indir4; break;
    case  8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
    default:
        tcc_error("unhandled size when dereferencing bounded pointer");
        func = 0;
        break;
    }

    sym = external_global_sym(func, &func_old_type, 0);
    if (!sym->c)
        put_extern_sym(sym, NULL, 0, 0);

    /* patch relocation */
    /* XXX: find a better solution ? */

    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
}
#endif

#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}

static int func_scratch, func_alloca;

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

static void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}

static int using_regs(int size)
{
    return !(size > 8 || (size & (size - 1)));
}
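
/* I.e. only power-of-two sizes up to 8 travel in registers under the
   Win64 convention:
       using_regs(8)  -> 1
       using_regs(3)  -> 0   (not a power of two)
       using_regs(16) -> 0   (larger than 8; passed by reference)
*/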

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    size = type_size(vt, &align);
    if (!using_regs(size))
        return 0;
    if (size == 8)
        ret->t = VT_LLONG;
    else if (size == 4)
        ret->t = VT_INT;
    else if (size == 2)
        ret->t = VT_SHORT;
    else
        ret->t = VT_BYTE;
    ret->ref = NULL;
    return 1;
}

static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

static int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}

void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (using_regs(size))
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (!using_regs(size)) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                if (tcc_state->nosse)
                    tcc_error("SSE disabled");
                if (arg >= REGN) {
                    gv(RC_XMM0);
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* Load directly to xmmN register */
                    gv(RC_XMM0 << arg);
                    d = arg_prepare_reg(arg);
                    /* mov %xmmN, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + arg*8 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);
    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);

    if ((vtop->r & VT_SYM) && vtop->sym->v == TOK_alloca) {
        /* need to add the "func_scratch" area after alloca */
        o(0x0548), gen_le32(func_alloca), func_alloca = ind - 4;
    }

    /* other compilers don't clear the upper bits when returning char/short */
    bt = vtop->type.ref->type.t & (VT_BTYPE | VT_UNSIGNED);
    if (bt == (VT_BYTE | VT_UNSIGNED))
        o(0xc0b60f);  /* movzbl %al, %eax */
    else if (bt == VT_BYTE)
        o(0xc0be0f); /* movsbl %al, %eax */
    else if (bt == VT_SHORT)
        o(0x98); /* cwtl */
    else if (bt == (VT_SHORT | VT_UNSIGNED))
        o(0xc0b70f);  /* movzwl %ax, %eax */
#if 0 /* handled in gen_cast() */
    else if (bt == VT_INT)
        o(0x9848); /* cltq */
    else if (bt == (VT_INT | VT_UNSIGNED))
        o(0xc089); /* mov %eax,%eax */
#endif
    vtop--;
}

#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 0;
    func_alloca = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    func_var = (sym->f.func_type == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
    if (!using_regs(size)) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (!using_regs(size)) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LLOCAL | lvalue_type(type->t), addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    if (tcc_state->nosse)
                        tcc_error("SSE disabled");
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LOCAL | lvalue_type(type->t), addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_type->ref->f.func_type == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
}

/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    func_scratch = (func_scratch + 15) & -16;
    v = (func_scratch + -loc + 15) & -16;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */
        gen_le32(v);
    }

    /* add the "func_scratch" area after each alloca seen */
    while (func_alloca) {
        unsigned char *ptr = cur_text_section->data + func_alloca;
        func_alloca = read32le(ptr);
        write32le(ptr, func_scratch);
    }

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}

#else

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}

typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}

static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
        return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }
    assert(0);
    return 0;
}

static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_LLONG : VT_INT;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none*/
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}
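
/* Some concrete classifications produced by the code above (assuming
   default sizes and alignments):
       struct { int a, b; }     -> x86_64_mode_integer, 1 reg, VT_LLONG
       struct { double a, b; }  -> x86_64_mode_sse, 2 regs, VT_QFLOAT
       struct { char c[24]; }   -> x86_64_mode_memory (size > 16)
       long double              -> x86_64_mode_x87
*/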

ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}

#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return arg_regs[idx];
}

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;
    char _onstack[nb_args], *onstack = _onstack;

    /* calculate the number of integer/float register arguments, remember
       arguments to be passed via stack (in onstack[]), and also remember
       if we have to align the stack pointer to 16 (onstack[i] == 2).  Needs
       to be done in a left-to-right pass over arguments.  */
    stack_adjust = 0;
    for(i = nb_args - 1; i >= 0; i--) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse && nb_sse_args + reg_count <= 8) {
            nb_sse_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_integer && nb_reg_args + reg_count <= REGN) {
            nb_reg_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_none) {
            onstack[i] = 0;
        } else {
            if (align == 16 && (stack_adjust &= 15)) {
                onstack[i] = 2;
                stack_adjust = 0;
            } else
                onstack[i] = 1;
            stack_adjust += size;
        }
    }

    if (nb_sse_args && tcc_state->nosse)
        tcc_error("SSE disabled but floating point arguments passed");

    /* fetch cpu flag before generating any code */
    if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
        gv(RC_INT);

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    args_size = 0;
    stack_adjust &= 15;
    for (i = 0; i < nb_args;) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (!onstack[i]) {
            ++i;
            continue;
        }
        /* Possibly adjust stack to align SSE boundary.  We're processing
           args from right to left while allocating happens left to right
           (stack grows down), so the adjustment needs to happen _after_
           an argument that requires it.  */
        if (stack_adjust) {
            o(0x50); /* push %rax; aka sub $8,%rsp */
            args_size += 8;
            stack_adjust = 0;
        }
        if (onstack[i] == 2)
            stack_adjust = 1;

        vrotb(i+1);

        switch (vtop->type.t & VT_BTYPE) {
        case VT_STRUCT:
            /* allocate the necessary size on stack */
            o(0x48);
            oad(0xec81, size); /* sub $xxx, %rsp */
            /* generate structure store */
            r = get_reg(RC_INT);
            orex(1, r, 0, 0x89); /* mov %rsp, r */
            o(0xe0 + REG_VALUE(r));
            vset(&vtop->type, r | VT_LVAL, 0);
            vswap();
            vstore();
            break;

        case VT_LDOUBLE:
            gv(RC_ST0);
            oad(0xec8148, size); /* sub $xxx, %rsp */
            o(0x7cdb); /* fstpt 0(%rsp) */
            g(0x24);
            g(0x00);
            break;

        case VT_FLOAT:
        case VT_DOUBLE:
            assert(mode == x86_64_mode_sse);
            r = gv(RC_FLOAT);
            o(0x50); /* push $rax */
            /* movq %xmmN, (%rsp) */
            o(0xd60f66);
            o(0x04 + REG_VALUE(r)*8);
            o(0x24);
            break;

        default:
            assert(mode == x86_64_mode_integer);
            /* simple type */
            /* XXX: implicit cast ? */
            r = gv(RC_INT);
            orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
            break;
        }
        args_size += size;

        vpop();
        --nb_args;
        onstack++;
    }

    /* XXX This should be superfluous.  */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            int d;
            gen_reg -= reg_count;
            r = gv(RC_INT);
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here.  */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    if (vtop->type.ref->f.func_type != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}

#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (sym->f.func_type == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count > REGN)
                    goto stack_arg;
                seen_reg_num += reg_count;
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count > 8)
                    goto stack_arg;
                seen_sse_num += reg_count;
                break;
            }
        }

        loc -= 16;
        /* movl $0x????????, -0x10(%rbp) */
        o(0xf045c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        o(0xf445c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        o(0xf845c7);
        gen_le32(seen_stack_size);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            if (!tcc_state->nosse) {
                o(0xd60f66); /* movq */
                gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            }
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (tcc_state->nosse)
                tcc_error("SSE disabled but floating point arguments used");
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | lvalue_type(type->t), param_addr);
    }

#ifdef CONFIG_TCC_BCHECK
    /* leave some room for bound checking code */
    if (tcc_state->do_bounds_check) {
        func_bound_offset = lbounds_section->data_offset;
        func_bound_ind = ind;
        oad(0xb8, 0); /* lbound section pointer */
        o(0xc78948);  /* mov %rax,%rdi ## first arg in %rdi, this must be ptr */
        oad(0xb8, 0); /* call to function */
    }
#endif
}

/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check
        && func_bound_offset != lbounds_section->data_offset)
    {
        addr_t saved_ind;
        addr_t *bounds_ptr;
        Sym *sym_data;

        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
        *bounds_ptr = 0;

        /* generate bound local allocation */
        sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        saved_ind = ind;
        ind = func_bound_ind;
        greloca(cur_text_section, sym_data, ind + 1, R_X86_64_64, 0);
        ind = ind + 5 + 3;
        gen_static_call(TOK___bound_local_new);
        ind = saved_ind;

        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloca(cur_text_section, sym_data, ind + 1, R_X86_64_64, 0);
        oad(0xb8, 0); /* mov xxx, %rax */
        o(0xc78948); /* mov %rax,%rdi # first arg in %rdi, this must be ptr */
        gen_static_call(TOK___bound_local_delete);
        o(0x585a); /* restore returned value, if any */
    }
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */

ST_FUNC void gen_fill_nops(int bytes)
{
    while (bytes--)
        g(0x90);
}

/* generate a jump to a label */
int gjmp(int t)
{
    return gjmp2(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}
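
/* Example: a branch whose displacement fits in a signed char (counted
   from the end of the instruction) gets the 2-byte form "eb disp8";
   anything else falls back to the 5-byte "e9 rel32" form. */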

ST_FUNC void gtst_addr(int inv, int a)
{
    int v = vtop->r & VT_VALMASK;
    if (v == VT_CMP) {
        inv ^= (vtop--)->c.i;
        a -= ind + 2;
        if (a == (char)a) {
            g(inv - 32);
            g(a);
        } else {
            g(0x0f);
            oad(inv - 16, a - 4);
        }
    } else if ((v & ~1) == VT_JMP) {
        if ((v & 1) != inv) {
            gjmp_addr(a);
            gsym(vtop->c.i);
        } else {
            gsym(vtop->c.i);
            o(0x05eb);
            gjmp_addr(a);
        }
        vtop--;
    }
}

/* generate a test. set 'inv' to invert test. Stack entry is popped */
ST_FUNC int gtst(int inv, int t)
{
    int v = vtop->r & VT_VALMASK;

    if (nocode_wanted) {
        ;
    } else if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100) {
            /* This was a float compare.  If the parity flag is set
               the result was unordered.  For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test.  We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump.  */
            vtop->c.i &= ~0x100;
            if (inv == (vtop->c.i == TOK_NE))
                o(0x067a);  /* jp +6 */
            else {
                g(0x0f);
                t = gjmp2(0x8a, t); /* jp t */
            }
        }
        g(0x0f);
        t = gjmp2((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            uint32_t n1, n = vtop->c.i;
            if (n) {
                while ((n1 = read32le(cur_text_section->data + n)))
                    n = n1;
                write32le(cur_text_section->data + n, t);
                t = vtop->c.i;
            }
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    }
    vtop--;
    return t;
}
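
/* Worked example of the parity handling above: for "a < b" on doubles,
   comisd sets PF when the operands are unordered (a NaN), and "<" must
   then be false.  With inv == 0 and a test other than TOK_NE the code
   emits "jp +6" to hop over the 6-byte conditional jump, so NaN never
   takes the branch; for "!=" the jp jumps straight to the target, since
   unordered operands compare unequal. */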

/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop->r = VT_CMP;
            vtop->c.i = op;
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}

void gen_opl(int op)
{
    gen_opi(op);
}

/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int float_type =
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op;
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.i;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op | 0x100;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
        }
    }
}

/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}
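
/* Note on the unsigned-int path above: 32-bit operations zero-extend on
   x86-64, so the pushed register already holds the value zero-extended
   to 64 bits.  fildll then reads a signed 64-bit integer that is always
   non-negative, which is how values >= 2^31 convert correctly where a
   plain 32-bit fildl would see them as negative. */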

/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}

/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}

/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}

#ifdef TCC_TARGET_PE
/* Save result of gen_vla_alloc onto the stack */
ST_FUNC void gen_vla_result(int addr) {
    /* mov %rax,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RAX, VT_LOCAL, NULL, addr);
}
#endif

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    gfunc_call(1);
#else
    int r;
    r = gv(RC_INT); /* allocation size */
    /* sub r,%rsp */
    o(0x2b48);
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
    /* and ~15, %rsp */
    o(0xf0e48348);
    vpop();
#endif
}
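
/* Bytes emitted by the non-PE path, for a size already in register r
   (shown here for r == TREG_RAX):
       48 2b e0          sub %rax, %rsp
       48 83 e4 f0       and $~15, %rsp   ; rounds %rsp down, so the
                                          ; block stays >= size bytes
                                          ; and 16-byte aligned
*/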

/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/