/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS     25
#define NB_ASM_REGS 16
#define CONFIG_TCC_ASM

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0080 /* only for long double */
#define RC_R8      0x0100
#define RC_R9      0x0200
#define RC_R10     0x0400
#define RC_R11     0x0800
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_LRET    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */
/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20
};

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)
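
/* Note: hardware register numbers are 4 bits wide on x86-64.  REX_BASE()
   extracts the high bit, which goes into the REX prefix, and REG_VALUE()
   the low 3 bits, which go into the ModRM byte.  E.g. %r10 (number 10 =
   0b1010) has REX_BASE 1 and REG_VALUE 2, i.e. it encodes like %rdx plus
   a REX.B (or REX.R) bit. */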
/* return registers for function */
#define REG_IRET TREG_RAX  /* single word int return register */
#define REG_LRET TREG_RDX  /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN 16

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#include "tcc.h"
#include <assert.h>
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0,
    0,
    0,
    0,
    0,
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};
static unsigned long func_sub_sp_offset;
static int func_ret_sub;

/* XXX: make it faster ? */
ST_FUNC void g(int c)
{
    int ind1;
    if (nocode_wanted)
        return;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}
ST_FUNC void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}
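
/* Note: o() emits its argument little-endian but stops at the first zero
   byte, e.g. o(0xbe0f) emits 0f be.  It is therefore only usable for
   multi-byte opcodes whose high bytes are non-zero; data that may contain
   zero bytes must go through g(), gen_le16() or gen_le32(). */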
ST_FUNC void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

ST_FUNC void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

ST_FUNC void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}
static void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
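
/* Worked example: orex(1, TREG_R10, TREG_RAX, 0x89) emits
   0x40 | REX_BASE(r10) | (REX_BASE(rax) << 2) | (1 << 3) = 0x49 followed
   by the opcode 0x89; together with the ModRM byte 0xc2 emitted by the
   caller this is "mov %rax,%r10" (49 89 c2). */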
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
{
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a - t - 4);
        t = n;
    }
}

void gsym(int t)
{
    gsym_addr(t, ind);
}
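
/* Forward jumps to a not-yet-defined label are chained through their own
   32-bit displacement fields: each field holds the offset of the previous
   unresolved jump, 0 terminating the chain.  gsym_addr() walks this chain
   and overwrites every link with the real pc-relative displacement
   'a - t - 4'. */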
static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}

/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)
{
    int t;
    if (nocode_wanted)
        return s;
    o(c);
    t = ind;
    gen_le32(s);
    return t;
}

/* generate jmp to a label */
#define gjmp2(instr,lbl) oad(instr,lbl)
ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
    gen_le32(c-4);
}

/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifdef TCC_TARGET_PE
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
        get_tok_str(sym->v, NULL), c, r,
        cur_text_section->data[ind-3],
        cur_text_section->data[ind-2],
        cur_text_section->data[ind-1]
        );
#endif
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        if (!(r & VT_SYM)) {
            /* Absolute memory reference */
            o(0x04 | op_reg); /* [sib] | destreg */
            oad(0x25, c);     /* disp32 */
        } else {
            o(0x05 | op_reg); /* (%rip)+disp32 | destreg */
            if (is_got) {
                gen_gotpcrel(r, sym, c);
            } else {
                gen_addrpc32(r, sym, c);
            }
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}
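
/* Addressing-mode summary for gen_modrm_impl(): VT_CONST without a symbol
   uses the absolute SIB form (modrm 0x04, sib 0x25, disp32); VT_CONST with
   a symbol uses %rip-relative addressing (modrm 0x05 + disp32, possibly
   through the GOT); VT_LOCAL addresses off %rbp with a disp8 (0x45) or
   disp32 (0x85); anything else is a plain register base. */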
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.i;
    if (fc != sv->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in load");

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.i = fc;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        if (fc != sv->c.i) {
            /* If the addend doesn't fit into a 32-bit signed value
               we must use a 64-bit move.  We've checked above
               that this doesn't have a sym associated. */
            v1.type.t = VT_LLONG;
            v1.r = VT_CONST;
            v1.c.i = sv->c.i;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
            fc = 0;
        }
        ll = 0;
        /* Like GCC we can load from small enough properly sized
           structs and unions as well.
           XXX maybe move to generic operand handling, but should
           occur only with asm, so tccasm.c might also be a better place */
        if ((ft & VT_BTYPE) == VT_STRUCT) {
            int align;
            switch (type_size(&sv->type, &align)) {
            case 1: ft = VT_BYTE; break;
            case 2: ft = VT_SHORT; break;
            case 4: ft = VT_INT; break;
            case 8: ft = VT_LLONG; break;
            default:
                tcc_error("invalid aggregate type for register load");
                break;
            }
        }
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT)
                   || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR)
                   || ((ft & VT_BTYPE) == VT_FUNC)
                );
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.i);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            orex(0,r,0,0);
            if ((fc & ~0x100) != TOK_NE)
                oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            else
                oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
            if (fc & 0x100) {
                /* This was a float compare.  If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE. */
                fc &= ~0x100;
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(1,r,v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    fr = v->r & VT_VALMASK;
    ft = v->type.t;
    fc = v->c.i;
    if (fc != v->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in store");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, etc. */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: can we really get here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: can we really get here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) && (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant symbolic case -> simple relocation */
#ifdef TCC_TARGET_PE
        greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
#else
        greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
#endif
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}
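
/* Example of the two paths: a call to a known symbol whose addend fits in
   32 bits becomes "e8 rel32" with a PC32/PLT32 relocation; otherwise the
   target is loaded into %r11 and called as "41 ff d3" (call *%r11).  %r11
   is call-clobbered in both the SysV and Windows ABIs and is never handed
   out for arguments here, so it is safe to clobber at this point. */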
#if defined(CONFIG_TCC_BCHECK)
#ifndef TCC_TARGET_PE
static addr_t func_bound_offset;
static unsigned long func_bound_ind;
#endif

static void gen_static_call(int v)
{
    Sym *sym = external_global_sym(v, &func_old_type, 0);
    oad(0xe8, 0);
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
}
/* generate a bounded pointer addition */
ST_FUNC void gen_bounded_ptr_add(void)
{
    /* save all temporary registers */
    save_regs(0);

    /* prepare fast x86_64 function call */
    gv(RC_RAX);
    o(0xc68948); // mov  %rax,%rsi ## second arg in %rsi, this must be size
    vtop--;

    gv(RC_RAX);
    o(0xc78948); // mov  %rax,%rdi ## first arg in %rdi, this must be ptr
    vtop--;

    /* do a fast function call */
    gen_static_call(TOK___bound_ptr_add);

    /* returned pointer is in rax */
    vtop++;
    vtop->r = TREG_RAX | VT_BOUNDED;

    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
}
/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
ST_FUNC void gen_bounded_ptr_deref(void)
{
    addr_t func;
    int size, align;
    ElfW(Rela) *rel;
    Sym *sym;

    size = 0;
    /* XXX: put that code in generic part of tcc */
    if (!is_float(vtop->type.t)) {
        if (vtop->r & VT_LVAL_BYTE)
            size = 1;
        else if (vtop->r & VT_LVAL_SHORT)
            size = 2;
    }
    if (!size)
        size = type_size(&vtop->type, &align);
    switch(size) {
    case  1: func = TOK___bound_ptr_indir1; break;
    case  2: func = TOK___bound_ptr_indir2; break;
    case  4: func = TOK___bound_ptr_indir4; break;
    case  8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
    default:
        tcc_error("unhandled size when dereferencing bounded pointer");
        func = 0;
        break;
    }
    sym = external_global_sym(func, &func_old_type, 0);
    if (!sym->c)
        put_extern_sym(sym, NULL, 0, 0);
    /* patch relocation */
    /* XXX: find a better solution ? */
    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
}
#endif
#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}
static int func_scratch, func_alloca;

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

static void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}
static int using_regs(int size)
{
    return !(size > 8 || (size & (size - 1)));
}
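
/* Win64 rule of thumb: only arguments of size 1, 2, 4 or 8 bytes travel in
   registers or directly on the stack; using_regs(3), using_regs(16) etc.
   are false, and such aggregates are copied to scratch space and passed by
   reference, as the Windows x64 calling convention requires. */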
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    size = type_size(vt, &align);
    if (!using_regs(size))
        return 0;
    if (size == 8)
        ret->t = VT_LLONG;
    else if (size == 4)
        ret->t = VT_INT;
    else if (size == 2)
        ret->t = VT_SHORT;
    else
        ret->t = VT_BYTE;
    ret->ref = NULL;
    return 1;
}

static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

static int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}
void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (using_regs(size))
            continue; /* arguments of size 1, 2, 4 or 8 are passed in registers or on the stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }
    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (!using_regs(size)) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                if (tcc_state->nosse)
                    tcc_error("SSE disabled");
                if (arg >= REGN) {
                    gv(RC_XMM0);
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* Load directly to xmmN register */
                    gv(RC_XMM0 << arg);
                    d = arg_prepare_reg(arg);
                    /* mov %xmmN, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + arg*8 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);
    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);

    if ((vtop->r & VT_SYM) && vtop->sym->v == TOK_alloca) {
        /* need to add the "func_scratch" area after alloca */
        o(0x0548), gen_le32(func_alloca), func_alloca = ind - 4;
    }

    /* other compilers don't clear the upper bits when returning char/short */
    bt = vtop->type.ref->type.t & (VT_BTYPE | VT_UNSIGNED);
    if (bt == (VT_BYTE | VT_UNSIGNED))
        o(0xc0b60f); /* movzbl %al, %eax */
    else if (bt == VT_BYTE)
        o(0xc0be0f); /* movsbl %al, %eax */
    else if (bt == VT_SHORT)
        o(0x98); /* cwtl */
    else if (bt == (VT_SHORT | VT_UNSIGNED))
        o(0xc0b70f); /* movzwl %ax, %eax */
#if 0 /* handled in gen_cast() */
    else if (bt == VT_INT)
        o(0x9848); /* cltq */
    else if (bt == (VT_INT | VT_UNSIGNED))
        o(0xc089); /* mov %eax,%eax */
#endif
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 0;
    func_alloca = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    func_var = (sym->f.func_type == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
    if (!using_regs(size)) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (!using_regs(size)) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LLOCAL | lvalue_type(type->t), addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    if (tcc_state->nosse)
                        tcc_error("SSE disabled");
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LOCAL | lvalue_type(type->t), addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_type->ref->f.func_type == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    func_scratch = (func_scratch + 15) & -16;
    v = (func_scratch + -loc + 15) & -16;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */
        gen_le32(v);
    }

    /* add the "func_scratch" area after each alloca seen */
    while (func_alloca) {
        unsigned char *ptr = cur_text_section->data + func_alloca;
        func_alloca = read32le(ptr);
        write32le(ptr, func_scratch);
    }

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}

#else
static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}
typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}
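
/* Worked example (note that this is a simplification of the SysV AMD64
   rules, which classify each eightbyte separately): for
   struct { float x; int y; } the field classes merge as none+sse -> sse,
   then sse+integer -> integer, so the 8-byte struct travels in one
   general-purpose register.  Structs larger than 16 bytes never reach the
   merge at all; classify_x86_64_arg() sends them to memory up front. */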
static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
        return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }
    assert(0);
    return 0;
}
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_LLONG : VT_INT;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}
ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}
#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return arg_regs[idx];
}
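
/* Mapping note: for the SysV order rdi, rsi, rdx, rcx, r8, r9 this yields
   rdi, rsi, r10, r11, r8, r9.  %rdx and %rcx are staged in %r10/%r11
   because evaluating a later argument with gv() may clobber them (division
   uses %rdx, variable shifts use %rcx); gfunc_call() copies them into
   place just before the call instruction. */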
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;
    char _onstack[nb_args], *onstack = _onstack;

    /* calculate the number of integer/float register arguments, remember
       arguments to be passed via stack (in onstack[]), and also remember
       if we have to align the stack pointer to 16 (onstack[i] == 2).  Needs
       to be done in a left-to-right pass over arguments. */
    stack_adjust = 0;
    for(i = nb_args - 1; i >= 0; i--) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse && nb_sse_args + reg_count <= 8) {
            nb_sse_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_integer && nb_reg_args + reg_count <= REGN) {
            nb_reg_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_none) {
            onstack[i] = 0;
        } else {
            if (align == 16 && (stack_adjust &= 15)) {
                onstack[i] = 2;
                stack_adjust = 0;
            } else
                onstack[i] = 1;
            stack_adjust += size;
        }
    }

    if (nb_sse_args && tcc_state->nosse)
        tcc_error("SSE disabled but floating point arguments passed");

    /* fetch cpu flag before generating any code */
    if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
        gv(RC_INT);

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    args_size = 0;
    stack_adjust &= 15;
    for (i = 0; i < nb_args;) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (!onstack[i]) {
            ++i;
            continue;
        }
        /* Possibly adjust stack to align SSE boundary.  We're processing
           args from right to left while allocating happens left to right
           (stack grows down), so the adjustment needs to happen _after_
           an argument that requires it. */
        if (stack_adjust) {
            o(0x50); /* push %rax; aka sub $8,%rsp */
            args_size += 8;
            stack_adjust = 0;
        }
        if (onstack[i] == 2)
            stack_adjust = 1;

        vrotb(i+1);

        switch (vtop->type.t & VT_BTYPE) {
        case VT_STRUCT:
            /* allocate the necessary size on stack */
            o(0x48);
            oad(0xec81, size); /* sub $xxx, %rsp */
            /* generate structure store */
            r = get_reg(RC_INT);
            orex(1, r, 0, 0x89); /* mov %rsp, r */
            o(0xe0 + REG_VALUE(r));
            vset(&vtop->type, r | VT_LVAL, 0);
            vswap();
            vstore();
            break;

        case VT_LDOUBLE:
            gv(RC_ST0);
            oad(0xec8148, size); /* sub $xxx, %rsp */
            o(0x7cdb); /* fstpt 0(%rsp) */
            g(0x24);
            g(0x00);
            break;

        case VT_FLOAT:
        case VT_DOUBLE:
            assert(mode == x86_64_mode_sse);
            r = gv(RC_FLOAT);
            o(0x50); /* push %rax */
            /* movq %xmmN, (%rsp) */
            o(0xd60f66);
            o(0x04 + REG_VALUE(r)*8);
            o(0x24);
            break;

        default:
            assert(mode == x86_64_mode_integer);
            /* simple type */
            /* XXX: implicit cast ? */
            r = gv(RC_INT);
            orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
            break;
        }
        args_size += size;

        vpop();
        --nb_args;
        onstack++;
    }
    /* XXX This should be superfluous. */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            int d;
            gen_reg -= reg_count;
            r = gv(RC_INT);
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here. */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    if (vtop->type.ref->f.func_type != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}
/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (sym->f.func_type == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count > REGN)
                    goto stack_arg;
                seen_reg_num += reg_count;
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count > 8)
                    goto stack_arg;
                seen_sse_num += reg_count;
                break;
            }
        }

        loc -= 16;
        /* movl $0x????????, -0x10(%rbp) */
        o(0xf045c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        o(0xf445c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        o(0xf845c7);
        gen_le32(seen_stack_size);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            if (!tcc_state->nosse) {
                o(0xd60f66); /* movq */
                gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            }
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }
    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (tcc_state->nosse)
                tcc_error("SSE disabled but floating point arguments used");
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | lvalue_type(type->t), param_addr);
    }

#ifdef CONFIG_TCC_BCHECK
    /* leave some room for bound checking code */
    if (tcc_state->do_bounds_check) {
        func_bound_offset = lbounds_section->data_offset;
        func_bound_ind = ind;
        oad(0xb8, 0); /* lbound section pointer */
        o(0xc78948); /* mov %rax,%rdi ## first arg in %rdi, this must be ptr */
        oad(0xb8, 0); /* call to function */
    }
#endif
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check
        && func_bound_offset != lbounds_section->data_offset)
    {
        addr_t saved_ind;
        addr_t *bounds_ptr;
        Sym *sym_data;

        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
        *bounds_ptr = 0;

        /* generate bound local allocation */
        sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        saved_ind = ind;
        ind = func_bound_ind;
        greloca(cur_text_section, sym_data, ind + 1, R_X86_64_64, 0);
        ind = ind + 5 + 3;
        gen_static_call(TOK___bound_local_new);
        ind = saved_ind;

        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloca(cur_text_section, sym_data, ind + 1, R_X86_64_64, 0);
        oad(0xb8, 0); /* mov xxx, %rax */
        o(0xc78948); /* mov %rax,%rdi # first arg in %rdi, this must be ptr */
        gen_static_call(TOK___bound_local_delete);
        o(0x585a); /* restore returned value, if any */
    }
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */
ST_FUNC void gen_fill_nops(int bytes)
{
    while (bytes--)
        g(0x90);
}

/* generate a jump to a label */
int gjmp(int t)
{
    return gjmp2(0xe9, t);
}
/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}
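
/* Encoding note: a jump whose displacement from the end of the 2-byte
   instruction fits in a signed byte uses the short form "eb rel8"; anything
   farther away falls back to the 5-byte near form "e9 rel32", hence the
   '- 2' and '- 5' adjustments above. */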
ST_FUNC void gtst_addr(int inv, int a)
{
    int v = vtop->r & VT_VALMASK;
    if (v == VT_CMP) {
        inv ^= (vtop--)->c.i;
        a -= ind + 2;
        if (a == (char)a) {
            g(inv - 32);
            g(a);
        } else {
            g(0x0f);
            oad(inv - 16, a - 4);
        }
    } else if ((v & ~1) == VT_JMP) {
        if ((v & 1) != inv) {
            gjmp_addr(a);
            gsym(vtop->c.i);
        } else {
            gsym(vtop->c.i);
            o(0x05eb);
            gjmp_addr(a);
        }
        vtop--;
    }
}
/* generate a test. set 'inv' to invert test. Stack entry is popped */
ST_FUNC int gtst(int inv, int t)
{
    int v = vtop->r & VT_VALMASK;

    if (nocode_wanted) {
        ;
    } else if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100) {
            /* This was a float compare.  If the parity flag is set
               the result was unordered.  For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test.  We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump. */
            vtop->c.i &= ~0x100;
            if (inv == (vtop->c.i == TOK_NE)) {
                o(0x067a); /* jp +6 */
            } else {
                g(0x0f);
                t = gjmp2(0x8a, t); /* jp t */
            }
        }
        g(0x0f);
        t = gjmp2((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            uint32_t n1, n = vtop->c.i;
            if (n) {
                while ((n1 = read32le(cur_text_section->data + n)))
                    n = n1;
                write32le(cur_text_section->data + n, t);
                t = vtop->c.i;
            }
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    }
    vtop--;
    return t;
}
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop->r = VT_CMP;
            vtop->c.i = op;
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}
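
/* Worked example for the constant case above: "x += 1" with x in %rax and
   ll set emits the REX prefix 48, opcode 83, ModRM 0xc0 | (0 << 3) | 0 =
   c0 and the imm8 01, i.e. "add $1,%rax" (48 83 c0 01).  Constants outside
   -128..127 use opcode 0x81 with a 32-bit immediate instead. */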
void gen_opl(int op)
{
    gen_opi(op);
}
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int float_type =
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op;
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.i;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op | 0x100;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
        }
    }
}
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}
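
/* Encoding sketch for the SSE path above: converting a signed int in %eax
   to double emits f2 0f 2a c0 ("cvtsi2sd %eax,%xmm0", assuming %xmm0 was
   allocated).  Unsigned int and long long get a REX.W (48), selecting the
   64-bit source form "cvtsi2sd %rax,%xmm0" (f2 48 0f 2a c0); an unsigned
   int is assumed already zero-extended, so the signed 64-bit conversion
   is exact for it. */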
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(TREG_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(TREG_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}
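
/* Encoding sketch: truncating a double in %xmm0 to a 64-bit integer in
   %rax emits f2 48 0f 2c c0 ("cvttsd2si %xmm0,%rax", again assuming those
   register assignments); the float variant swaps the f2 prefix for f3.
   The 8-byte form is used for every target type except plain int, so
   long/pointer results are not truncated to 32 bits. */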
/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}

#ifdef TCC_TARGET_PE
/* Save result of gen_vla_alloc onto the stack */
ST_FUNC void gen_vla_result(int addr) {
    /* mov %rax,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RAX, VT_LOCAL, NULL, addr);
}
#endif
/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    gfunc_call(1);
#else
    int r;
    r = gv(RC_INT); /* allocation size */
    /* sub r,%rsp */
    o(0x2b48);
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than 'align' */
    /* and ~15, %rsp */
    o(0xf0e48348);
    vpop();
#endif
}

/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/