/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS         25
#define NB_ASM_REGS     16
#define CONFIG_TCC_ASM

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions on it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0080 /* only for long double */
#define RC_R8      0x0100
#define RC_R9      0x0200
#define RC_R10     0x0400
#define RC_R11     0x0800
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_IRE2    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_FRE2    RC_XMM1 /* function return: second float register */

/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20
};

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)
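
/* Worked example (illustrative, not from the original source): TREG_R10
   is 10 = 0b1010, so REX_BASE(TREG_R10) = 1 (the register needs a
   REX.B/REX.R extension bit) and REG_VALUE(TREG_R10) = 2 (the low three
   bits that go into a ModRM field, the same slot value as rdx). */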

/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_IRE2 TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_FRE2 TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN     16

/* define if return values need to be extended explicitly
   at caller side (for interfacing with non-TCC compilers) */
#define PROMOTE_RET
/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#define USING_GLOBALS
#include "tcc.h"
#include <assert.h>

ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0,
    0,
    0,
    0,
    0,
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};

static unsigned long func_sub_sp_offset;
static int func_ret_sub;

#if defined(CONFIG_TCC_BCHECK)
static addr_t func_bound_offset;
static unsigned long func_bound_ind;
static int func_bound_add_epilog;
#endif

#ifdef TCC_TARGET_PE
static int func_scratch, func_alloca;
#endif

/* XXX: make it faster ? */
ST_FUNC void g(int c)
{
    int ind1;
    if (nocode_wanted)
        return;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}

ST_FUNC void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}
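
/* Note on o(): bytes are emitted low-order first and emission stops when
   the remaining value is zero, so multi-byte opcodes throughout this file
   are written as little-endian integer constants.  For instance o(0xd60f66)
   emits the byte sequence 66 0f d6 (the movq xmm-store opcode used below);
   an opcode containing an embedded 0x00 byte cannot go through o() and
   must be emitted byte-by-byte with g(). */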

ST_FUNC void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

ST_FUNC void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

ST_FUNC void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}

static void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
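
/* Worked example (illustrative): orex(1, TREG_R8, 0, 0x8b) emits
   0x49 0x8b, i.e. a REX prefix with W=1 (64-bit operand size) and B=1
   (the r/m register is one of r8-r15) followed by the mov opcode.
   For a 32-bit access to a low register no prefix is emitted at all. */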

/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
{
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a < 0 ? -a : a - t - 4);
        t = n;
    }
}
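
/* Forward references are kept as a linked list threaded through the
   32-bit displacement slots themselves: each pending slot holds the
   offset of the next pending slot, with 0 terminating the chain.
   gsym_addr() walks that chain and overwrites every slot with the final
   pc-relative displacement a - t - 4 (relative to the end of the 4-byte
   field), or with the absolute value -a when a is negative. */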

static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}

/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)
{
    int t;
    if (nocode_wanted)
        return s;
    o(c);
    t = ind;
    gen_le32(s);
    return t;
}

/* generate jmp to a label */
#define gjmp2(instr,lbl) oad(instr,lbl)

ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
    gen_le32(c-4);
}

/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifdef TCC_TARGET_PE
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
        get_tok_str(sym->v, NULL), c, r,
        cur_text_section->data[ind-3],
        cur_text_section->data[ind-2],
        cur_text_section->data[ind-1]
        );
#endif
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}

static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        if (!(r & VT_SYM)) {
            /* Absolute memory reference */
            o(0x04 | op_reg); /* [sib] | destreg */
            oad(0x25, c);     /* disp32 */
        } else {
            o(0x05 | op_reg); /* (%rip)+disp32 | destreg */
            if (is_got) {
                gen_gotpcrel(r, sym, c);
            } else {
                gen_addrpc32(r, sym, c);
            }
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}
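
/* Worked example (illustrative): a local at offset -8 from %rbp with
   op_reg = 0 takes the short form 0x45 0xf8 (ModRM mod=01 with a signed
   disp8 off %rbp), while an offset of -0x1000 fails the c == (char)c
   test above and takes 0x85 followed by a 4-byte disp32 instead. */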

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}

/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.i;
    if (fc != sv->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in load");

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.i = fc;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        if (fc != sv->c.i) {
            /* If the addend doesn't fit into a 32bit signed
               we must use a 64bit move.  We've checked above
               that this doesn't have a sym associated.  */
            v1.type.t = VT_LLONG;
            v1.r = VT_CONST;
            v1.c.i = sv->c.i;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
            fc = 0;
        }
        ll = 0;
        /* Like GCC we can load from small enough properly sized
           structs and unions as well.
           XXX maybe move to generic operand handling, but should
           occur only with asm, so tccasm.c might also be a better place */
        if ((ft & VT_BTYPE) == VT_STRUCT) {
            int align;
            switch (type_size(&sv->type, &align)) {
                case 1: ft = VT_BYTE; break;
                case 2: ft = VT_SHORT; break;
                case 4: ft = VT_INT; break;
                case 8: ft = VT_LLONG; break;
                default:
                    tcc_error("invalid aggregate type for register load");
                    break;
            }
        }
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT)
                   || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR)
                   || ((ft & VT_BTYPE) == VT_FUNC)
                );
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.i);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            if (fc & 0x100) {
                v = vtop->cmp_r;
                fc &= ~0x100;
                /* This was a float compare.  If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE.  */
                orex(0, r, 0, 0xb0 + REG_VALUE(r)); /* mov $0/1,%al */
                g(v ^ fc ^ (v == TOK_NE));
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
            orex(0,r,0, 0x0f);
            o(0xc0b6 + REG_VALUE(r) * 0x900); /* movzbl %al, %eax */
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(is64_type(ft), r, v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}

/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    fr = v->r & VT_VALMASK;
    ft = v->type.t;
    fc = v->c.i;
    if (fc != v->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in store");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, etc. */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            orex(1, fr, r, op64);
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}

/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) && (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant symbolic case -> simple relocation */
#ifdef TCC_TARGET_PE
        greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
#else
        greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
#endif
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
#ifdef CONFIG_TCC_BCHECK
        if (tcc_state->do_bounds_check &&
            (vtop->sym->v == TOK_alloca ||
             vtop->sym->v == TOK_setjmp ||
             vtop->sym->v == TOK__setjmp
#ifndef TCC_TARGET_PE
             || vtop->sym->v == TOK_sigsetjmp
             || vtop->sym->v == TOK___sigsetjmp
#endif
            ))
            func_bound_add_epilog = 1;
#endif
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}

#if defined(CONFIG_TCC_BCHECK)

static void gen_bounds_call(int v)
{
    Sym *sym = external_global_sym(v, &func_old_type);
    oad(0xe8, 0);
#ifdef TCC_TARGET_PE
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
#else
    greloca(cur_text_section, sym, ind-4, R_X86_64_PLT32, -4);
#endif
}

/* generate a bounded pointer addition */
ST_FUNC void gen_bounded_ptr_add(void)
{
    vpush_global_sym(&func_old_type, TOK___bound_ptr_add);
    vrott(3);
    gfunc_call(2);
    vpushi(0);
    /* returned pointer is in rax */
    vtop->r = TREG_RAX | VT_BOUNDED;
    if (nocode_wanted)
        return;
    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
}

/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
ST_FUNC void gen_bounded_ptr_deref(void)
{
    addr_t func;
    int size, align;
    ElfW(Rela) *rel;
    Sym *sym;

    if (nocode_wanted)
        return;

    size = type_size(&vtop->type, &align);
    switch(size) {
    case  1: func = TOK___bound_ptr_indir1; break;
    case  2: func = TOK___bound_ptr_indir2; break;
    case  4: func = TOK___bound_ptr_indir4; break;
    case  8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
    default:
        /* may happen with struct member access */
        return;
        //tcc_error("unhandled size when dereferencing bounded pointer");
        //func = 0;
        //break;
    }
    sym = external_global_sym(func, &func_old_type);
    if (!sym->c)
        put_extern_sym(sym, NULL, 0, 0);
    /* patch relocation */
    /* XXX: find a better solution ? */
    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
}

#ifdef TCC_TARGET_PE
# define TREG_FASTCALL_1 TREG_RCX
#else
# define TREG_FASTCALL_1 TREG_RDI
#endif

static void gen_bounds_prolog(void)
{
    /* leave some room for bound checking code */
    func_bound_offset = lbounds_section->data_offset;
    func_bound_ind = ind;
    func_bound_add_epilog = 0;
    o(0xb848 + TREG_FASTCALL_1 * 0x100); /* lbound section pointer */
    gen_le64 (0);
    oad(0xb8, 0); /* call to function */
}
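
/* The 10-byte mov $imm64 and the 5-byte placeholder emitted above are
   only reserved space: gen_bounds_epilog() below rewinds ind back to
   func_bound_ind, drops a R_X86_64_64 relocation on the immediate (at
   ind + 2, past the REX and opcode bytes of the mov), then overwrites
   the placeholder with a real call to __bound_local_new. */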

static void gen_bounds_epilog(void)
{
    addr_t saved_ind;
    addr_t *bounds_ptr;
    Sym *sym_data;

    if (func_bound_offset == lbounds_section->data_offset && !func_bound_add_epilog)
        return;

    /* add end of table info */
    bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
    *bounds_ptr = 0;

    sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                           func_bound_offset, lbounds_section->data_offset);

    /* generate bound local allocation */
    if (func_bound_offset != lbounds_section->data_offset) {
        saved_ind = ind;
        ind = func_bound_ind;
        greloca(cur_text_section, sym_data, ind + 2, R_X86_64_64, 0);
        ind = ind + 10;
        gen_bounds_call(TOK___bound_local_new);
        ind = saved_ind;
    }

    /* generate bound check local freeing */
    o(0x5250); /* save returned value, if any */
    greloca(cur_text_section, sym_data, ind + 2, R_X86_64_64, 0);
    o(0xb848 + TREG_FASTCALL_1 * 0x100); /* mov xxx, %rcx/di */
    gen_le64 (0);
    gen_bounds_call(TOK___bound_local_delete);
    o(0x585a); /* restore returned value, if any */
}
#endif

#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

static void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}

static int using_regs(int size)
{
    return !(size > 8 || (size & (size - 1)));
}
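
/* using_regs() is true for the power-of-two sizes up to 8 (1, 2, 4, 8):
   on Win64 only such aggregates travel in a register or stack slot
   directly; any other size is copied to scratch space and passed by
   address, which is what the struct handling below implements. */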

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    size = type_size(vt, &align);
    if (!using_regs(size))
        return 0;
    if (size == 8)
        ret->t = VT_LLONG;
    else if (size == 4)
        ret->t = VT_INT;
    else if (size == 2)
        ret->t = VT_SHORT;
    else
        ret->t = VT_BYTE;
    ret->ref = NULL;
    return 1;
}

static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

static int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}

void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);
#endif

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (using_regs(size))
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (!using_regs(size)) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                if (tcc_state->nosse)
                    tcc_error("SSE disabled");
                if (arg >= REGN) {
                    gv(RC_XMM0);
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* Load directly to xmmN register */
                    gv(RC_XMM0 << arg);
                    d = arg_prepare_reg(arg);
                    /* mov %xmmN, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + arg*8 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);
    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);

    if ((vtop->r & VT_SYM) && vtop->sym->v == TOK_alloca) {
        /* need to add the "func_scratch" area after alloca */
        o(0x48); func_alloca = oad(0x05, func_alloca); /* add $NN, %rax */
#ifdef CONFIG_TCC_BCHECK
        if (tcc_state->do_bounds_check)
            gen_bounds_call(TOK___bound_alloca_nr); /* new region */
#endif
    }
    vtop--;
}

#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(Sym *func_sym)
{
    CType *func_type = &func_sym->type;
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 32;
    func_alloca = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    size = gfunc_arg_size(&func_vt);
    if (!using_regs(size)) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (!using_regs(size)) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LLOCAL | VT_LVAL, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    if (tcc_state->nosse)
                        tcc_error("SSE disabled");
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_var) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_prolog();
#endif
}

/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    /* align local size to word & save local variables */
    func_scratch = (func_scratch + 15) & -16;
    loc = (loc & -16) - func_scratch;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_epilog();
#endif

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    v = -loc;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */
        gen_le32(v);
    }

    /* add the "func_scratch" area after each alloca seen */
    gsym_addr(func_alloca, -func_scratch);

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}

#else

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}
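
/* Illustrative encodings: gadd_sp(8) emits 48 83 c4 08 (add $0x8,%rsp,
   with a sign-extended imm8), while gadd_sp(0x100) does not fit a signed
   byte and emits 48 81 c4 00 01 00 00 (add $0x100,%rsp with imm32). */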

typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}

static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
        return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }
    assert(0);
    return 0;
}
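
/* Note (illustrative): this is a simplified take on the SysV AMD64
   classification.  The full ABI classifies each eightbyte of a struct
   separately; here one mode is merged over all fields, so for example
   struct { double d; long l; } collapses to x86_64_mode_integer and is
   passed in two general-purpose registers, while
   struct { double d; double e; } stays x86_64_mode_sse and is passed in
   two SSE registers. */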

static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    if (size > 4)
                        ret_t = VT_LLONG;
                    else if (size > 2)
                        ret_t = VT_INT;
                    else if (size > 1)
                        ret_t = VT_SHORT;
                    else
                        ret_t = VT_BYTE;
                    if ((ty->t & VT_BTYPE) == VT_STRUCT || (ty->t & VT_UNSIGNED))
                        ret_t |= VT_UNSIGNED;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}

ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}

#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return arg_regs[idx];
}

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;
    char _onstack[nb_args ? nb_args : 1], *onstack = _onstack;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);
#endif

    /* calculate the number of integer/float register arguments, remember
       arguments to be passed via stack (in onstack[]), and also remember
       if we have to align the stack pointer to 16 (onstack[i] == 2).  Needs
       to be done in a left-to-right pass over arguments.  */
    stack_adjust = 0;
    for(i = nb_args - 1; i >= 0; i--) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse && nb_sse_args + reg_count <= 8) {
            nb_sse_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_integer && nb_reg_args + reg_count <= REGN) {
            nb_reg_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_none) {
            onstack[i] = 0;
        } else {
            if (align == 16 && (stack_adjust &= 15)) {
                onstack[i] = 2;
                stack_adjust = 0;
            } else
                onstack[i] = 1;
            stack_adjust += size;
        }
    }

    if (nb_sse_args && tcc_state->nosse)
        tcc_error("SSE disabled but floating point arguments passed");

    /* fetch cpu flag before generating any code */
    if ((vtop->r & VT_VALMASK) == VT_CMP)
        gv(RC_INT);

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    args_size = 0;
    stack_adjust &= 15;
    for (i = 0; i < nb_args;) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (!onstack[i]) {
            ++i;
            continue;
        }
        /* Possibly adjust stack to align SSE boundary.  We're processing
           args from right to left while allocating happens left to right
           (stack grows down), so the adjustment needs to happen _after_
           an argument that requires it.  */
        if (stack_adjust) {
            o(0x50); /* push %rax; aka sub $8,%rsp */
            args_size += 8;
            stack_adjust = 0;
        }
        if (onstack[i] == 2)
            stack_adjust = 1;

        vrotb(i+1);

        switch (vtop->type.t & VT_BTYPE) {
        case VT_STRUCT:
            /* allocate the necessary size on stack */
            o(0x48);
            oad(0xec81, size); /* sub $xxx, %rsp */
            /* generate structure store */
            r = get_reg(RC_INT);
            orex(1, r, 0, 0x89); /* mov %rsp, r */
            o(0xe0 + REG_VALUE(r));
            vset(&vtop->type, r | VT_LVAL, 0);
            vswap();
            vstore();
            break;

        case VT_LDOUBLE:
            gv(RC_ST0);
            oad(0xec8148, size); /* sub $xxx, %rsp */
            o(0x7cdb); /* fstpt 0(%rsp) */
            g(0x24);
            g(0x00);
            break;

        case VT_FLOAT:
        case VT_DOUBLE:
            assert(mode == x86_64_mode_sse);
            r = gv(RC_FLOAT);
            o(0x50); /* push $rax */
            /* movq %xmmN, (%rsp) */
            o(0xd60f66);
            o(0x04 + REG_VALUE(r)*8);
            o(0x24);
            break;

        default:
            assert(mode == x86_64_mode_integer);
            /* simple type */
            /* XXX: implicit cast ? */
            r = gv(RC_INT);
            orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
            break;
        }
        args_size += size;

        vpop();
        --nb_args;
        onstack++;
    }

    /* XXX This should be superfluous.  */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            int d;
            gen_reg -= reg_count;
            r = gv(RC_INT);
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here.  */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    if (vtop->type.ref->f.func_type != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}

#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}

/* generate function prolog of type 't' */
void gfunc_prolog(Sym *func_sym)
{
    CType *func_type = &func_sym->type;
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (func_var) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count > REGN)
                    goto stack_arg;
                seen_reg_num += reg_count;
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count > 8)
                    goto stack_arg;
                seen_sse_num += reg_count;
                break;
            }
        }

        loc -= 24;
        /* movl $0x????????, -0x18(%rbp) */
        o(0xe845c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0x14(%rbp) */
        o(0xec45c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* leaq $0x????????, %r11 */
        o(0x9d8d4c);
        gen_le32(seen_stack_size);
        /* movq %r11, -0x10(%rbp) */
        o(0xf05d894c);
        /* leaq $-192(%rbp), %r11 */
        o(0x9d8d4c);
        gen_le32(-176 - 24);
        /* movq %r11, -0x8(%rbp) */
        o(0xf85d894c);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            if (!tcc_state->nosse) {
                o(0xd60f66); /* movq */
                gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            }
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (tcc_state->nosse)
                tcc_error("SSE disabled but floating point arguments used");
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_prolog();
#endif
}

/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_epilog();
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */

ST_FUNC void gen_fill_nops(int bytes)
{
    while (bytes--)
        g(0x90);
}

/* generate a jump to a label */
int gjmp(int t)
{
    return gjmp2(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}
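
/* Illustrative: a jump target within roughly -126..+129 bytes of the
   opcode (so that a - ind - 2 fits a signed char) takes the 2-byte
   short form eb rel8; anything farther falls back to the 5-byte
   e9 rel32 form with displacement a - ind - 5. */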

ST_FUNC int gjmp_append(int n, int t)
{
    void *p;
    /* insert vtop->c jump list in t */
    if (n) {
        uint32_t n1 = n, n2;
        while ((n2 = read32le(p = cur_text_section->data + n1)))
            n1 = n2;
        write32le(p, t);
        t = n;
    }
    return t;
}

ST_FUNC int gjmp_cond(int op, int t)
{
    if (op & 0x100) {
        /* This was a float compare.  If the parity flag is set
           the result was unordered.  For anything except != this
           means false and we don't jump (anding both conditions).
           For != this means true (oring both).
           Take care about inverting the test.  We need to jump
           to our target if the result was unordered and test wasn't NE,
           otherwise if unordered we don't want to jump.  */
        int v = vtop->cmp_r;
        op &= ~0x100;
        if (op ^ v ^ (v != TOK_NE)) {
            o(0x067a); /* jp +6 */
        } else {
            g(0x0f);
            t = gjmp2(0x8a, t); /* jp t */
        }
    }
    g(0x0f);
    t = gjmp2(op - 16, t);
    return t;
}

/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT)
            vset_VT_CMP(op);
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}

void gen_opl(int op)
{
    gen_opi(op);
}

/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int float_type =
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vset_VT_CMP(op);
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.i;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
                vtop->r = r = r | VT_LVAL;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vset_VT_CMP(op | 0x100);
            vtop->cmp_r = op;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
                vtop->r = r = r | VT_LVAL;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
        }
    }
}

/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}

/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}

/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}

// Generate sign extension from 32 to 64 bits:
ST_FUNC void gen_cvt_sxtw(void)
{
    int r = gv(RC_INT);
    /* x86_64 specific: movslq */
    o(0x6348);
    o(0xc0 + (REG_VALUE(r) << 3) + REG_VALUE(r));
}
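
/* Illustrative: with r = TREG_RAX this emits 48 63 c0, i.e.
   movslq %eax,%rax (movsxd), sign-extending the low 32 bits in place. */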

/* char/short to int conversion */
ST_FUNC void gen_cvt_csti(int t)
{
    int r, sz, xl, ll;
    r = gv(RC_INT);
    sz = !(t & VT_UNSIGNED);
    xl = (t & VT_BTYPE) == VT_SHORT;
    ll = (vtop->type.t & VT_BTYPE) == VT_LLONG;
    orex(ll, r, 0, 0xc0b60f /* mov[sz] %a[xl], %eax */
        | (sz << 3 | xl) << 8
        | (REG_VALUE(r) << 3 | REG_VALUE(r)) << 16
        );
}
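
/* Illustrative: for a signed char value already in %eax this composes
   the bytes 0f be c0, i.e. movsbl %al,%eax; the sz bit (set for signed
   types) flips the base movzx opcodes (0xb6/0xb7) to movsx (0xbe/0xbf),
   and xl selects the 16-bit variant. */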

/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}

#ifdef TCC_TARGET_PE
/* Save result of gen_vla_alloc onto the stack */
ST_FUNC void gen_vla_result(int addr) {
    /* mov %rax,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RAX, VT_LOCAL, NULL, addr);
}
#endif

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
    int use_call = 0;

#if defined(CONFIG_TCC_BCHECK)
    use_call = tcc_state->do_bounds_check;
#endif
#ifdef TCC_TARGET_PE /* alloca does more than just adjust %rsp on Windows */
    use_call = 1;
#endif
    if (use_call) {
        vpush_global_sym(&func_old_type, TOK_alloca);
        vswap(); /* Move alloca ref past allocation size */
        gfunc_call(1);
    } else {
        int r;
        r = gv(RC_INT); /* allocation size */
        /* sub r,%rsp */
        o(0x2b48);
        o(0xe0 | REG_VALUE(r));
        /* We align to 16 bytes rather than align */
        /* and ~15, %rsp */
        o(0xf0e48348);
        vpop();
    }
}

/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/