/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS         25
#define NB_ASM_REGS     16
#define CONFIG_TCC_ASM

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which does
   assumptions on it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0080 /* only for long double */
#define RC_R8      0x0100
#define RC_R9      0x0200
#define RC_R10     0x0400
#define RC_R11     0x0800
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_IRE2    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_FRE2    RC_XMM1 /* function return: second float register */

/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20
};

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)

/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_IRE2 TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_FRE2 TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN     16

/* define if return values need to be extended explicitly
   at caller side (for interfacing with non-TCC compilers) */
#define PROMOTE_RET

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#define USING_GLOBALS
#include "tcc.h"
#include <assert.h>

ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0,
    0,
    0,
    0,
    0,
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};

static unsigned long func_sub_sp_offset;
static int func_ret_sub;

/* XXX: make it faster ? */
ST_FUNC void g(int c)
{
    int ind1;
    if (nocode_wanted)
        return;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}

ST_FUNC void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}

ST_FUNC void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

ST_FUNC void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

ST_FUNC void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}

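/* Note: o() emits the low byte of 'c' first and stops at the first zero
   byte, so multi-byte opcodes are written as little-endian constants:
   o(0x7e0ff3) emits the byte sequence f3 0f 7e.  A consequence is that
   o() can never emit a 0x00 byte; g(), gen_le16() and gen_le32() are
   used wherever zero bytes may occur. */
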
static void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}

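/* The REX prefix is 0100WRXB: W selects 64-bit operand size, R extends
   the ModRM 'reg' field, X the SIB index (unused here) and B the ModRM
   'r/m' or base field.  In orex(), 'r' supplies REX.B and 'r2' supplies
   REX.R, matching the gen_modrm*() convention of passing the base
   register in 'r'.  Illustrative example: orex(1, TREG_R8, TREG_RDX,
   0x89) emits 0x49 0x89, i.e. REX.W+B followed by the mov opcode. */
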
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
{
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a < 0 ? -a : a - t - 4);
        t = n;
    }
}

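/* Unresolved forward jumps are chained through their own 32-bit
   displacement fields: each field holds the offset of the previous
   pending jump, 0 terminating the chain.  gsym_addr() walks the chain
   and overwrites each link with the real rel32, 'a - t - 4' (target
   minus end of the 4-byte field).  A negative 'a' stores the absolute
   value -a instead; see the func_alloca patching in gfunc_epilog. */
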
static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}

/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)
{
    int t;
    if (nocode_wanted)
        return s;
    o(c);
    t = ind;
    gen_le32(s);
    return t;
}

/* generate jmp to a label */
#define gjmp2(instr,lbl) oad(instr,lbl)

ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
    gen_le32(c-4);
}

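/* The '-4' bias: R_X86_64_PC32 is resolved relative to the start of
   the 32-bit field, while the CPU evaluates rip-relative operands from
   the end of the instruction, 4 bytes further on.  Biasing the addend
   (or the raw constant) by -4 compensates for the difference. */
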
/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifdef TCC_TARGET_PE
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
        get_tok_str(sym->v, NULL), c, r,
        cur_text_section->data[ind-3],
        cur_text_section->data[ind-2],
        cur_text_section->data[ind-1]
        );
#endif
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}

static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        if (!(r & VT_SYM)) {
            /* Absolute memory reference */
            o(0x04 | op_reg); /* [sib] | destreg */
            oad(0x25, c);     /* disp32 */
        } else {
            o(0x05 | op_reg); /* (%rip)+disp32 | destreg */
            if (is_got) {
                gen_gotpcrel(r, sym, c);
            } else {
                gen_addrpc32(r, sym, c);
            }
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}

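/* ModRM cheat sheet for the encodings above (mod<<6 | reg<<3 | rm):
     0x04 + oad(0x25,c): mod=00, rm=100 selects a SIB byte; SIB 0x25
                         means no base, no index, disp32 (absolute).
     0x05:               mod=00, rm=101, rip-relative disp32 in 64-bit mode.
     0x45 / 0x85:        mod=01 / mod=10 with rm=101, disp8/disp32(%rbp).
     0x80 / 0x00:        mod=10 / mod=00 with a plain register base. */
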
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}

/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.i;
    if (fc != sv->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in load");

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.i = fc;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        if (fc != sv->c.i) {
            /* If the addend doesn't fit into a 32bit signed integer
               we must use a 64bit move.  We've checked above
               that this doesn't have a sym associated. */
            v1.type.t = VT_LLONG;
            v1.r = VT_CONST;
            v1.c.i = sv->c.i;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
            fc = 0;
        }
        ll = 0;
        /* Like GCC we can load from small enough properly sized
           structs and unions as well.
           XXX maybe move to generic operand handling, but should
           occur only with asm, so tccasm.c might also be a better place */
        if ((ft & VT_BTYPE) == VT_STRUCT) {
            int align;
            switch (type_size(&sv->type, &align)) {
                case 1: ft = VT_BYTE; break;
                case 2: ft = VT_SHORT; break;
                case 4: ft = VT_INT; break;
                case 8: ft = VT_LLONG; break;
                default:
                    tcc_error("invalid aggregate type for register load");
                    break;
            }
        }
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT)
                   || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR)
                   || ((ft & VT_BTYPE) == VT_FUNC)
                );
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.i);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            if (fc & 0x100) {
                v = vtop->cmp_r;
                fc &= ~0x100;
                /* This was a float compare.  If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE. */
                orex(0, r, 0, 0xb0 + REG_VALUE(r)); /* mov $0/1,%al */
                g(v ^ fc ^ (v == TOK_NE));
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
            orex(0,r,0, 0x0f);
            o(0xc0b6 + REG_VALUE(r) * 0x900); /* movzbl %al, %eax */
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(is64_type(ft), r, v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}

/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    fr = v->r & VT_VALMASK;
    ft = v->type.t;
    fc = v->c.i;
    if (fc != v->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in store");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            orex(1, fr, r, op64);
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}

/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) && (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant symbolic case -> simple relocation */
#ifdef TCC_TARGET_PE
        greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
#else
        greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
#endif
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}

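/* R11 is the natural scratch register for the indirect case: it is
   caller-saved and is not used for argument passing by either the
   SysV or the Win64 convention, so loading the call target into it
   cannot clobber an argument register that was just set up. */
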
#if defined(CONFIG_TCC_BCHECK)

static addr_t func_bound_offset;
static unsigned long func_bound_ind;

static void gen_bounds_call(int v)
{
    Sym *sym = external_global_sym(v, &func_old_type);
    oad(0xe8, 0);
#ifdef TCC_TARGET_PE
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
#else
    greloca(cur_text_section, sym, ind-4, R_X86_64_PLT32, -4);
#endif
}

/* generate a bounded pointer addition */
ST_FUNC void gen_bounded_ptr_add(void)
{
    vpush_global_sym(&func_old_type, TOK___bound_ptr_add);
    vrott(3);
    gfunc_call(2);
    vpushi(0);
    /* returned pointer is in rax */
    vtop->r = TREG_RAX | VT_BOUNDED;
    if (nocode_wanted)
        return;
    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
}

/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
ST_FUNC void gen_bounded_ptr_deref(void)
{
    addr_t func;
    int size, align;
    ElfW(Rela) *rel;
    Sym *sym;

    if (nocode_wanted)
        return;

    size = type_size(&vtop->type, &align);
    switch(size) {
    case  1: func = TOK___bound_ptr_indir1; break;
    case  2: func = TOK___bound_ptr_indir2; break;
    case  4: func = TOK___bound_ptr_indir4; break;
    case  8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
    default:
        /* may happen with struct member access */
        return;
        //tcc_error("unhandled size when dereferencing bounded pointer");
        //func = 0;
        //break;
    }
    sym = external_global_sym(func, &func_old_type);
    if (!sym->c)
        put_extern_sym(sym, NULL, 0, 0);
    /* patch relocation */
    /* XXX: find a better solution ? */
    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
}

#ifdef TCC_TARGET_PE
# define TREG_FASTCALL_1 TREG_RCX
#else
# define TREG_FASTCALL_1 TREG_RDI
#endif

static void gen_bounds_prolog(void)
{
    /* leave some room for bound checking code */
    func_bound_offset = lbounds_section->data_offset;
    func_bound_ind = ind;
    o(0xb848 + TREG_FASTCALL_1 * 0x100); /*lbound section pointer */
    gen_le64 (0);
    oad(0xb8, 0); /* call to function */
}

static void gen_bounds_epilog(void)
{
    addr_t saved_ind;
    addr_t *bounds_ptr;
    Sym *sym_data;

    /* add end of table info */
    bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
    *bounds_ptr = 0;

    /* generate bound local allocation */
    sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                           func_bound_offset, lbounds_section->data_offset);
    saved_ind = ind;
    ind = func_bound_ind;
    greloca(cur_text_section, sym_data, ind + 2, R_X86_64_64, 0);
    ind = ind + 10;
    gen_bounds_call(TOK___bound_local_new);
    ind = saved_ind;

    /* generate bound check local freeing */
    o(0x525051); /* save returned value, if any (+ scratch-space for windows) */
    greloca(cur_text_section, sym_data, ind + 2, R_X86_64_64, 0);
    o(0xb848 + TREG_FASTCALL_1 * 0x100); /* mov xxx, %rcx/di */
    gen_le64 (0);
    gen_bounds_call(TOK___bound_local_delete);
    o(0x59585a); /* restore returned value, if any */
}
#endif

#ifdef TCC_TARGET_PE

static int func_scratch, func_alloca;

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

static void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}

static int using_regs(int size)
{
    return !(size > 8 || (size & (size - 1)));
}

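/* using_regs() is nonzero for power-of-two sizes up to 8 (1, 2, 4, 8):
   the Win64 ABI passes and returns an aggregate in a register only at
   those sizes; anything else travels through a pointer to a
   caller-made copy.  E.g. using_regs(3) == 0, using_regs(8) == 1. */
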
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    size = type_size(vt, &align);
    if (!using_regs(size))
        return 0;
    if (size == 8)
        ret->t = VT_LLONG;
    else if (size == 4)
        ret->t = VT_INT;
    else if (size == 2)
        ret->t = VT_SHORT;
    else
        ret->t = VT_BYTE;
    ret->ref = NULL;
    return 1;
}

static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

static int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}

void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);
#endif

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (using_regs(size))
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (!using_regs(size)) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                if (tcc_state->nosse)
                    tcc_error("SSE disabled");
                if (arg >= REGN) {
                    gv(RC_XMM0);
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* Load directly to xmmN register */
                    gv(RC_XMM0 << arg);
                    d = arg_prepare_reg(arg);
                    /* mov %xmmN, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + arg*8 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);
    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);

    if ((vtop->r & VT_SYM) && vtop->sym->v == TOK_alloca) {
        /* need to add the "func_scratch" area after alloca */
        o(0x48); func_alloca = oad(0x05, func_alloca); /* add $NN, %rax */
#ifdef CONFIG_TCC_BCHECK
        if (tcc_state->do_bounds_check)
            gen_bounds_call(TOK___bound_alloca_nr); /* new region */
#endif
    }
    vtop--;
}

#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(Sym *func_sym)
{
    CType *func_type = &func_sym->type;
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 32;
    func_alloca = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    func_var = (sym->f.func_type == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
    if (!using_regs(size)) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (!using_regs(size)) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LLOCAL | VT_LVAL, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    if (tcc_state->nosse)
                        tcc_error("SSE disabled");
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_type->ref->f.func_type == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_prolog();
#endif
}

/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    /* align local size to word & save local variables */
    func_scratch = (func_scratch + 15) & -16;
    loc = (loc & -16) - func_scratch;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check &&
        (func_bound_offset != lbounds_section->data_offset ||
         tcc_state->alloca_vla_used))
        gen_bounds_epilog();
#endif

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    v = -loc;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */
        gen_le32(v);
    }

    /* add the "func_scratch" area after each alloca seen */
    gsym_addr(func_alloca, -func_scratch);

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}

#else

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}

typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}

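/* These are the eightbyte class-merge rules of the SysV x86-64 psABI:
   identical classes merge to themselves, NO_CLASS yields the other
   operand, MEMORY beats everything, INTEGER beats the FP classes, and
   x87 mixed with anything else degrades to MEMORY, so SSE survives
   only when both inputs are SSE (or NO_CLASS). */
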
static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
        return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }
    assert(0);
    return 0;
}

static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    if (size > 4)
                        ret_t = VT_LLONG;
                    else if (size > 2)
                        ret_t = VT_INT;
                    else if (size > 1)
                        ret_t = VT_SHORT;
                    else
                        ret_t = VT_BYTE;
                    if ((ty->t & VT_BTYPE) == VT_STRUCT || (ty->t & VT_UNSIGNED))
                        ret_t |= VT_UNSIGNED;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none*/
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}

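/* Illustrative examples of the classification above (note that tcc
   merges the field classes of the whole argument rather than per
   eightbyte, a simplification of the psABI):
     struct { int a, b; }        -> 8 bytes,  integer, reg_count 1 (VT_LLONG)
     struct { double x, y; }     -> 16 bytes, sse,     reg_count 2 (VT_QFLOAT)
     struct { double x; int i; } -> 16 bytes, sse+integer merges to
                                    integer, reg_count 2 (VT_QLONG)
     struct { char c[24]; }      -> more than 16 bytes, memory class */
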
ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}

#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return arg_regs[idx];
}

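/* Argument slots 2 and 3 are RDX and RCX, but gv() can clobber both
   while later arguments are evaluated (division takes RDX, shift
   counts take CL), so those two arguments are staged in R10 and R11
   and only moved into place just before the call; see the end of
   gfunc_call() below. */
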
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;
    char _onstack[nb_args ? nb_args : 1], *onstack = _onstack;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);
#endif

    /* calculate the number of integer/float register arguments, remember
       arguments to be passed via stack (in onstack[]), and also remember
       if we have to align the stack pointer to 16 (onstack[i] == 2).  Needs
       to be done in a left-to-right pass over arguments.  */
    stack_adjust = 0;
    for(i = nb_args - 1; i >= 0; i--) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse && nb_sse_args + reg_count <= 8) {
            nb_sse_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_integer && nb_reg_args + reg_count <= REGN) {
            nb_reg_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_none) {
            onstack[i] = 0;
        } else {
            if (align == 16 && (stack_adjust &= 15)) {
                onstack[i] = 2;
                stack_adjust = 0;
            } else
                onstack[i] = 1;
            stack_adjust += size;
        }
    }

    if (nb_sse_args && tcc_state->nosse)
        tcc_error("SSE disabled but floating point arguments passed");

    /* fetch cpu flag before generating any code */
    if ((vtop->r & VT_VALMASK) == VT_CMP)
        gv(RC_INT);

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    args_size = 0;
    stack_adjust &= 15;
    for (i = 0; i < nb_args;) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (!onstack[i]) {
            ++i;
            continue;
        }
        /* Possibly adjust stack to align SSE boundary.  We're processing
           args from right to left while allocating happens left to right
           (stack grows down), so the adjustment needs to happen _after_
           an argument that requires it.  */
        if (stack_adjust) {
            o(0x50); /* push %rax; aka sub $8,%rsp */
            args_size += 8;
            stack_adjust = 0;
        }
        if (onstack[i] == 2)
            stack_adjust = 1;

        vrotb(i+1);

        switch (vtop->type.t & VT_BTYPE) {
        case VT_STRUCT:
            /* allocate the necessary size on stack */
            o(0x48);
            oad(0xec81, size); /* sub $xxx, %rsp */
            /* generate structure store */
            r = get_reg(RC_INT);
            orex(1, r, 0, 0x89); /* mov %rsp, r */
            o(0xe0 + REG_VALUE(r));
            vset(&vtop->type, r | VT_LVAL, 0);
            vswap();
            vstore();
            break;

        case VT_LDOUBLE:
            gv(RC_ST0);
            oad(0xec8148, size); /* sub $xxx, %rsp */
            o(0x7cdb); /* fstpt 0(%rsp) */
            g(0x24);
            g(0x00);
            break;

        case VT_FLOAT:
        case VT_DOUBLE:
            assert(mode == x86_64_mode_sse);
            r = gv(RC_FLOAT);
            o(0x50); /* push $rax */
            /* movq %xmmN, (%rsp) */
            o(0xd60f66);
            o(0x04 + REG_VALUE(r)*8);
            o(0x24);
            break;

        default:
            assert(mode == x86_64_mode_integer);
            /* simple type */
            /* XXX: implicit cast ? */
            r = gv(RC_INT);
            orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
            break;
        }
        args_size += size;

        vpop();
        --nb_args;
        onstack++;
    }

    /* XXX This should be superfluous.  */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            int d;
            gen_reg -= reg_count;
            r = gv(RC_INT);
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here.  */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    if (vtop->type.ref->f.func_type != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}

#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}

/* generate function prolog of type 't' */
void gfunc_prolog(Sym *func_sym)
{
    CType *func_type = &func_sym->type;
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (sym->f.func_type == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count > REGN)
                    goto stack_arg;
                seen_reg_num += reg_count;
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count > 8)
                    goto stack_arg;
                seen_sse_num += reg_count;
                break;
            }
        }

        loc -= 24;
        /* movl $0x????????, -0x18(%rbp) */
        o(0xe845c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0x14(%rbp) */
        o(0xec45c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* leaq $0x????????, %r11 */
        o(0x9d8d4c);
        gen_le32(seen_stack_size);
        /* movq %r11, -0x10(%rbp) */
        o(0xf05d894c);
        /* leaq $-192(%rbp), %r11 */
        o(0x9d8d4c);
        gen_le32(-176 - 24);
        /* movq %r11, -0x8(%rbp) */
        o(0xf85d894c);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            if (!tcc_state->nosse) {
                o(0xd60f66); /* movq */
                gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            }
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (tcc_state->nosse)
                tcc_error("SSE disabled but floating point arguments used");
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_prolog();
#endif
}

/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check &&
        (func_bound_offset != lbounds_section->data_offset ||
         tcc_state->alloca_vla_used))
        gen_bounds_epilog();
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */

ST_FUNC void gen_fill_nops(int bytes)
{
    while (bytes--)
        g(0x90);
}

/* generate a jump to a label */
int gjmp(int t)
{
    return gjmp2(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}

ST_FUNC int gjmp_append(int n, int t)
{
    void *p;
    /* insert vtop->c jump list in t */
    if (n) {
        uint32_t n1 = n, n2;
        while ((n2 = read32le(p = cur_text_section->data + n1)))
            n1 = n2;
        write32le(p, t);
        t = n;
    }
    return t;
}

ST_FUNC int gjmp_cond(int op, int t)
{
    if (op & 0x100) {
        /* This was a float compare.  If the parity flag is set
           the result was unordered.  For anything except != this
           means false and we don't jump (anding both conditions).
           For != this means true (oring both).
           Take care about inverting the test.  We need to jump
           to our target if the result was unordered and test wasn't NE,
           otherwise if unordered we don't want to jump.  */
        int v = vtop->cmp_r;
        op &= ~0x100;
        if (op ^ v ^ (v != TOK_NE))
            o(0x067a);  /* jp +6 */
        else {
            g(0x0f);
            t = gjmp2(0x8a, t); /* jp t */
        }
    }
    g(0x0f);
    t = gjmp2(op - 16, t);
    return t;
}

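/* tcc stores comparison operators directly as x86 setcc opcodes
   (0x90 | cc; see the VT_CMP case in load() above), so 'op - 16'
   converts a setcc opcode into the matching long-form jcc opcode
   (0x80 | cc) that follows the 0x0f prefix. */
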
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT)
            vset_VT_CMP(op);
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}

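/* 'opc' above is the /digit extension of the 0x81/0x83 immediate
   group and selects the ALU operation: 0=add, 1=or, 2=adc, 3=sbb,
   4=and, 5=sub, 6=xor, 7=cmp.  Shifted into bits 3..5 of the opcode,
   (opc << 3) | 0x01 gives the corresponding register-register form.
   The default case (opc = 7) therefore emits cmp, which is how the
   comparison operators TOK_ULT..TOK_GT are generated. */
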
void gen_opl(int op)
{
    gen_opi(op);
}

/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int float_type =
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vset_VT_CMP(op);
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.i;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
                vtop->r = r = r | VT_LVAL;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vset_VT_CMP(op | 0x100);
            vtop->cmp_r = op;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
                vtop->r = r = r | VT_LVAL;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
        }
    }
}

/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}

/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}

/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}

// Generate sign extension from 32 to 64 bits:
ST_FUNC void gen_cvt_sxtw(void)
{
    int r = gv(RC_INT);
    /* x86_64 specific: movslq */
    o(0x6348);
    o(0xc0 + (REG_VALUE(r) << 3) + REG_VALUE(r));
}

/* char/short to int conversion */
ST_FUNC void gen_cvt_csti(int t)
{
    int r, sz, xl, ll;
    r = gv(RC_INT);
    sz = !(t & VT_UNSIGNED);
    xl = (t & VT_BTYPE) == VT_SHORT;
    ll = (vtop->type.t & VT_BTYPE) == VT_LLONG;
    orex(ll, r, 0, 0xc0b60f /* mov[sz] %a[xl], %eax */
        | (sz << 3 | xl) << 8
        | (REG_VALUE(r) << 3 | REG_VALUE(r)) << 16
        );
}

/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}

#ifdef TCC_TARGET_PE
/* Save result of gen_vla_alloc onto the stack */
ST_FUNC void gen_vla_result(int addr) {
    /* mov %rax,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RAX, VT_LOCAL, NULL, addr);
}
#endif

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
    int use_call = 0;

#if defined(CONFIG_TCC_BCHECK)
    use_call = tcc_state->do_bounds_check;
#endif
#ifdef TCC_TARGET_PE  /* alloca does more than just adjust %rsp on Windows */
    use_call = 1;
#endif
    if (use_call) {
        vpush_global_sym(&func_old_type, TOK_alloca);
        vswap(); /* Move alloca ref past allocation size */
        gfunc_call(1);
    } else {
        int r;
        r = gv(RC_INT); /* allocation size */
        /* sub r,%rsp */
        o(0x2b48);
        o(0xe0 | REG_VALUE(r));
        /* We align to 16 bytes rather than align */
        /* and ~15, %rsp */
        o(0xf0e48348);
        vpop();
    }
}

/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/