/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS         25
#define NB_ASM_REGS     16
#define CONFIG_TCC_ASM

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0080 /* only for long double */
#define RC_R8      0x0100
#define RC_R9      0x0200
#define RC_R10     0x0400
#define RC_R11     0x0800
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_IRE2    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_FRE2    RC_XMM1 /* function return: second float register */

/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20
};

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)

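/* Illustration (informative only): x86-64 extends the 3-bit register
   fields of the ModRM byte with one extra bit carried in a REX prefix.
   For example TREG_R10 = 10 = 0b1010: REG_VALUE() yields 2, the bits
   that go into the ModRM byte, and REX_BASE() yields 1, the bit that
   goes into the REX prefix.  For the legacy registers (values 0..7)
   REX_BASE() is 0 and no prefix is needed. */
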
/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_IRE2 TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_FRE2 TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN     16

/* define if return values need to be extended explicitly
   at caller side (for interfacing with non-TCC compilers) */
#define PROMOTE_RET

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#define USING_GLOBALS
#include "tcc.h"
#include <assert.h>

ST_DATA const char * const target_machine_defs =
    "__x86_64__\0"
    "__amd64__\0"
    ;

ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0,
    0,
    0,
    0,
    0,
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};

static unsigned long func_sub_sp_offset;
static int func_ret_sub;

#if defined(CONFIG_TCC_BCHECK)
static addr_t func_bound_offset;
static unsigned long func_bound_ind;
ST_DATA int func_bound_add_epilog;
#endif

#ifdef TCC_TARGET_PE
static int func_scratch, func_alloca;
#endif

/* XXX: make it faster ? */
ST_FUNC void g(int c)
{
    int ind1;
    if (nocode_wanted)
        return;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}

ST_FUNC void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}

ST_FUNC void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

ST_FUNC void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

ST_FUNC void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}

static void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}

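/* Example of what orex() emits (informative only): orex(1, TREG_R8,
   TREG_RAX, 0x89) produces the bytes 0x49 0x89, i.e. a REX prefix with
   W=1 (64-bit operand size) and B=1 (register r8 in the r/m field)
   followed by the mov opcode.  Pseudo register values such as VT_CONST
   are masked to 0 first so they never contribute REX bits. */
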
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
{
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a < 0 ? -a : a - t - 4);
        t = n;
    }
}

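/* Forward references are kept as a linked list threaded through the
   code itself: each unresolved rel32 field temporarily holds the
   offset of the previous unresolved jump (0 terminates the list).
   gsym_addr() walks that list and replaces every entry with the real
   displacement 'a - t - 4', which is relative to the end of the
   4-byte field, as the CPU expects. */
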
static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}

/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)
{
    int t;
    if (nocode_wanted)
        return s;
    o(c);
    t = ind;
    gen_le32(s);
    return t;
}

/* generate jmp to a label */
#define gjmp2(instr,lbl) oad(instr,lbl)

ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
    gen_le32(c-4);
}

/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifdef TCC_TARGET_PE
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
              get_tok_str(sym->v, NULL), c, r,
              cur_text_section->data[ind-3],
              cur_text_section->data[ind-2],
              cur_text_section->data[ind-1]
              );
#endif
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}

static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        if (!(r & VT_SYM)) {
            /* Absolute memory reference */
            o(0x04 | op_reg); /* [sib] | destreg */
            oad(0x25, c);     /* disp32 */
        } else {
            o(0x05 | op_reg); /* (%rip)+disp32 | destreg */
            if (is_got) {
                gen_gotpcrel(r, sym, c);
            } else {
                gen_addrpc32(r, sym, c);
            }
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}

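/* Summary of the ModRM forms emitted above (informative only):
     0x04|reg<<3, SIB 0x25, disp32   absolute address
     0x05|reg<<3, disp32             RIP-relative (with relocation)
     0x45|reg<<3, disp8              disp8(%rbp), short local
     0x85|reg<<3, disp32             disp32(%rbp), far local
     0x80|reg<<3|rm, disp32          disp32(register)
     0x00|reg<<3|rm                  (register), no displacement */
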
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}

/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.i;
    if (fc != sv->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in load");

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.i = fc;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        if (fc != sv->c.i) {
            /* If the addend doesn't fit into a 32bit signed value
               we must use a 64bit move.  We've checked above
               that this doesn't have a sym associated. */
            v1.type.t = VT_LLONG;
            v1.r = VT_CONST;
            v1.c.i = sv->c.i;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
            fc = 0;
        }
        ll = 0;
        /* Like GCC we can load from small enough properly sized
           structs and unions as well.
           XXX maybe move to generic operand handling, but should
           occur only with asm, so tccasm.c might also be a better place */
        if ((ft & VT_BTYPE) == VT_STRUCT) {
            int align;
            switch (type_size(&sv->type, &align)) {
            case 1: ft = VT_BYTE; break;
            case 2: ft = VT_SHORT; break;
            case 4: ft = VT_INT; break;
            case 8: ft = VT_LLONG; break;
            default:
                tcc_error("invalid aggregate type for register load");
                break;
            }
        }
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else if ((ft & VT_TYPE) == (VT_VOID)) {
            /* Can happen with zero size structs */
            return;
        } else {
            assert(((ft & VT_BTYPE) == VT_INT)
                   || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR)
                   || ((ft & VT_BTYPE) == VT_FUNC)
                );
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.i);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            if (fc & 0x100) {
                v = vtop->cmp_r;
                fc &= ~0x100;
                /* This was a float compare.  If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE. */
                orex(0, r, 0, 0xb0 + REG_VALUE(r)); /* mov $0/1,%al */
                g(v ^ fc ^ (v == TOK_NE));
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
            orex(0,r,0, 0x0f);
            o(0xc0b6 + REG_VALUE(r) * 0x900); /* movzbl %al, %eax */
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(is64_type(ft), r, v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}

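/* Worked example (informative only, byte values hand-checked): loading
   an 'int' local at -8(%rbp) into TREG_RAX takes the VT_LVAL/VT_LOCAL
   path with b = 0x8b and ll = 0, so load() emits 8b 45 f8, i.e.
   mov -0x8(%rbp),%eax.  The same local with a long long type sets
   ll = 1 and goes through gen_modrm64(), producing 48 8b 45 f8. */
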
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    fr = v->r & VT_VALMASK;
    ft = v->type.t;
    fc = v->c.i;
    if (fc != v->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in store");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST
        && (v->r & VT_SYM)
        && !(v->sym->type.t & VT_STATIC)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, etc. */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            orex(1, fr, r, op64);
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}

/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) && (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant symbolic case -> simple relocation */
#ifdef TCC_TARGET_PE
        greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
#else
        greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
#endif
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}

#if defined(CONFIG_TCC_BCHECK)

static void gen_bounds_call(int v)
{
    Sym *sym = external_helper_sym(v);
    oad(0xe8, 0);
#ifdef TCC_TARGET_PE
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
#else
    greloca(cur_text_section, sym, ind-4, R_X86_64_PLT32, -4);
#endif
}

#ifdef TCC_TARGET_PE
# define TREG_FASTCALL_1 TREG_RCX
#else
# define TREG_FASTCALL_1 TREG_RDI
#endif

static void gen_bounds_prolog(void)
{
    /* leave some room for bound checking code */
    func_bound_offset = lbounds_section->data_offset;
    func_bound_ind = ind;
    func_bound_add_epilog = 0;
    o(0x0d8d48 + ((TREG_FASTCALL_1 == TREG_RDI) * 0x300000)); /* lbound section pointer */
    gen_le32 (0);
    oad(0xb8, 0); /* call to function */
}

static void gen_bounds_epilog(void)
{
    addr_t saved_ind;
    addr_t *bounds_ptr;
    Sym *sym_data;
    int offset_modified = func_bound_offset != lbounds_section->data_offset;

    if (!offset_modified && !func_bound_add_epilog)
        return;

    /* add end of table info */
    bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
    *bounds_ptr = 0;

    sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                           func_bound_offset, lbounds_section->data_offset);

    /* generate bound local allocation */
    if (offset_modified) {
        saved_ind = ind;
        ind = func_bound_ind;
        greloca(cur_text_section, sym_data, ind + 3, R_X86_64_PC32, -4);
        ind = ind + 7;
        gen_bounds_call(TOK___bound_local_new);
        ind = saved_ind;
    }

    /* generate bound check local freeing */
    o(0x5250); /* save returned value, if any */
    greloca(cur_text_section, sym_data, ind + 3, R_X86_64_PC32, -4);
    o(0x0d8d48 + ((TREG_FASTCALL_1 == TREG_RDI) * 0x300000)); /* lea xxx(%rip), %rcx/rdi */
    gen_le32 (0);
    gen_bounds_call(TOK___bound_local_delete);
    o(0x585a); /* restore returned value, if any */
}
#endif

#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return idx >= 0 && idx < REGN ? arg_regs[idx] : 0;
}

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

static void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}

static int using_regs(int size)
{
    return !(size > 8 || (size & (size - 1)));
}

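/* using_regs() accepts exactly the sizes 1, 2, 4 and 8 (a size of 0
   also passes the test): 'size & (size - 1)' is the usual power-of-two
   check.  On Win64 only such aggregates travel by value in a register
   or stack slot; everything else is passed by reference to a copy the
   caller makes, which is what the memcpy logic below implements. */
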
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    size = type_size(vt, &align);
    if (!using_regs(size))
        return 0;
    if (size == 8)
        ret->t = VT_LLONG;
    else if (size == 4)
        ret->t = VT_INT;
    else if (size == 2)
        ret->t = VT_SHORT;
    else
        ret->t = VT_BYTE;
    ret->ref = NULL;
    return 1;
}

static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

static int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}

void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);
#endif

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (using_regs(size))
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (!using_regs(size)) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                if (tcc_state->nosse)
                    tcc_error("SSE disabled");
                if (arg >= REGN) {
                    gv(RC_XMM0);
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* Load directly to xmmN register */
                    gv(RC_XMM0 << arg);
                    d = arg_prepare_reg(arg);
                    /* mov %xmmN, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + arg*8 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);
    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);

    if ((vtop->r & VT_SYM) && vtop->sym->v == TOK_alloca) {
        /* need to add the "func_scratch" area after alloca */
        o(0x48); func_alloca = oad(0x05, func_alloca); /* add $NN, %rax */
#ifdef CONFIG_TCC_BCHECK
        if (tcc_state->do_bounds_check)
            gen_bounds_call(TOK___bound_alloca_nr); /* new region */
#endif
    }
    vtop--;
}

#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(Sym *func_sym)
{
    CType *func_type = &func_sym->type;
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 32;
    func_alloca = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    size = gfunc_arg_size(&func_vt);
    if (!using_regs(size)) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (!using_regs(size)) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LLOCAL | VT_LVAL, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    if (tcc_state->nosse)
                        tcc_error("SSE disabled");
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_var) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_prolog();
#endif
}

/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    /* align local size to word & save local variables */
    func_scratch = (func_scratch + 15) & -16;
    loc = (loc & -16) - func_scratch;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_epilog();
#endif

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    v = -loc;

    if (v >= 4096) {
        Sym *sym = external_helper_sym(TOK___chkstk);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
        o(0xec8148);    /* sub rsp, stacksize */
        gen_le32(v);
    }

    /* add the "func_scratch" area after each alloca seen */
    gsym_addr(func_alloca, -func_scratch);

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}

#else /* !TCC_TARGET_PE */

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}

typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}

static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
        return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }
    assert(0);
    return 0;
}

static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    if (size > 4)
                        ret_t = VT_LLONG;
                    else if (size > 2)
                        ret_t = VT_INT;
                    else if (size > 1)
                        ret_t = VT_SHORT;
                    else
                        ret_t = VT_BYTE;
                    if ((ty->t & VT_BTYPE) == VT_STRUCT || (ty->t & VT_UNSIGNED))
                        ret_t |= VT_UNSIGNED;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}

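/* A few examples of what this classifier computes (informative only):
     struct { int a, b; }      size 8,  integer, reg_count 1 -> one GP register
     struct { double a, b; }   size 16, sse, reg_count 2     -> xmm pair (VT_QFLOAT)
     struct { char c[24]; }    size 24                       -> x86_64_mode_memory
   Note this is a simplified form of the psABI algorithm: field classes
   are merged over the whole struct rather than per eightbyte, so a
   mixed struct such as { double; long } classifies as integer here. */
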
ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}

#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return idx >= 0 && idx < REGN ? arg_regs[idx] : 0;
}

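/* Mapping used while arguments are prepared (informative only):
   idx 0 -> rdi, 1 -> rsi, 2 -> r10, 3 -> r11, 4 -> r8, 5 -> r9.
   R10/R11 stand in for RDX/RCX because gv() may still allocate RDX or
   RCX while later arguments are evaluated; the real registers are
   filled in just before the call (see the 'mov %r10, %rdx' /
   'mov %r11, %rcx' pair in gfunc_call below). */
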
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, i, reg_count, k;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;
    char *onstack = tcc_malloc((nb_args + 1) * sizeof (char));

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);
#endif

    /* calculate the number of integer/float register arguments, remember
       arguments to be passed via stack (in onstack[]), and also remember
       if we have to align the stack pointer to 16 (onstack[i] == 2).  Needs
       to be done in a left-to-right pass over arguments. */
    stack_adjust = 0;
    for(i = nb_args - 1; i >= 0; i--) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (size == 0) continue;
        if (mode == x86_64_mode_sse && nb_sse_args + reg_count <= 8) {
            nb_sse_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_integer && nb_reg_args + reg_count <= REGN) {
            nb_reg_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_none) {
            onstack[i] = 0;
        } else {
            if (align == 16 && (stack_adjust &= 15)) {
                onstack[i] = 2;
                stack_adjust = 0;
            } else
                onstack[i] = 1;
            stack_adjust += size;
        }
    }

    if (nb_sse_args && tcc_state->nosse)
        tcc_error("SSE disabled but floating point arguments passed");

    /* fetch cpu flag before generating any code */
    if ((vtop->r & VT_VALMASK) == VT_CMP)
        gv(RC_INT);

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    args_size = 0;
    stack_adjust &= 15;
    for (i = k = 0; i < nb_args;) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (size) {
            if (!onstack[i + k]) {
                ++i;
                continue;
            }
            /* Possibly adjust stack to align SSE boundary.  We're processing
               args from right to left while allocating happens left to right
               (stack grows down), so the adjustment needs to happen _after_
               an argument that requires it. */
            if (stack_adjust) {
                o(0x50); /* push %rax; aka sub $8,%rsp */
                args_size += 8;
                stack_adjust = 0;
            }
            if (onstack[i + k] == 2)
                stack_adjust = 1;
        }

        vrotb(i+1);

        switch (vtop->type.t & VT_BTYPE) {
        case VT_STRUCT:
            /* allocate the necessary size on stack */
            o(0x48);
            oad(0xec81, size); /* sub $xxx, %rsp */
            /* generate structure store */
            r = get_reg(RC_INT);
            orex(1, r, 0, 0x89); /* mov %rsp, r */
            o(0xe0 + REG_VALUE(r));
            vset(&vtop->type, r | VT_LVAL, 0);
            vswap();
            vstore();
            break;

        case VT_LDOUBLE:
            gv(RC_ST0);
            oad(0xec8148, size); /* sub $xxx, %rsp */
            o(0x7cdb); /* fstpt 0(%rsp) */
            g(0x24);
            g(0x00);
            break;

        case VT_FLOAT:
        case VT_DOUBLE:
            assert(mode == x86_64_mode_sse);
            r = gv(RC_FLOAT);
            o(0x50); /* push $rax */
            /* movq %xmmN, (%rsp) */
            o(0xd60f66);
            o(0x04 + REG_VALUE(r)*8);
            o(0x24);
            break;

        default:
            assert(mode == x86_64_mode_integer);
            /* simple type */
            /* XXX: implicit cast ? */
            r = gv(RC_INT);
            orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
            break;
        }
        args_size += size;

        vpop();
        --nb_args;
        k++;
    }

    tcc_free(onstack);

    /* XXX This should be superfluous.  */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        if (size == 0) continue;
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            int d;
            gen_reg -= reg_count;
            r = gv(RC_INT);
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here.  */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    if (vtop->type.ref->f.func_type != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}

#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}

/* generate function prolog of type 't' */
void gfunc_prolog(Sym *func_sym)
{
    CType *func_type = &func_sym->type;
    X86_64_Mode mode, ret_mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;
    ret_mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);

    if (func_var) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = ret_mode == x86_64_mode_memory;
        seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count > REGN)
                    goto stack_arg;
                seen_reg_num += reg_count;
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count > 8)
                    goto stack_arg;
                seen_sse_num += reg_count;
                break;
            }
        }

        loc -= 24;
        /* movl $0x????????, -0x18(%rbp) */
        o(0xe845c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0x14(%rbp) */
        o(0xec45c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* leaq $0x????????, %r11 */
        o(0x9d8d4c);
        gen_le32(seen_stack_size);
        /* movq %r11, -0x10(%rbp) */
        o(0xf05d894c);
        /* leaq $-192(%rbp), %r11 */
        o(0x9d8d4c);
        gen_le32(-176 - 24);
        /* movq %r11, -0x8(%rbp) */
        o(0xf85d894c);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            if (!tcc_state->nosse) {
                o(0xd60f66); /* movq */
                gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            }
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    if (ret_mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (tcc_state->nosse)
                tcc_error("SSE disabled but floating point arguments used");
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_prolog();
#endif
}

/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_epilog();
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
    o(0xec8148);    /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */

ST_FUNC void gen_fill_nops(int bytes)
{
    while (bytes--)
        g(0x90);
}

/* generate a jump to a label */
int gjmp(int t)
{
    return gjmp2(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}

ST_FUNC int gjmp_append(int n, int t)
{
    void *p;
    /* insert vtop->c jump list in t */
    if (n) {
        uint32_t n1 = n, n2;
        while ((n2 = read32le(p = cur_text_section->data + n1)))
            n1 = n2;
        write32le(p, t);
        t = n;
    }
    return t;
}

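/* As in gsym_addr(), a jump list is a chain threaded through the rel32
   fields of the emitted jumps.  gjmp_append(n, t) walks list 'n' to its
   terminating 0 entry and links list 't' there, returning the combined
   list so that both sets of jumps get patched by a later gsym_addr(). */
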
ST_FUNC int gjmp_cond(int op, int t)
{
    if (op & 0x100) {
        /* This was a float compare.  If the parity flag is set
           the result was unordered.  For anything except != this
           means false and we don't jump (anding both conditions).
           For != this means true (oring both).
           Take care about inverting the test.  We need to jump
           to our target if the result was unordered and test wasn't NE,
           otherwise if unordered we don't want to jump.  */
        int v = vtop->cmp_r;
        op &= ~0x100;
        if (op ^ v ^ (v != TOK_NE))
            o(0x067a);  /* jp +6 */
        else {
            g(0x0f);
            t = gjmp2(0x8a, t); /* jp t */
        }
    }
    g(0x0f);
    t = gjmp2(op - 16, t);
    return t;
}

/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT)
            vset_VT_CMP(op);
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}

void gen_opl(int op)
{
    gen_opi(op);
}

void vpush_const(int t, int v)
{
    CType ctype = { t | VT_CONSTANT, 0 };
    vpushsym(&ctype, external_global_sym(v, &ctype));
    vtop->r |= VT_LVAL;
}

/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int bt = vtop->type.t & VT_BTYPE;
    int float_type = bt == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    if (op == TOK_NEG) { /* unary minus */
        gv(float_type);
        if (float_type == RC_ST0) {
            o(0xe0d9); /* fchs */
        } else {
            /* -0.0, in libtcc1.c */
            vpush_const(bt, bt == VT_FLOAT ? TOK___mzerosf : TOK___mzerodf);
            gv(RC_FLOAT);
            if (bt == VT_DOUBLE)
                o(0x66);
            /* xorp[sd] %xmm1, %xmm0 */
            o(0xc0570f | (REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8) << 16);
            vtop--;
        }
        return;
    }

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fc80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vset_VT_CMP(op);
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.i;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
                vtop->r = r = r | VT_LVAL;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vset_VT_CMP(op | 0x100);
            vtop->cmp_r = op;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
                vtop->r = r = r | VT_LVAL;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }
            vtop--;
        }
    }
}

/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}

/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}

/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}

// Generate sign extension from 32 to 64 bits:
ST_FUNC void gen_cvt_sxtw(void)
{
    int r = gv(RC_INT);
    /* x86_64 specific: movslq */
    o(0x6348);
    o(0xc0 + (REG_VALUE(r) << 3) + REG_VALUE(r));
}

/* char/short to int conversion */
ST_FUNC void gen_cvt_csti(int t)
{
    int r, sz, xl, ll;
    r = gv(RC_INT);
    sz = !(t & VT_UNSIGNED);
    xl = (t & VT_BTYPE) == VT_SHORT;
    ll = (vtop->type.t & VT_BTYPE) == VT_LLONG;
    orex(ll, r, 0, 0xc0b60f /* mov[sz] %a[xl], %eax */
        | (sz << 3 | xl) << 8
        | (REG_VALUE(r) << 3 | REG_VALUE(r)) << 16
        );
}

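/* The composite constant above packs opcode and ModRM into one o()
   call (bytes are emitted low to high): 0x0f, then 0xb6 | sz<<3 | xl,
   then a register-to-itself ModRM byte.  That selects movzbl (0f b6),
   movzwl (0f b7), movsbl (0f be) or movswl (0f bf) as appropriate. */
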
/* increment tcov counter */
ST_FUNC void gen_increment_tcov (SValue *sv)
{
    o(0x058348); /* addq $1, xxx(%rip) */
    greloca(cur_text_section, sv->sym, ind, R_X86_64_PC32, -5);
    gen_le32(0);
    o(1);
}

/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}

#ifdef TCC_TARGET_PE
/* Save result of gen_vla_alloc onto the stack */
ST_FUNC void gen_vla_result(int addr) {
    /* mov %rax,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RAX, VT_LOCAL, NULL, addr);
}
#endif

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
    int use_call = 0;

#if defined(CONFIG_TCC_BCHECK)
    use_call = tcc_state->do_bounds_check;
#endif
#ifdef TCC_TARGET_PE  /* alloca does more than just adjust %rsp on Windows */
    use_call = 1;
#endif
    if (use_call) {
        vpush_helper_func(TOK_alloca);
        vswap(); /* Move alloca ref past allocation size */
        gfunc_call(1);
    } else {
        int r;
        r = gv(RC_INT); /* allocation size */
        /* sub r,%rsp */
        o(0x2b48);
        o(0xe0 | REG_VALUE(r));
        /* We align to 16 bytes rather than align */
        /* and ~15, %rsp */
        o(0xf0e48348);
        vpop();
    }
}

/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/