x86-64: Fix psABI stdarg prologue
1 /*
2 * x86-64 code generator for TCC
4 * Copyright (c) 2008 Shinichiro Hamaji
6 * Based on i386-gen.c by Fabrice Bellard
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 #ifdef TARGET_DEFS_ONLY
25 /* number of available registers */
26 #define NB_REGS 25
27 #define NB_ASM_REGS 16
28 #define CONFIG_TCC_ASM
30 /* a register can belong to several classes. The classes must be
31 sorted from more general to more precise (see gv2() code which does
32 assumptions on it). */
33 #define RC_INT 0x0001 /* generic integer register */
34 #define RC_FLOAT 0x0002 /* generic float register */
35 #define RC_RAX 0x0004
36 #define RC_RCX 0x0008
37 #define RC_RDX 0x0010
38 #define RC_ST0 0x0080 /* only for long double */
39 #define RC_R8 0x0100
40 #define RC_R9 0x0200
41 #define RC_R10 0x0400
42 #define RC_R11 0x0800
43 #define RC_XMM0 0x1000
44 #define RC_XMM1 0x2000
45 #define RC_XMM2 0x4000
46 #define RC_XMM3 0x8000
47 #define RC_XMM4 0x10000
48 #define RC_XMM5 0x20000
49 #define RC_XMM6 0x40000
50 #define RC_XMM7 0x80000
51 #define RC_IRET RC_RAX /* function return: integer register */
52 #define RC_LRET RC_RDX /* function return: second integer register */
53 #define RC_FRET RC_XMM0 /* function return: float register */
54 #define RC_QRET RC_XMM1 /* function return: second float register */
56 /* pretty names for the registers */
57 enum {
58 TREG_RAX = 0,
59 TREG_RCX = 1,
60 TREG_RDX = 2,
61 TREG_RSP = 4,
62 TREG_RSI = 6,
63 TREG_RDI = 7,
65 TREG_R8 = 8,
66 TREG_R9 = 9,
67 TREG_R10 = 10,
68 TREG_R11 = 11,
70 TREG_XMM0 = 16,
71 TREG_XMM1 = 17,
72 TREG_XMM2 = 18,
73 TREG_XMM3 = 19,
74 TREG_XMM4 = 20,
75 TREG_XMM5 = 21,
76 TREG_XMM6 = 22,
77 TREG_XMM7 = 23,
79 TREG_ST0 = 24,
81 TREG_MEM = 0x20
84 #define REX_BASE(reg) (((reg) >> 3) & 1)
85 #define REG_VALUE(reg) ((reg) & 7)
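/* Example: how these two macros split a register number for instruction
   encoding (a minimal sketch, illustrative only): %r10 is number 10, so
   REX_BASE(10) == 1 supplies the REX.B/REX.R extension bit and
   REG_VALUE(10) == 2 supplies the low three ModRM bits; together they
   address all sixteen GP registers. */
#if 0 /* illustration, not compiled */
static void reg_split_demo(void)
{
    int r = TREG_R10;               /* 10 */
    int rex_bit = REX_BASE(r);      /* 1: goes into the REX prefix */
    int modrm_bits = REG_VALUE(r);  /* 2: goes into the ModRM byte */
    (void)rex_bit; (void)modrm_bits;
}
#endif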
87 /* return registers for function */
88 #define REG_IRET TREG_RAX /* single word int return register */
89 #define REG_LRET TREG_RDX /* second word return register (for long long) */
90 #define REG_FRET TREG_XMM0 /* float return register */
91 #define REG_QRET TREG_XMM1 /* second float return register */
93 /* defined if function parameters must be evaluated in reverse order */
94 #define INVERT_FUNC_PARAMS
96 /* pointer size, in bytes */
97 #define PTR_SIZE 8
99 /* long double size and alignment, in bytes */
100 #define LDOUBLE_SIZE 16
101 #define LDOUBLE_ALIGN 16
102 /* maximum alignment (for aligned attribute support) */
103 #define MAX_ALIGN 16
105 /******************************************************/
106 #else /* ! TARGET_DEFS_ONLY */
107 /******************************************************/
108 #include "tcc.h"
109 #include <assert.h>
111 ST_DATA const int reg_classes[NB_REGS] = {
112 /* eax */ RC_INT | RC_RAX,
113 /* ecx */ RC_INT | RC_RCX,
114 /* edx */ RC_INT | RC_RDX,
120 RC_R8,
121 RC_R9,
122 RC_R10,
123 RC_R11,
128 /* xmm0 */ RC_FLOAT | RC_XMM0,
129 /* xmm1 */ RC_FLOAT | RC_XMM1,
130 /* xmm2 */ RC_FLOAT | RC_XMM2,
131 /* xmm3 */ RC_FLOAT | RC_XMM3,
132 /* xmm4 */ RC_FLOAT | RC_XMM4,
133 /* xmm5 */ RC_FLOAT | RC_XMM5,
134 /* xmm6 and xmm7 are included so gv() can be used on them,
135 but they are not tagged with RC_FLOAT because they are
136 callee saved on Windows */
137 RC_XMM6,
138 RC_XMM7,
139 /* st0 */ RC_ST0
142 static unsigned long func_sub_sp_offset;
143 static int func_ret_sub;
145 /* XXX: make it faster ? */
146 ST_FUNC void g(int c)
148 int ind1;
149 if (nocode_wanted)
150 return;
151 ind1 = ind + 1;
152 if (ind1 > cur_text_section->data_allocated)
153 section_realloc(cur_text_section, ind1);
154 cur_text_section->data[ind] = c;
155 ind = ind1;
158 ST_FUNC void o(unsigned int c)
160 while (c) {
161 g(c);
162 c = c >> 8;
166 ST_FUNC void gen_le16(int v)
168 g(v);
169 g(v >> 8);
172 ST_FUNC void gen_le32(int c)
174 g(c);
175 g(c >> 8);
176 g(c >> 16);
177 g(c >> 24);
180 ST_FUNC void gen_le64(int64_t c)
182 g(c);
183 g(c >> 8);
184 g(c >> 16);
185 g(c >> 24);
186 g(c >> 32);
187 g(c >> 40);
188 g(c >> 48);
189 g(c >> 56);
192 static void orex(int ll, int r, int r2, int b)
194 if ((r & VT_VALMASK) >= VT_CONST)
195 r = 0;
196 if ((r2 & VT_VALMASK) >= VT_CONST)
197 r2 = 0;
198 if (ll || REX_BASE(r) || REX_BASE(r2))
199 o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
200 o(b);
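/* Example use of orex(): emitting the 64-bit reg-to-reg move
   "mov %rax,%r10". A sketch, illustrative only; the same pattern
   appears in load() below. */
#if 0
    orex(1, TREG_R10, TREG_RAX, 0x89);  /* REX.W|B then opcode: 0x49 0x89 */
    o(0xc0 + REG_VALUE(TREG_R10) + REG_VALUE(TREG_RAX) * 8); /* ModRM 0xc2 */
#endif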
203 /* output a symbol and patch all calls to it */
204 ST_FUNC void gsym_addr(int t, int a)
206 while (t) {
207 unsigned char *ptr = cur_text_section->data + t;
208 uint32_t n = read32le(ptr); /* next value */
209 write32le(ptr, a - t - 4);
210 t = n;
214 void gsym(int t)
216 gsym_addr(t, ind);
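/* How the forward-reference chain works: an unresolved jump emitted with
   oad() stores the previous chain head in its own 32-bit displacement
   slot, so 't' heads a linked list threaded through the code buffer and
   gsym_addr() both walks and patches it. A sketch (illustrative only): */
#if 0
    int t = 0;
    t = gjmp(t);   /* jmp ???: rel32 slot holds the old head (0 = end) */
    t = gjmp(t);   /* jmp ???: rel32 slot holds the first jump's slot */
    gsym(t);       /* walk the chain, resolve every slot to 'ind' */
#endif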
220 static int is64_type(int t)
222 return ((t & VT_BTYPE) == VT_PTR ||
223 (t & VT_BTYPE) == VT_FUNC ||
224 (t & VT_BTYPE) == VT_LLONG);
227 /* instruction + 4 bytes data. Return the address of the data */
228 static int oad(int c, int s)
230 int t;
231 if (nocode_wanted)
232 return s;
233 o(c);
234 t = ind;
235 gen_le32(s);
236 return t;
239 /* generate jmp to a label */
240 #define gjmp2(instr,lbl) oad(instr,lbl)
242 ST_FUNC void gen_addr32(int r, Sym *sym, long c)
244 if (r & VT_SYM)
245 greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;
246 gen_le32(c);
249 /* output constant with relocation if 'r & VT_SYM' is true */
250 ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
252 if (r & VT_SYM)
253 greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
254 gen_le64(c);
257 /* output constant with relocation if 'r & VT_SYM' is true */
258 ST_FUNC void gen_addrpc32(int r, Sym *sym, long c)
260 if (r & VT_SYM)
261 greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
262 gen_le32(c-4);
265 /* output got address with relocation */
266 static void gen_gotpcrel(int r, Sym *sym, int c)
268 #ifdef TCC_TARGET_PE
269 tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
270 get_tok_str(sym->v, NULL), c, r,
271 cur_text_section->data[ind-3],
272 cur_text_section->data[ind-2],
273 cur_text_section->data[ind-1]
275 #endif
276 greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
277 gen_le32(0);
278 if (c) {
279 /* we use add c, %xxx for displacement */
280 orex(1, r, 0, 0x81);
281 o(0xc0 + REG_VALUE(r));
282 gen_le32(c);
286 static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
288 op_reg = REG_VALUE(op_reg) << 3;
289 if ((r & VT_VALMASK) == VT_CONST) {
290 /* constant memory reference */
291 o(0x05 | op_reg);
292 if (is_got) {
293 gen_gotpcrel(r, sym, c);
294 } else {
295 gen_addrpc32(r, sym, c);
297 } else if ((r & VT_VALMASK) == VT_LOCAL) {
298 /* currently, we use only ebp as base */
299 if (c == (char)c) {
300 /* short reference */
301 o(0x45 | op_reg);
302 g(c);
303 } else {
304 oad(0x85 | op_reg, c);
306 } else if ((r & VT_VALMASK) >= TREG_MEM) {
307 if (c) {
308 g(0x80 | op_reg | REG_VALUE(r));
309 gen_le32(c);
310 } else {
311 g(0x00 | op_reg | REG_VALUE(r));
313 } else {
314 g(0x00 | op_reg | REG_VALUE(r));
318 /* generate a modrm reference. 'op_reg' contains the additional 3
319 opcode bits */
320 static void gen_modrm(int op_reg, int r, Sym *sym, int c)
322 gen_modrm_impl(op_reg, r, sym, c, 0);
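/* ModRM refresher for the cases above (mod:2 reg:3 rm:3, a sketch):
     0x05 | op_reg          mod=00 rm=101: RIP-relative disp32 (const/GOT)
     0x45 | op_reg          mod=01 rm=101: disp8(%rbp), short local refs
     0x85 | op_reg          mod=10 rm=101: disp32(%rbp), large frame offsets
     0x80 | op_reg | reg    mod=10: register-indirect with disp32
     0x00 | op_reg | reg    mod=00: plain register-indirect */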
325 /* generate a modrm reference. 'op_reg' contains the additional 3
326 opcode bits */
327 static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
329 int is_got;
330 is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
331 orex(1, r, op_reg, opcode);
332 gen_modrm_impl(op_reg, r, sym, c, is_got);
336 /* load 'r' from value 'sv' */
337 void load(int r, SValue *sv)
339 int v, t, ft, fc, fr;
340 SValue v1;
342 #ifdef TCC_TARGET_PE
343 SValue v2;
344 sv = pe_getimport(sv, &v2);
345 #endif
347 fr = sv->r;
348 ft = sv->type.t & ~VT_DEFSIGN;
349 fc = sv->c.i;
350 if (fc != sv->c.i && (fr & VT_SYM))
351 tcc_error("64 bit addend in load");
353 ft &= ~(VT_VOLATILE | VT_CONSTANT);
355 #ifndef TCC_TARGET_PE
356 /* we use indirect access via got */
357 if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
358 (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
359 /* use the result register as a temporary register */
360 int tr = r | TREG_MEM;
361 if (is_float(ft)) {
362 /* we cannot use float registers as temporary registers */
363 tr = get_reg(RC_INT) | TREG_MEM;
365 gen_modrm64(0x8b, tr, fr, sv->sym, 0);
367 /* load from the temporary register */
368 fr = tr | VT_LVAL;
370 #endif
372 v = fr & VT_VALMASK;
373 if (fr & VT_LVAL) {
374 int b, ll;
375 if (v == VT_LLOCAL) {
376 v1.type.t = VT_PTR;
377 v1.r = VT_LOCAL | VT_LVAL;
378 v1.c.i = fc;
379 fr = r;
380 if (!(reg_classes[fr] & (RC_INT|RC_R11)))
381 fr = get_reg(RC_INT);
382 load(fr, &v1);
384 ll = 0;
385 /* Like GCC we can load from small enough properly sized
386 structs and unions as well.
387 XXX maybe move to generic operand handling, but should
388 occur only with asm, so tccasm.c might also be a better place */
389 if ((ft & VT_BTYPE) == VT_STRUCT) {
390 int align;
391 switch (type_size(&sv->type, &align)) {
392 case 1: ft = VT_BYTE; break;
393 case 2: ft = VT_SHORT; break;
394 case 4: ft = VT_INT; break;
395 case 8: ft = VT_LLONG; break;
396 default:
397 tcc_error("invalid aggregate type for register load");
398 break;
401 if ((ft & VT_BTYPE) == VT_FLOAT) {
402 b = 0x6e0f66;
403 r = REG_VALUE(r); /* movd */
404 } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
405 b = 0x7e0ff3; /* movq */
406 r = REG_VALUE(r);
407 } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
408 b = 0xdb, r = 5; /* fldt */
409 } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
410 b = 0xbe0f; /* movsbl */
411 } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
412 b = 0xb60f; /* movzbl */
413 } else if ((ft & VT_TYPE) == VT_SHORT) {
414 b = 0xbf0f; /* movswl */
415 } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
416 b = 0xb70f; /* movzwl */
417 } else {
418 assert(((ft & VT_BTYPE) == VT_INT) || ((ft & VT_BTYPE) == VT_LLONG)
419 || ((ft & VT_BTYPE) == VT_PTR) || ((ft & VT_BTYPE) == VT_ENUM)
420 || ((ft & VT_BTYPE) == VT_FUNC));
421 ll = is64_type(ft);
422 b = 0x8b;
424 if (ll) {
425 gen_modrm64(b, r, fr, sv->sym, fc);
426 } else {
427 orex(ll, fr, r, b);
428 gen_modrm(r, fr, sv->sym, fc);
430 } else {
431 if (v == VT_CONST) {
432 if (fr & VT_SYM) {
433 #ifdef TCC_TARGET_PE
434 orex(1,0,r,0x8d);
435 o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
436 gen_addrpc32(fr, sv->sym, fc);
437 #else
438 if (sv->sym->type.t & VT_STATIC) {
439 orex(1,0,r,0x8d);
440 o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
441 gen_addrpc32(fr, sv->sym, fc);
442 } else {
443 orex(1,0,r,0x8b);
444 o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
445 gen_gotpcrel(r, sv->sym, fc);
447 #endif
448 } else if (is64_type(ft)) {
449 orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
450 gen_le64(sv->c.i);
451 } else {
452 orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
453 gen_le32(fc);
455 } else if (v == VT_LOCAL) {
456 orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
457 gen_modrm(r, VT_LOCAL, sv->sym, fc);
458 } else if (v == VT_CMP) {
459 orex(0,r,0,0);
460 if ((fc & ~0x100) != TOK_NE)
461 oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
462 else
463 oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
464 if (fc & 0x100)
466 /* This was a float compare. If the parity bit is
467 set the result was unordered, meaning false for everything
468 except TOK_NE, and true for TOK_NE. */
469 fc &= ~0x100;
470 o(0x037a + (REX_BASE(r) << 8));
472 orex(0,r,0, 0x0f); /* setxx %br */
473 o(fc);
474 o(0xc0 + REG_VALUE(r));
475 } else if (v == VT_JMP || v == VT_JMPI) {
476 t = v & 1;
477 orex(0,r,0,0);
478 oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
479 o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
480 gsym(fc);
481 orex(0,r,0,0);
482 oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
483 } else if (v != r) {
484 if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
485 if (v == TREG_ST0) {
486 /* gen_cvt_ftof(VT_DOUBLE); */
487 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
488 /* movsd -0x10(%rsp),%xmmN */
489 o(0x100ff2);
490 o(0x44 + REG_VALUE(r)*8); /* %xmmN */
491 o(0xf024);
492 } else {
493 assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
494 if ((ft & VT_BTYPE) == VT_FLOAT) {
495 o(0x100ff3);
496 } else {
497 assert((ft & VT_BTYPE) == VT_DOUBLE);
498 o(0x100ff2);
500 o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
502 } else if (r == TREG_ST0) {
503 assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
504 /* gen_cvt_ftof(VT_LDOUBLE); */
505 /* movsd %xmmN,-0x10(%rsp) */
506 o(0x110ff2);
507 o(0x44 + REG_VALUE(r)*8); /* %xmmN */
508 o(0xf024);
509 o(0xf02444dd); /* fldl -0x10(%rsp) */
510 } else {
511 orex(1,r,v, 0x89);
512 o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
518 /* store register 'r' in lvalue 'v' */
519 void store(int r, SValue *v)
521 int fr, bt, ft, fc;
522 int op64 = 0;
523 /* store the REX prefix in this variable when PIC is enabled */
524 int pic = 0;
526 #ifdef TCC_TARGET_PE
527 SValue v2;
528 v = pe_getimport(v, &v2);
529 #endif
531 fr = v->r & VT_VALMASK;
532 ft = v->type.t;
533 fc = v->c.i;
534 if (fc != v->c.i && (fr & VT_SYM))
535 tcc_error("64 bit addend in store");
536 ft &= ~(VT_VOLATILE | VT_CONSTANT);
537 bt = ft & VT_BTYPE;
539 #ifndef TCC_TARGET_PE
540 /* we need to access the variable via got */
541 if (fr == VT_CONST && (v->r & VT_SYM)) {
542 /* mov xx(%rip), %r11 */
543 o(0x1d8b4c);
544 gen_gotpcrel(TREG_R11, v->sym, v->c.i);
545 pic = is64_type(bt) ? 0x49 : 0x41;
547 #endif
549 /* XXX: incorrect if float reg to reg */
550 if (bt == VT_FLOAT) {
551 o(0x66);
552 o(pic);
553 o(0x7e0f); /* movd */
554 r = REG_VALUE(r);
555 } else if (bt == VT_DOUBLE) {
556 o(0x66);
557 o(pic);
558 o(0xd60f); /* movq */
559 r = REG_VALUE(r);
560 } else if (bt == VT_LDOUBLE) {
561 o(0xc0d9); /* fld %st(0) */
562 o(pic);
563 o(0xdb); /* fstpt */
564 r = 7;
565 } else {
566 if (bt == VT_SHORT)
567 o(0x66);
568 o(pic);
569 if (bt == VT_BYTE || bt == VT_BOOL)
570 orex(0, 0, r, 0x88);
571 else if (is64_type(bt))
572 op64 = 0x89;
573 else
574 orex(0, 0, r, 0x89);
576 if (pic) {
577 /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
578 if (op64)
579 o(op64);
580 o(3 + (r << 3));
581 } else if (op64) {
582 if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
583 gen_modrm64(op64, r, v->r, v->sym, fc);
584 } else if (fr != r) {
585 /* XXX: do we ever get here? */
586 abort();
587 o(0xc0 + fr + r * 8); /* mov r, fr */
589 } else {
590 if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
591 gen_modrm(r, v->r, v->sym, fc);
592 } else if (fr != r) {
593 /* XXX: do we ever get here? */
594 abort();
595 o(0xc0 + fr + r * 8); /* mov r, fr */
600 /* 'is_jmp' is '1' if it is a jump */
601 static void gcall_or_jmp(int is_jmp)
603 int r;
604 if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
605 ((vtop->r & VT_SYM) || (vtop->c.i-4) == (int)(vtop->c.i-4))) {
606 /* constant case */
607 if (vtop->r & VT_SYM) {
608 /* relocation case */
609 #ifdef TCC_TARGET_PE
610 greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
611 #else
612 greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
613 #endif
614 } else {
615 /* put an empty PC32 relocation */
616 put_elf_reloca(symtab_section, cur_text_section,
617 ind + 1, R_X86_64_PC32, 0, (int)(vtop->c.i-4));
619 oad(0xe8 + is_jmp, 0); /* call/jmp im */
620 } else {
621 /* otherwise, indirect call */
622 r = TREG_R11;
623 load(r, vtop);
624 o(0x41); /* REX */
625 o(0xff); /* call/jmp *r */
626 o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
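/* The two shapes emitted above, as concrete bytes (illustrative):
     direct:    e8 xx xx xx xx     call rel32  (rel32 filled by relocation)
     indirect:  41 ff d3           call *%r11  (ff /2, REX.B selects r11)
   With is_jmp set the same forms become e9 (jmp rel32) and ff /4
   (41 ff e3, jmp *%r11). */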
630 #if defined(CONFIG_TCC_BCHECK)
631 #ifndef TCC_TARGET_PE
632 static addr_t func_bound_offset;
633 static unsigned long func_bound_ind;
634 #endif
636 static void gen_static_call(int v)
638 Sym *sym = external_global_sym(v, &func_old_type, 0);
639 oad(0xe8, 0);
640 greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
643 /* generate a bounded pointer addition */
644 ST_FUNC void gen_bounded_ptr_add(void)
646 /* save all temporary registers */
647 save_regs(0);
649 /* prepare fast x86_64 function call */
650 gv(RC_RAX);
651 o(0xc68948); // mov %rax,%rsi ## second arg in %rsi, this must be size
652 vtop--;
654 gv(RC_RAX);
655 o(0xc78948); // mov %rax,%rdi ## first arg in %rdi, this must be ptr
656 vtop--;
658 /* do a fast function call */
659 gen_static_call(TOK___bound_ptr_add);
661 /* returned pointer is in rax */
662 vtop++;
663 vtop->r = TREG_RAX | VT_BOUNDED;
666 /* relocation offset of the bounding function call point */
667 vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
670 /* patch pointer addition in vtop so that pointer dereferencing is
671 also tested */
672 ST_FUNC void gen_bounded_ptr_deref(void)
674 addr_t func;
675 int size, align;
676 ElfW(Rela) *rel;
677 Sym *sym;
679 size = 0;
680 /* XXX: put that code in generic part of tcc */
681 if (!is_float(vtop->type.t)) {
682 if (vtop->r & VT_LVAL_BYTE)
683 size = 1;
684 else if (vtop->r & VT_LVAL_SHORT)
685 size = 2;
687 if (!size)
688 size = type_size(&vtop->type, &align);
689 switch(size) {
690 case 1: func = TOK___bound_ptr_indir1; break;
691 case 2: func = TOK___bound_ptr_indir2; break;
692 case 4: func = TOK___bound_ptr_indir4; break;
693 case 8: func = TOK___bound_ptr_indir8; break;
694 case 12: func = TOK___bound_ptr_indir12; break;
695 case 16: func = TOK___bound_ptr_indir16; break;
696 default:
697 tcc_error("unhandled size when dereferencing bounded pointer");
698 func = 0;
699 break;
702 sym = external_global_sym(func, &func_old_type, 0);
703 if (!sym->c)
704 put_extern_sym(sym, NULL, 0, 0);
706 /* patch relocation */
707 /* XXX: find a better solution ? */
709 rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
710 rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
712 #endif
714 #ifdef TCC_TARGET_PE
716 #define REGN 4
717 static const uint8_t arg_regs[REGN] = {
718 TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
721 /* Prepare arguments in R10 and R11 rather than RCX and RDX
722 because gv() will never use these registers */
723 static int arg_prepare_reg(int idx) {
724 if (idx == 0 || idx == 1)
725 /* idx=0: r10, idx=1: r11 */
726 return idx + 10;
727 else
728 return arg_regs[idx];
731 static int func_scratch;
733 /* Generate function call. The function address is pushed first, then
734 all the parameters in call order. This function pops all the
735 parameters and the function address. */
737 void gen_offs_sp(int b, int r, int d)
739 orex(1,0,r & 0x100 ? 0 : r, b);
740 if (d == (char)d) {
741 o(0x2444 | (REG_VALUE(r) << 3));
742 g(d);
743 } else {
744 o(0x2484 | (REG_VALUE(r) << 3));
745 gen_le32(d);
749 /* Return the number of registers needed to return the struct, or 0 if
750 returning via struct pointer. */
751 ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
753 int size, align;
754 *ret_align = 1; // Never have to re-align return values for x86-64
755 *regsize = 8;
756 size = type_size(vt, &align);
757 if (size > 8 || (size & (size - 1)))
758 return 0;
759 if (size == 8)
760 ret->t = VT_LLONG;
761 else if (size == 4)
762 ret->t = VT_INT;
763 else if (size == 2)
764 ret->t = VT_SHORT;
765 else
766 ret->t = VT_BYTE;
767 ret->ref = NULL;
768 return 1;
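/* Example of the Win64 rule above: a struct comes back in RAX only when
   its size is a power of two no larger than 8 (a sketch):
     struct s1 { int a, b; };      size 8 -> returned in RAX as VT_LLONG
     struct s2 { char a, b, c; };  size 3 -> returned via hidden pointer */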
771 static int is_sse_float(int t) {
772 int bt;
773 bt = t & VT_BTYPE;
774 return bt == VT_DOUBLE || bt == VT_FLOAT;
777 int gfunc_arg_size(CType *type) {
778 int align;
779 if (type->t & (VT_ARRAY|VT_BITFIELD))
780 return 8;
781 return type_size(type, &align);
784 void gfunc_call(int nb_args)
786 int size, r, args_size, i, d, bt, struct_size;
787 int arg;
789 args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
790 arg = nb_args;
792 /* for struct arguments, we need to call memcpy and the function
793 call breaks register passing arguments we are preparing.
794 So, we process arguments which will be passed by stack first. */
795 struct_size = args_size;
796 for(i = 0; i < nb_args; i++) {
797 SValue *sv;
799 --arg;
800 sv = &vtop[-i];
801 bt = (sv->type.t & VT_BTYPE);
802 size = gfunc_arg_size(&sv->type);
804 if (size <= 8)
805 continue; /* arguments of 8 bytes or less are passed in registers or on the stack */
807 if (bt == VT_STRUCT) {
808 /* align to stack align size */
809 size = (size + 15) & ~15;
810 /* generate structure store */
811 r = get_reg(RC_INT);
812 gen_offs_sp(0x8d, r, struct_size);
813 struct_size += size;
815 /* generate memcpy call */
816 vset(&sv->type, r | VT_LVAL, 0);
817 vpushv(sv);
818 vstore();
819 --vtop;
820 } else if (bt == VT_LDOUBLE) {
821 gv(RC_ST0);
822 gen_offs_sp(0xdb, 0x107, struct_size);
823 struct_size += 16;
827 if (func_scratch < struct_size)
828 func_scratch = struct_size;
830 arg = nb_args;
831 struct_size = args_size;
833 for(i = 0; i < nb_args; i++) {
834 --arg;
835 bt = (vtop->type.t & VT_BTYPE);
837 size = gfunc_arg_size(&vtop->type);
838 if (size > 8) {
839 /* align to stack align size */
840 size = (size + 15) & ~15;
841 if (arg >= REGN) {
842 d = get_reg(RC_INT);
843 gen_offs_sp(0x8d, d, struct_size);
844 gen_offs_sp(0x89, d, arg*8);
845 } else {
846 d = arg_prepare_reg(arg);
847 gen_offs_sp(0x8d, d, struct_size);
849 struct_size += size;
850 } else {
851 if (is_sse_float(vtop->type.t)) {
852 if (tcc_state->nosse)
853 tcc_error("SSE disabled");
854 gv(RC_XMM0); /* only use one float register */
855 if (arg >= REGN) {
856 /* movq %xmm0, j*8(%rsp) */
857 gen_offs_sp(0xd60f66, 0x100, arg*8);
858 } else {
859 /* movaps %xmm0, %xmmN */
860 o(0x280f);
861 o(0xc0 + (arg << 3));
862 d = arg_prepare_reg(arg);
863 /* mov %xmm0, %rxx */
864 o(0x66);
865 orex(1,d,0, 0x7e0f);
866 o(0xc0 + REG_VALUE(d));
868 } else {
869 if (bt == VT_STRUCT) {
870 vtop->type.ref = NULL;
871 vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
872 : size > 1 ? VT_SHORT : VT_BYTE;
875 r = gv(RC_INT);
876 if (arg >= REGN) {
877 gen_offs_sp(0x89, r, arg*8);
878 } else {
879 d = arg_prepare_reg(arg);
880 orex(1,d,r,0x89); /* mov */
881 o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
885 vtop--;
887 save_regs(0);
889 /* Copy R10 and R11 into RCX and RDX, respectively */
890 if (nb_args > 0) {
891 o(0xd1894c); /* mov %r10, %rcx */
892 if (nb_args > 1) {
893 o(0xda894c); /* mov %r11, %rdx */
897 gcall_or_jmp(0);
898 /* other compilers don't clear the upper bits when returning char/short */
899 bt = vtop->type.ref->type.t & (VT_BTYPE | VT_UNSIGNED);
900 if (bt == (VT_BYTE | VT_UNSIGNED))
901 o(0xc0b60f); /* movzbl %al, %eax */
902 else if (bt == VT_BYTE)
903 o(0xc0be0f); /* movsbl %al, %eax */
904 else if (bt == VT_SHORT)
905 o(0x98); /* cwtl */
906 else if (bt == (VT_SHORT | VT_UNSIGNED))
907 o(0xc0b70f); /* movzwl %ax, %eax */
908 #if 0 /* handled in gen_cast() */
909 else if (bt == VT_INT)
910 o(0x9848); /* cltq */
911 else if (bt == (VT_INT | VT_UNSIGNED))
912 o(0xc089); /* mov %eax,%eax */
913 #endif
914 vtop--;
918 #define FUNC_PROLOG_SIZE 11
920 /* generate function prolog of type 't' */
921 void gfunc_prolog(CType *func_type)
923 int addr, reg_param_index, bt, size;
924 Sym *sym;
925 CType *type;
927 func_ret_sub = 0;
928 func_scratch = 0;
929 loc = 0;
931 addr = PTR_SIZE * 2;
932 ind += FUNC_PROLOG_SIZE;
933 func_sub_sp_offset = ind;
934 reg_param_index = 0;
936 sym = func_type->ref;
938 /* if the function returns a structure, then add an
939 implicit pointer parameter */
940 func_vt = sym->type;
941 func_var = (sym->c == FUNC_ELLIPSIS);
942 size = gfunc_arg_size(&func_vt);
943 if (size > 8) {
944 gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
945 func_vc = addr;
946 reg_param_index++;
947 addr += 8;
950 /* define parameters */
951 while ((sym = sym->next) != NULL) {
952 type = &sym->type;
953 bt = type->t & VT_BTYPE;
954 size = gfunc_arg_size(type);
955 if (size > 8) {
956 if (reg_param_index < REGN) {
957 gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
959 sym_push(sym->v & ~SYM_FIELD, type, VT_LLOCAL | VT_LVAL, addr);
960 } else {
961 if (reg_param_index < REGN) {
962 /* save arguments passed by register */
963 if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
964 if (tcc_state->nosse)
965 tcc_error("SSE disabled");
966 o(0xd60f66); /* movq */
967 gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
968 } else {
969 gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
972 sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);
974 addr += 8;
975 reg_param_index++;
978 while (reg_param_index < REGN) {
979 if (func_type->ref->c == FUNC_ELLIPSIS) {
980 gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
981 addr += 8;
983 reg_param_index++;
987 /* generate function epilog */
988 void gfunc_epilog(void)
990 int v, saved_ind;
992 o(0xc9); /* leave */
993 if (func_ret_sub == 0) {
994 o(0xc3); /* ret */
995 } else {
996 o(0xc2); /* ret n */
997 g(func_ret_sub);
998 g(func_ret_sub >> 8);
1001 saved_ind = ind;
1002 ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
1003 /* align local size to word & save local variables */
1004 v = (func_scratch + -loc + 15) & -16;
1006 if (v >= 4096) {
1007 Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
1008 oad(0xb8, v); /* mov stacksize, %eax */
1009 oad(0xe8, 0); /* call __chkstk (it sets up the stack frame too) */
1010 greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
1011 o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
1012 } else {
1013 o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
1014 o(0xec8148); /* sub rsp, stacksize */
1015 gen_le32(v);
1018 cur_text_section->data_offset = saved_ind;
1019 pe_add_unwind_data(ind, saved_ind, v);
1020 ind = cur_text_section->data_offset;
1023 #else
1025 static void gadd_sp(int val)
1027 if (val == (char)val) {
1028 o(0xc48348);
1029 g(val);
1030 } else {
1031 oad(0xc48148, val); /* add $xxx, %rsp */
1035 typedef enum X86_64_Mode {
1036 x86_64_mode_none,
1037 x86_64_mode_memory,
1038 x86_64_mode_integer,
1039 x86_64_mode_sse,
1040 x86_64_mode_x87
1041 } X86_64_Mode;
1043 static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
1045 if (a == b)
1046 return a;
1047 else if (a == x86_64_mode_none)
1048 return b;
1049 else if (b == x86_64_mode_none)
1050 return a;
1051 else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
1052 return x86_64_mode_memory;
1053 else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
1054 return x86_64_mode_integer;
1055 else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
1056 return x86_64_mode_memory;
1057 else
1058 return x86_64_mode_sse;
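/* Example of the merge rules above: for struct { int a; double b; } the
   field classes INTEGER and SSE merge to INTEGER, so the whole 16-byte
   struct travels in two GP registers. This is TCC's simplified
   whole-struct classification; the full psABI classifies each eightbyte
   separately and would pass one half in a GP and one in an SSE register. */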
1061 static X86_64_Mode classify_x86_64_inner(CType *ty)
1063 X86_64_Mode mode;
1064 Sym *f;
1066 switch (ty->t & VT_BTYPE) {
1067 case VT_VOID: return x86_64_mode_none;
1069 case VT_INT:
1070 case VT_BYTE:
1071 case VT_SHORT:
1072 case VT_LLONG:
1073 case VT_BOOL:
1074 case VT_PTR:
1075 case VT_FUNC:
1076 case VT_ENUM: return x86_64_mode_integer;
1078 case VT_FLOAT:
1079 case VT_DOUBLE: return x86_64_mode_sse;
1081 case VT_LDOUBLE: return x86_64_mode_x87;
1083 case VT_STRUCT:
1084 f = ty->ref;
1086 mode = x86_64_mode_none;
1087 for (f = f->next; f; f = f->next)
1088 mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));
1090 return mode;
1092 assert(0);
1093 return 0;
1096 static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
1098 X86_64_Mode mode;
1099 int size, align, ret_t = 0;
1101 if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
1102 *psize = 8;
1103 *palign = 8;
1104 *reg_count = 1;
1105 ret_t = ty->t;
1106 mode = x86_64_mode_integer;
1107 } else {
1108 size = type_size(ty, &align);
1109 *psize = (size + 7) & ~7;
1110 *palign = (align + 7) & ~7;
1112 if (size > 16) {
1113 mode = x86_64_mode_memory;
1114 } else {
1115 mode = classify_x86_64_inner(ty);
1116 switch (mode) {
1117 case x86_64_mode_integer:
1118 if (size > 8) {
1119 *reg_count = 2;
1120 ret_t = VT_QLONG;
1121 } else {
1122 *reg_count = 1;
1123 ret_t = (size > 4) ? VT_LLONG : VT_INT;
1125 break;
1127 case x86_64_mode_x87:
1128 *reg_count = 1;
1129 ret_t = VT_LDOUBLE;
1130 break;
1132 case x86_64_mode_sse:
1133 if (size > 8) {
1134 *reg_count = 2;
1135 ret_t = VT_QFLOAT;
1136 } else {
1137 *reg_count = 1;
1138 ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
1140 break;
1141 default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none*/
1146 if (ret) {
1147 ret->ref = NULL;
1148 ret->t = ret_t;
1151 return mode;
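/* A few concrete classifications under the routine above (illustrative):
     int                       integer, reg_count 1, ret_t VT_INT
     struct { double x, y; }   sse,     reg_count 2, ret_t VT_QFLOAT
     long double               x87,     reg_count 1 (passed on the stack)
     any struct over 16 bytes  memory   (always passed/returned in memory) */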
1154 ST_FUNC int classify_x86_64_va_arg(CType *ty)
1156 /* This definition must be synced with stdarg.h */
1157 enum __va_arg_type {
1158 __va_gen_reg, __va_float_reg, __va_stack
1160 int size, align, reg_count;
1161 X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
1162 switch (mode) {
1163 default: return __va_stack;
1164 case x86_64_mode_integer: return __va_gen_reg;
1165 case x86_64_mode_sse: return __va_float_reg;
1169 /* Return the number of registers needed to return the struct, or 0 if
1170 returning via struct pointer. */
1171 ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
1173 int size, align, reg_count;
1174 *ret_align = 1; // Never have to re-align return values for x86-64
1175 *regsize = 8;
1176 return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
1179 #define REGN 6
1180 static const uint8_t arg_regs[REGN] = {
1181 TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
1184 static int arg_prepare_reg(int idx) {
1185 if (idx == 2 || idx == 3)
1186 /* idx=2: r10, idx=3: r11 */
1187 return idx + 8;
1188 else
1189 return arg_regs[idx];
1192 /* Generate function call. The function address is pushed first, then
1193 all the parameters in call order. This function pops all the
1194 parameters and the function address. */
1195 void gfunc_call(int nb_args)
1197 X86_64_Mode mode;
1198 CType type;
1199 int size, align, r, args_size, stack_adjust, i, reg_count;
1200 int nb_reg_args = 0;
1201 int nb_sse_args = 0;
1202 int sse_reg, gen_reg;
1203 char _onstack[nb_args], *onstack = _onstack;
1205 /* calculate the number of integer/float register arguments, remember
1206 arguments to be passed via stack (in onstack[]), and also remember
1207 if we have to align the stack pointer to 16 (onstack[i] == 2). Needs
1208 to be done in a left-to-right pass over arguments. */
1209 stack_adjust = 0;
1210 for(i = nb_args - 1; i >= 0; i--) {
1211 mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
1212 if (mode == x86_64_mode_sse && nb_sse_args + reg_count <= 8) {
1213 nb_sse_args += reg_count;
1214 onstack[i] = 0;
1215 } else if (mode == x86_64_mode_integer && nb_reg_args + reg_count <= REGN) {
1216 nb_reg_args += reg_count;
1217 onstack[i] = 0;
1218 } else if (mode == x86_64_mode_none) {
1219 onstack[i] = 0;
1220 } else {
1221 if (align == 16 && (stack_adjust &= 15)) {
1222 onstack[i] = 2;
1223 stack_adjust = 0;
1224 } else
1225 onstack[i] = 1;
1226 stack_adjust += size;
1230 if (nb_sse_args && tcc_state->nosse)
1231 tcc_error("SSE disabled but floating point arguments passed");
1233 /* fetch cpu flag before generating any code */
1234 if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
1235 gv(RC_INT);
1237 /* for struct arguments, we need to call memcpy and the function
1238 call breaks register passing arguments we are preparing.
1239 So, we process arguments which will be passed by stack first. */
1240 gen_reg = nb_reg_args;
1241 sse_reg = nb_sse_args;
1242 args_size = 0;
1243 stack_adjust &= 15;
1244 for (i = 0; i < nb_args;) {
1245 mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
1246 if (!onstack[i]) {
1247 ++i;
1248 continue;
1250 /* Possibly adjust stack to align SSE boundary. We're processing
1251 args from right to left while allocating happens left to right
1252 (stack grows down), so the adjustment needs to happen _after_
1253 an argument that requires it. */
1254 if (stack_adjust) {
1255 o(0x50); /* push %rax; aka sub $8,%rsp */
1256 args_size += 8;
1257 stack_adjust = 0;
1259 if (onstack[i] == 2)
1260 stack_adjust = 1;
1262 vrotb(i+1);
1264 switch (vtop->type.t & VT_BTYPE) {
1265 case VT_STRUCT:
1266 /* allocate the necessary size on stack */
1267 o(0x48);
1268 oad(0xec81, size); /* sub $xxx, %rsp */
1269 /* generate structure store */
1270 r = get_reg(RC_INT);
1271 orex(1, r, 0, 0x89); /* mov %rsp, r */
1272 o(0xe0 + REG_VALUE(r));
1273 vset(&vtop->type, r | VT_LVAL, 0);
1274 vswap();
1275 vstore();
1276 break;
1278 case VT_LDOUBLE:
1279 gv(RC_ST0);
1280 oad(0xec8148, size); /* sub $xxx, %rsp */
1281 o(0x7cdb); /* fstpt 0(%rsp) */
1282 g(0x24);
1283 g(0x00);
1284 break;
1286 case VT_FLOAT:
1287 case VT_DOUBLE:
1288 assert(mode == x86_64_mode_sse);
1289 r = gv(RC_FLOAT);
1290 o(0x50); /* push %rax */
1291 /* movq %xmmN, (%rsp) */
1292 o(0xd60f66);
1293 o(0x04 + REG_VALUE(r)*8);
1294 o(0x24);
1295 break;
1297 default:
1298 assert(mode == x86_64_mode_integer);
1299 /* simple type */
1300 /* XXX: implicit cast ? */
1301 r = gv(RC_INT);
1302 orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
1303 break;
1305 args_size += size;
1307 vpop();
1308 --nb_args;
1309 onstack++;
1312 /* XXX This should be superfluous. */
1313 save_regs(0); /* save used temporary registers */
1315 /* then, we prepare register passing arguments.
1316 Note that we cannot set RDX and RCX in this loop because gv()
1317 may break these temporary registers. Let's use R10 and R11
1318 instead of them */
1319 assert(gen_reg <= REGN);
1320 assert(sse_reg <= 8);
1321 for(i = 0; i < nb_args; i++) {
1322 mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
1323 /* Alter stack entry type so that gv() knows how to treat it */
1324 vtop->type = type;
1325 if (mode == x86_64_mode_sse) {
1326 if (reg_count == 2) {
1327 sse_reg -= 2;
1328 gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
1329 if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
1330 /* movaps %xmm0, %xmmN */
1331 o(0x280f);
1332 o(0xc0 + (sse_reg << 3));
1333 /* movaps %xmm1, %xmmN */
1334 o(0x280f);
1335 o(0xc1 + ((sse_reg+1) << 3));
1337 } else {
1338 assert(reg_count == 1);
1339 --sse_reg;
1340 /* Load directly to register */
1341 gv(RC_XMM0 << sse_reg);
1343 } else if (mode == x86_64_mode_integer) {
1344 /* simple type */
1345 /* XXX: implicit cast ? */
1346 int d;
1347 gen_reg -= reg_count;
1348 r = gv(RC_INT);
1349 d = arg_prepare_reg(gen_reg);
1350 orex(1,d,r,0x89); /* mov */
1351 o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
1352 if (reg_count == 2) {
1353 d = arg_prepare_reg(gen_reg+1);
1354 orex(1,d,vtop->r2,0x89); /* mov */
1355 o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
1358 vtop--;
1360 assert(gen_reg == 0);
1361 assert(sse_reg == 0);
1363 /* We shouldn't have many operands on the stack anymore, but the
1364 call address itself is still there, and it might be in %eax
1365 (or edx/ecx) currently, which the below writes would clobber.
1366 So evict all remaining operands here. */
1367 save_regs(0);
1369 /* Copy R10 and R11 into RDX and RCX, respectively */
1370 if (nb_reg_args > 2) {
1371 o(0xd2894c); /* mov %r10, %rdx */
1372 if (nb_reg_args > 3) {
1373 o(0xd9894c); /* mov %r11, %rcx */
1377 if (vtop->type.ref->c != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
1378 oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
1379 gcall_or_jmp(0);
1380 if (args_size)
1381 gadd_sp(args_size);
1382 vtop--;
1386 #define FUNC_PROLOG_SIZE 11
1388 static void push_arg_reg(int i) {
1389 loc -= 8;
1390 gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
1393 /* generate function prolog of type 't' */
1394 void gfunc_prolog(CType *func_type)
1396 X86_64_Mode mode;
1397 int i, addr, align, size, reg_count;
1398 int param_addr = 0, reg_param_index, sse_param_index;
1399 Sym *sym;
1400 CType *type;
1402 sym = func_type->ref;
1403 addr = PTR_SIZE * 2;
1404 loc = 0;
1405 ind += FUNC_PROLOG_SIZE;
1406 func_sub_sp_offset = ind;
1407 func_ret_sub = 0;
1409 if (func_type->ref->c == FUNC_ELLIPSIS) {
1410 int seen_reg_num, seen_sse_num, seen_stack_size;
1411 seen_reg_num = seen_sse_num = 0;
1412 /* frame pointer and return address */
1413 seen_stack_size = PTR_SIZE * 2;
1414 /* count the number of seen parameters */
1415 sym = func_type->ref;
1416 while ((sym = sym->next) != NULL) {
1417 type = &sym->type;
1418 mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
1419 switch (mode) {
1420 default:
1421 stack_arg:
1422 seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
1423 break;
1425 case x86_64_mode_integer:
1426 if (seen_reg_num + reg_count > REGN)
1427 goto stack_arg;
1428 seen_reg_num += reg_count;
1429 break;
1431 case x86_64_mode_sse:
1432 if (seen_sse_num + reg_count > 8)
1433 goto stack_arg;
1434 seen_sse_num += reg_count;
1435 break;
1439 loc -= 16;
1440 /* movl $0x????????, -0x10(%rbp) */
1441 o(0xf045c7);
1442 gen_le32(seen_reg_num * 8);
1443 /* movl $0x????????, -0xc(%rbp) */
1444 o(0xf445c7);
1445 gen_le32(seen_sse_num * 16 + 48);
1446 /* movl $0x????????, -0x8(%rbp) */
1447 o(0xf845c7);
1448 gen_le32(seen_stack_size);
1450 /* save all register passing arguments */
1451 for (i = 0; i < 8; i++) {
1452 loc -= 16;
1453 if (!tcc_state->nosse) {
1454 o(0xd60f66); /* movq */
1455 gen_modrm(7 - i, VT_LOCAL, NULL, loc);
1457 /* movq $0, loc+8(%rbp) */
1458 o(0x85c748);
1459 gen_le32(loc + 8);
1460 gen_le32(0);
1462 for (i = 0; i < REGN; i++) {
1463 push_arg_reg(REGN-1-i);
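/* Layout produced for a variadic function, tying the stores above to the
   psABI register save area (a sketch; the three 32-bit cells must stay in
   sync with tcc's x86-64 stdarg.h):
     -0x10(%rbp)  gp_offset  = seen_reg_num * 8 (GP bytes already consumed
                               by named arguments)
     -0x0c(%rbp)  fp_offset  = seen_sse_num * 16 + 48 (48 = 6*8 bytes of GP
                               slots that precede the SSE slots)
     -0x08(%rbp)  overflow   = seen_stack_size (offset from %rbp to the
                               first stack-passed vararg)
   Below those cells sit eight 16-byte slots holding xmm0..xmm7 (upper
   halves zeroed), and below them the six GP argument registers with rdi
   at the lowest address, matching the offsets va_arg computes. */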
1467 sym = func_type->ref;
1468 reg_param_index = 0;
1469 sse_param_index = 0;
1471 /* if the function returns a structure, then add an
1472 implicit pointer parameter */
1473 func_vt = sym->type;
1474 mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
1475 if (mode == x86_64_mode_memory) {
1476 push_arg_reg(reg_param_index);
1477 func_vc = loc;
1478 reg_param_index++;
1480 /* define parameters */
1481 while ((sym = sym->next) != NULL) {
1482 type = &sym->type;
1483 mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
1484 switch (mode) {
1485 case x86_64_mode_sse:
1486 if (tcc_state->nosse)
1487 tcc_error("SSE disabled but floating point arguments used");
1488 if (sse_param_index + reg_count <= 8) {
1489 /* save arguments passed by register */
1490 loc -= reg_count * 8;
1491 param_addr = loc;
1492 for (i = 0; i < reg_count; ++i) {
1493 o(0xd60f66); /* movq */
1494 gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
1495 ++sse_param_index;
1497 } else {
1498 addr = (addr + align - 1) & -align;
1499 param_addr = addr;
1500 addr += size;
1502 break;
1504 case x86_64_mode_memory:
1505 case x86_64_mode_x87:
1506 addr = (addr + align - 1) & -align;
1507 param_addr = addr;
1508 addr += size;
1509 break;
1511 case x86_64_mode_integer: {
1512 if (reg_param_index + reg_count <= REGN) {
1513 /* save arguments passed by register */
1514 loc -= reg_count * 8;
1515 param_addr = loc;
1516 for (i = 0; i < reg_count; ++i) {
1517 gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
1518 ++reg_param_index;
1520 } else {
1521 addr = (addr + align - 1) & -align;
1522 param_addr = addr;
1523 addr += size;
1525 break;
1527 default: break; /* nothing to be done for x86_64_mode_none */
1529 sym_push(sym->v & ~SYM_FIELD, type,
1530 VT_LOCAL | VT_LVAL, param_addr);
1533 #ifdef CONFIG_TCC_BCHECK
1534 /* leave some room for bound checking code */
1535 if (tcc_state->do_bounds_check) {
1536 func_bound_offset = lbounds_section->data_offset;
1537 func_bound_ind = ind;
1538 oad(0xb8, 0); /* lbound section pointer */
1539 o(0xc78948); /* mov %rax,%rdi ## first arg in %rdi, this must be ptr */
1540 oad(0xb8, 0); /* call to function */
1542 #endif
1545 /* generate function epilog */
1546 void gfunc_epilog(void)
1548 int v, saved_ind;
1550 #ifdef CONFIG_TCC_BCHECK
1551 if (tcc_state->do_bounds_check
1552 && func_bound_offset != lbounds_section->data_offset)
1554 addr_t saved_ind;
1555 addr_t *bounds_ptr;
1556 Sym *sym_data;
1558 /* add end of table info */
1559 bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
1560 *bounds_ptr = 0;
1562 /* generate bound local allocation */
1563 sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
1564 func_bound_offset, lbounds_section->data_offset);
1565 saved_ind = ind;
1566 ind = func_bound_ind;
1567 greloca(cur_text_section, sym_data, ind + 1, R_X86_64_64, 0);
1568 ind = ind + 5 + 3;
1569 gen_static_call(TOK___bound_local_new);
1570 ind = saved_ind;
1572 /* generate bound check local freeing */
1573 o(0x5250); /* save returned value, if any */
1574 greloca(cur_text_section, sym_data, ind + 1, R_X86_64_64, 0);
1575 oad(0xb8, 0); /* mov xxx, %rax */
1576 o(0xc78948); /* mov %rax,%rdi # first arg in %rdi, this must be ptr */
1577 gen_static_call(TOK___bound_local_delete);
1578 o(0x585a); /* restore returned value, if any */
1580 #endif
1581 o(0xc9); /* leave */
1582 if (func_ret_sub == 0) {
1583 o(0xc3); /* ret */
1584 } else {
1585 o(0xc2); /* ret n */
1586 g(func_ret_sub);
1587 g(func_ret_sub >> 8);
1589 /* align local size to word & save local variables */
1590 v = (-loc + 15) & -16;
1591 saved_ind = ind;
1592 ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
1593 o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
1594 o(0xec8148); /* sub rsp, stacksize */
1595 gen_le32(v);
1596 ind = saved_ind;
1599 #endif /* not PE */
1601 /* generate a jump to a label */
1602 int gjmp(int t)
1604 return gjmp2(0xe9, t);
1607 /* generate a jump to a fixed address */
1608 void gjmp_addr(int a)
1610 int r;
1611 r = a - ind - 2;
1612 if (r == (char)r) {
1613 g(0xeb);
1614 g(r);
1615 } else {
1616 oad(0xe9, a - ind - 5);
1620 ST_FUNC void gtst_addr(int inv, int a)
1622 int v = vtop->r & VT_VALMASK;
1623 if (v == VT_CMP) {
1624 inv ^= (vtop--)->c.i;
1625 a -= ind + 2;
1626 if (a == (char)a) {
1627 g(inv - 32);
1628 g(a);
1629 } else {
1630 g(0x0f);
1631 oad(inv - 16, a - 4);
1633 } else if ((v & ~1) == VT_JMP) {
1634 if ((v & 1) != inv) {
1635 gjmp_addr(a);
1636 gsym(vtop->c.i);
1637 } else {
1638 gsym(vtop->c.i);
1639 o(0x05eb);
1640 gjmp_addr(a);
1642 vtop--;
1646 /* generate a test. set 'inv' to invert test. Stack entry is popped */
1647 ST_FUNC int gtst(int inv, int t)
1649 int v = vtop->r & VT_VALMASK;
1651 if (nocode_wanted) {
1653 } else if (v == VT_CMP) {
1654 /* fast case : can jump directly since flags are set */
1655 if (vtop->c.i & 0x100)
1657 /* This was a float compare. If the parity flag is set
1658 the result was unordered. For anything except != this
1659 means false and we don't jump (anding both conditions).
1660 For != this means true (oring both).
1661 Take care about inverting the test. We need to jump
1662 to our target if the result was unordered and test wasn't NE,
1663 otherwise if unordered we don't want to jump. */
1664 vtop->c.i &= ~0x100;
1665 if (inv == (vtop->c.i == TOK_NE))
1666 o(0x067a); /* jp +6 */
1667 else
1669 g(0x0f);
1670 t = gjmp2(0x8a, t); /* jp t */
1673 g(0x0f);
1674 t = gjmp2((vtop->c.i - 16) ^ inv, t);
1675 } else if (v == VT_JMP || v == VT_JMPI) {
1676 /* && or || optimization */
1677 if ((v & 1) == inv) {
1678 /* insert vtop->c jump list in t */
1679 uint32_t n1, n = vtop->c.i;
1680 if (n) {
1681 while ((n1 = read32le(cur_text_section->data + n)))
1682 n = n1;
1683 write32le(cur_text_section->data + n, t);
1684 t = vtop->c.i;
1686 } else {
1687 t = gjmp(t);
1688 gsym(vtop->c.i);
1691 vtop--;
1692 return t;
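/* Example of the unordered case handled above: ucomisd/comisd set the
   parity flag when an operand is NaN, so with these rules (a sketch):
     if (x < 1.0)  ...   NaN operand: compare is false, branch not taken
     if (x != 1.0) ...   NaN operand: compare is true,  branch taken
   i.e. the extra jp either skips the main conditional jump or adds an
   unconditional way to reach it. */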
1695 /* generate an integer binary operation */
1696 void gen_opi(int op)
1698 int r, fr, opc, c;
1699 int ll, uu, cc;
1701 ll = is64_type(vtop[-1].type.t);
1702 uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
1703 cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
1705 switch(op) {
1706 case '+':
1707 case TOK_ADDC1: /* add with carry generation */
1708 opc = 0;
1709 gen_op8:
1710 if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
1711 /* constant case */
1712 vswap();
1713 r = gv(RC_INT);
1714 vswap();
1715 c = vtop->c.i;
1716 if (c == (char)c) {
1717 /* XXX: generate inc and dec for smaller code ? */
1718 orex(ll, r, 0, 0x83);
1719 o(0xc0 | (opc << 3) | REG_VALUE(r));
1720 g(c);
1721 } else {
1722 orex(ll, r, 0, 0x81);
1723 oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
1725 } else {
1726 gv2(RC_INT, RC_INT);
1727 r = vtop[-1].r;
1728 fr = vtop[0].r;
1729 orex(ll, r, fr, (opc << 3) | 0x01);
1730 o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
1732 vtop--;
1733 if (op >= TOK_ULT && op <= TOK_GT) {
1734 vtop->r = VT_CMP;
1735 vtop->c.i = op;
1737 break;
1738 case '-':
1739 case TOK_SUBC1: /* sub with carry generation */
1740 opc = 5;
1741 goto gen_op8;
1742 case TOK_ADDC2: /* add with carry use */
1743 opc = 2;
1744 goto gen_op8;
1745 case TOK_SUBC2: /* sub with carry use */
1746 opc = 3;
1747 goto gen_op8;
1748 case '&':
1749 opc = 4;
1750 goto gen_op8;
1751 case '^':
1752 opc = 6;
1753 goto gen_op8;
1754 case '|':
1755 opc = 1;
1756 goto gen_op8;
1757 case '*':
1758 gv2(RC_INT, RC_INT);
1759 r = vtop[-1].r;
1760 fr = vtop[0].r;
1761 orex(ll, fr, r, 0xaf0f); /* imul fr, r */
1762 o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
1763 vtop--;
1764 break;
1765 case TOK_SHL:
1766 opc = 4;
1767 goto gen_shift;
1768 case TOK_SHR:
1769 opc = 5;
1770 goto gen_shift;
1771 case TOK_SAR:
1772 opc = 7;
1773 gen_shift:
1774 opc = 0xc0 | (opc << 3);
1775 if (cc) {
1776 /* constant case */
1777 vswap();
1778 r = gv(RC_INT);
1779 vswap();
1780 orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
1781 o(opc | REG_VALUE(r));
1782 g(vtop->c.i & (ll ? 63 : 31));
1783 } else {
1784 /* we generate the shift in ecx */
1785 gv2(RC_INT, RC_RCX);
1786 r = vtop[-1].r;
1787 orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
1788 o(opc | REG_VALUE(r));
1790 vtop--;
1791 break;
1792 case TOK_UDIV:
1793 case TOK_UMOD:
1794 uu = 1;
1795 goto divmod;
1796 case '/':
1797 case '%':
1798 case TOK_PDIV:
1799 uu = 0;
1800 divmod:
1801 /* first operand must be in eax */
1802 /* XXX: need better constraint for second operand */
1803 gv2(RC_RAX, RC_RCX);
1804 r = vtop[-1].r;
1805 fr = vtop[0].r;
1806 vtop--;
1807 save_reg(TREG_RDX);
1808 orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
1809 orex(ll, fr, 0, 0xf7); /* div fr, %eax */
1810 o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
1811 if (op == '%' || op == TOK_UMOD)
1812 r = TREG_RDX;
1813 else
1814 r = TREG_RAX;
1815 vtop->r = r;
1816 break;
1817 default:
1818 opc = 7;
1819 goto gen_op8;
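/* The 'opc' values above are the x86 group-1 /digit encodings, shared by
   the 0x83 (imm8) and 0x81 (imm32) forms and, as (opc << 3) | 0x01, by
   the reg-reg form (a sketch):
     add=0  or=1  adc=2  sbb=3  and=4  sub=5  xor=6  cmp=7
   e.g. '&' uses opc 4: reg-reg opcode 0x21, immediate form 0x81 /4. */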
1823 void gen_opl(int op)
1825 gen_opi(op);
1828 /* generate a floating point operation 'v = t1 op t2' instruction. The
1829 two operands are guaranteed to have the same floating point type */
1830 /* XXX: need to use ST1 too */
1831 void gen_opf(int op)
1833 int a, ft, fc, swapped, r;
1834 int float_type =
1835 (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;
1837 /* convert constants to memory references */
1838 if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
1839 vswap();
1840 gv(float_type);
1841 vswap();
1843 if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
1844 gv(float_type);
1846 /* must put at least one value in the floating point register */
1847 if ((vtop[-1].r & VT_LVAL) &&
1848 (vtop[0].r & VT_LVAL)) {
1849 vswap();
1850 gv(float_type);
1851 vswap();
1853 swapped = 0;
1854 /* swap the stack if needed so that t1 is the register and t2 is
1855 the memory reference */
1856 if (vtop[-1].r & VT_LVAL) {
1857 vswap();
1858 swapped = 1;
1860 if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
1861 if (op >= TOK_ULT && op <= TOK_GT) {
1862 /* load on stack second operand */
1863 load(TREG_ST0, vtop);
1864 save_reg(TREG_RAX); /* eax is used by FP comparison code */
1865 if (op == TOK_GE || op == TOK_GT)
1866 swapped = !swapped;
1867 else if (op == TOK_EQ || op == TOK_NE)
1868 swapped = 0;
1869 if (swapped)
1870 o(0xc9d9); /* fxch %st(1) */
1871 if (op == TOK_EQ || op == TOK_NE)
1872 o(0xe9da); /* fucompp */
1873 else
1874 o(0xd9de); /* fcompp */
1875 o(0xe0df); /* fnstsw %ax */
1876 if (op == TOK_EQ) {
1877 o(0x45e480); /* and $0x45, %ah */
1878 o(0x40fC80); /* cmp $0x40, %ah */
1879 } else if (op == TOK_NE) {
1880 o(0x45e480); /* and $0x45, %ah */
1881 o(0x40f480); /* xor $0x40, %ah */
1882 op = TOK_NE;
1883 } else if (op == TOK_GE || op == TOK_LE) {
1884 o(0x05c4f6); /* test $0x05, %ah */
1885 op = TOK_EQ;
1886 } else {
1887 o(0x45c4f6); /* test $0x45, %ah */
1888 op = TOK_EQ;
1890 vtop--;
1891 vtop->r = VT_CMP;
1892 vtop->c.i = op;
1893 } else {
1894 /* no memory reference possible for long double operations */
1895 load(TREG_ST0, vtop);
1896 swapped = !swapped;
1898 switch(op) {
1899 default:
1900 case '+':
1901 a = 0;
1902 break;
1903 case '-':
1904 a = 4;
1905 if (swapped)
1906 a++;
1907 break;
1908 case '*':
1909 a = 1;
1910 break;
1911 case '/':
1912 a = 6;
1913 if (swapped)
1914 a++;
1915 break;
1917 ft = vtop->type.t;
1918 fc = vtop->c.i;
1919 o(0xde); /* fxxxp %st, %st(1) */
1920 o(0xc1 + (a << 3));
1921 vtop--;
1923 } else {
1924 if (op >= TOK_ULT && op <= TOK_GT) {
1925 /* if saved lvalue, then we must reload it */
1926 r = vtop->r;
1927 fc = vtop->c.i;
1928 if ((r & VT_VALMASK) == VT_LLOCAL) {
1929 SValue v1;
1930 r = get_reg(RC_INT);
1931 v1.type.t = VT_PTR;
1932 v1.r = VT_LOCAL | VT_LVAL;
1933 v1.c.i = fc;
1934 load(r, &v1);
1935 fc = 0;
1938 if (op == TOK_EQ || op == TOK_NE) {
1939 swapped = 0;
1940 } else {
1941 if (op == TOK_LE || op == TOK_LT)
1942 swapped = !swapped;
1943 if (op == TOK_LE || op == TOK_GE) {
1944 op = 0x93; /* setae */
1945 } else {
1946 op = 0x97; /* seta */
1950 if (swapped) {
1951 gv(RC_FLOAT);
1952 vswap();
1954 assert(!(vtop[-1].r & VT_LVAL));
1956 if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
1957 o(0x66);
1958 if (op == TOK_EQ || op == TOK_NE)
1959 o(0x2e0f); /* ucomisd */
1960 else
1961 o(0x2f0f); /* comisd */
1963 if (vtop->r & VT_LVAL) {
1964 gen_modrm(vtop[-1].r, r, vtop->sym, fc);
1965 } else {
1966 o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
1969 vtop--;
1970 vtop->r = VT_CMP;
1971 vtop->c.i = op | 0x100;
1972 } else {
1973 assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
1974 switch(op) {
1975 default:
1976 case '+':
1977 a = 0;
1978 break;
1979 case '-':
1980 a = 4;
1981 break;
1982 case '*':
1983 a = 1;
1984 break;
1985 case '/':
1986 a = 6;
1987 break;
1989 ft = vtop->type.t;
1990 fc = vtop->c.i;
1991 assert((ft & VT_BTYPE) != VT_LDOUBLE);
1993 r = vtop->r;
1994 /* if saved lvalue, then we must reload it */
1995 if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
1996 SValue v1;
1997 r = get_reg(RC_INT);
1998 v1.type.t = VT_PTR;
1999 v1.r = VT_LOCAL | VT_LVAL;
2000 v1.c.i = fc;
2001 load(r, &v1);
2002 fc = 0;
2005 assert(!(vtop[-1].r & VT_LVAL));
2006 if (swapped) {
2007 assert(vtop->r & VT_LVAL);
2008 gv(RC_FLOAT);
2009 vswap();
2012 if ((ft & VT_BTYPE) == VT_DOUBLE) {
2013 o(0xf2);
2014 } else {
2015 o(0xf3);
2017 o(0x0f);
2018 o(0x58 + a);
2020 if (vtop->r & VT_LVAL) {
2021 gen_modrm(vtop[-1].r, r, vtop->sym, fc);
2022 } else {
2023 o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
2026 vtop--;
2031 /* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
2032 and 'long long' cases. */
2033 void gen_cvt_itof(int t)
2035 if ((t & VT_BTYPE) == VT_LDOUBLE) {
2036 save_reg(TREG_ST0);
2037 gv(RC_INT);
2038 if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
2039 /* signed long long to float/double/long double (unsigned case
2040 is handled generically) */
2041 o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
2042 o(0x242cdf); /* fildll (%rsp) */
2043 o(0x08c48348); /* add $8, %rsp */
2044 } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
2045 (VT_INT | VT_UNSIGNED)) {
2046 /* unsigned int to float/double/long double */
2047 o(0x6a); /* push $0 */
2048 g(0x00);
2049 o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
2050 o(0x242cdf); /* fildll (%rsp) */
2051 o(0x10c48348); /* add $16, %rsp */
2052 } else {
2053 /* int to float/double/long double */
2054 o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
2055 o(0x2404db); /* fildl (%rsp) */
2056 o(0x08c48348); /* add $8, %rsp */
2058 vtop->r = TREG_ST0;
2059 } else {
2060 int r = get_reg(RC_FLOAT);
2061 gv(RC_INT);
2062 o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
2063 if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
2064 (VT_INT | VT_UNSIGNED) ||
2065 (vtop->type.t & VT_BTYPE) == VT_LLONG) {
2066 o(0x48); /* REX */
2068 o(0x2a0f);
2069 o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
2070 vtop->r = r;
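/* Concrete bytes for the SSE path above (illustrative): converting a
   signed int in %eax to a double in %xmm0 emits
     f2 0f 2a c0        cvtsi2sd %eax,%xmm0
   while the 64-bit source forms used for long long (and for unsigned int,
   already zero-extended) add a REX.W prefix: f2 48 0f 2a c0. */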
2074 /* convert from one floating point type to another */
2075 void gen_cvt_ftof(int t)
2077 int ft, bt, tbt;
2079 ft = vtop->type.t;
2080 bt = ft & VT_BTYPE;
2081 tbt = t & VT_BTYPE;
2083 if (bt == VT_FLOAT) {
2084 gv(RC_FLOAT);
2085 if (tbt == VT_DOUBLE) {
2086 o(0x140f); /* unpcklps */
2087 o(0xc0 + REG_VALUE(vtop->r)*9);
2088 o(0x5a0f); /* cvtps2pd */
2089 o(0xc0 + REG_VALUE(vtop->r)*9);
2090 } else if (tbt == VT_LDOUBLE) {
2091 save_reg(RC_ST0);
2092 /* movss %xmm0,-0x10(%rsp) */
2093 o(0x110ff3);
2094 o(0x44 + REG_VALUE(vtop->r)*8);
2095 o(0xf024);
2096 o(0xf02444d9); /* flds -0x10(%rsp) */
2097 vtop->r = TREG_ST0;
2099 } else if (bt == VT_DOUBLE) {
2100 gv(RC_FLOAT);
2101 if (tbt == VT_FLOAT) {
2102 o(0x140f66); /* unpcklpd */
2103 o(0xc0 + REG_VALUE(vtop->r)*9);
2104 o(0x5a0f66); /* cvtpd2ps */
2105 o(0xc0 + REG_VALUE(vtop->r)*9);
2106 } else if (tbt == VT_LDOUBLE) {
2107 save_reg(RC_ST0);
2108 /* movsd %xmm0,-0x10(%rsp) */
2109 o(0x110ff2);
2110 o(0x44 + REG_VALUE(vtop->r)*8);
2111 o(0xf024);
2112 o(0xf02444dd); /* fldl -0x10(%rsp) */
2113 vtop->r = TREG_ST0;
2115 } else {
2116 int r;
2117 gv(RC_ST0);
2118 r = get_reg(RC_FLOAT);
2119 if (tbt == VT_DOUBLE) {
2120 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
2121 /* movsd -0x10(%rsp),%xmm0 */
2122 o(0x100ff2);
2123 o(0x44 + REG_VALUE(r)*8);
2124 o(0xf024);
2125 vtop->r = r;
2126 } else if (tbt == VT_FLOAT) {
2127 o(0xf0245cd9); /* fstps -0x10(%rsp) */
2128 /* movss -0x10(%rsp),%xmm0 */
2129 o(0x100ff3);
2130 o(0x44 + REG_VALUE(r)*8);
2131 o(0xf024);
2132 vtop->r = r;
2137 /* convert fp to int 't' type */
2138 void gen_cvt_ftoi(int t)
2140 int ft, bt, size, r;
2141 ft = vtop->type.t;
2142 bt = ft & VT_BTYPE;
2143 if (bt == VT_LDOUBLE) {
2144 gen_cvt_ftof(VT_DOUBLE);
2145 bt = VT_DOUBLE;
2148 gv(RC_FLOAT);
2149 if (t != VT_INT)
2150 size = 8;
2151 else
2152 size = 4;
2154 r = get_reg(RC_INT);
2155 if (bt == VT_FLOAT) {
2156 o(0xf3);
2157 } else if (bt == VT_DOUBLE) {
2158 o(0xf2);
2159 } else {
2160 assert(0);
2162 orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
2163 o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
2164 vtop->r = r;
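/* The 0x2c0f opcode above is the truncating form (cvttss2si/cvttsd2si),
   which gives C's round-toward-zero conversion, e.g. (int)-1.7 == -1.
   Illustrative encoding: f2 48 0f 2c c8 is cvttsd2si %xmm0,%rcx. */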
2167 /* computed goto support */
2168 void ggoto(void)
2170 gcall_or_jmp(1);
2171 vtop--;
2174 /* Save the stack pointer onto the stack and return the location of its address */
2175 ST_FUNC void gen_vla_sp_save(int addr) {
2176 /* mov %rsp,addr(%rbp)*/
2177 gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
2180 /* Restore the SP from a location on the stack */
2181 ST_FUNC void gen_vla_sp_restore(int addr) {
2182 gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
2185 /* Subtract from the stack pointer, and push the resulting value onto the stack */
2186 ST_FUNC void gen_vla_alloc(CType *type, int align) {
2187 #ifdef TCC_TARGET_PE
2188 /* alloca does more than just adjust %rsp on Windows */
2189 vpush_global_sym(&func_old_type, TOK_alloca);
2190 vswap(); /* Move alloca ref past allocation size */
2191 gfunc_call(1);
2192 #else
2193 int r;
2194 r = gv(RC_INT); /* allocation size */
2195 /* sub r,%rsp */
2196 o(0x2b48);
2197 o(0xe0 | REG_VALUE(r));
2198 /* We align to 16 bytes rather than to the requested 'align' */
2199 /* and ~15, %rsp */
2200 o(0xf0e48348);
2201 vpop();
2202 #endif
2206 /* end of x86-64 code generator */
2207 /*************************************************************/
2208 #endif /* ! TARGET_DEFS_ONLY */
2209 /******************************************************/