/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS         25
#define NB_ASM_REGS     16
#define CONFIG_TCC_ASM

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0080 /* only for long double */
#define RC_R8      0x0100
#define RC_R9      0x0200
#define RC_R10     0x0400
#define RC_R11     0x0800
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_LRET    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */
/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20
};
#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)

/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_LRET TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN     16
/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#include "tcc.h"
#include <assert.h>
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0,
    0,
    0,
    0,
    0,
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};
static unsigned long func_sub_sp_offset;
static int func_ret_sub;
/* XXX: make it faster ? */
ST_FUNC void g(int c)
{
    int ind1;
    if (nocode_wanted)
        return;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}
ST_FUNC void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}
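
/* Note that o() packs instruction bytes little-endian: the byte stored
   in the low-order position is emitted first, and emission stops once
   the remaining value is zero.  For example, o(0xf045c7) emits
   c7 45 f0, the start of "movl $imm32,-0x10(%rbp)".  Two consequences
   relied on below: o(0) emits nothing at all (see the 'pic' prefix in
   store()), and trailing 0x00 bytes must go through g() instead. */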
ST_FUNC void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

ST_FUNC void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

ST_FUNC void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}
static void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
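
/* orex() prepends a REX prefix when one is needed: bit 3 (W) comes
   from the 64-bit operand flag 'll', bit 0 (B) extends the register
   encoded in 'r', and bit 2 (R) the one in 'r2'; pseudo registers
   >= VT_CONST are treated as 0.  E.g. orex(1, TREG_R8, 0,
   0xb8 | REG_VALUE(TREG_R8)) emits 49 b8, the start of
   "movabs $imm64,%r8". */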
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
{
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a - t - 4);
        t = n;
    }
}
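
/* Forward jumps are chained through their own displacement fields:
   each pending 32-bit slot holds the offset of the previous slot in
   the chain (0 terminates it).  gsym_addr() walks the chain and
   replaces every slot with the real pc-relative value a - t - 4. */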
void gsym(int t)
{
    gsym_addr(t, ind);
}

static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}

/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)
{
    int t;
    if (nocode_wanted)
        return s;
    o(c);
    t = ind;
    gen_le32(s);
    return t;
}

/* generate jmp to a label */
#define gjmp2(instr,lbl) oad(instr,lbl)
ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    greloca(cur_text_section, sym, ind, R_X86_64_PC32, 0);
    gen_le32(c-4);
}
/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifdef TCC_TARGET_PE
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
        get_tok_str(sym->v, NULL), c, r,
        cur_text_section->data[ind-3],
        cur_text_section->data[ind-2],
        cur_text_section->data[ind-1]
        );
#endif
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}
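
/* The -4 addend compensates for the 4-byte field itself: x86-64
   rip-relative addressing is relative to the end of the instruction,
   which lies 4 bytes past the field written by gen_le32() above.  Any
   remaining constant offset c is added with a separate "add $c,%reg". */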
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        o(0x05 | op_reg);
        if (is_got) {
            gen_gotpcrel(r, sym, c);
        } else {
            gen_addrpc32(r, sym, c);
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}
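
/* Recall that a ModRM byte is mod(2) | reg(3) | r/m(3), with op_reg
   already shifted into the reg field: 0x05 is mod=00,r/m=101, i.e.
   rip-relative on x86-64; 0x45/0x85 are disp8/disp32 off %rbp; and
   0x80|r / 0x00|r are the disp32/no-displacement forms with a plain
   base register. */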
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.i;
    if (fc != sv->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in load");

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif
    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.i = fc;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        ll = 0;
        /* Like GCC we can load from small enough properly sized
           structs and unions as well.
           XXX maybe move to generic operand handling, but should
           occur only with asm, so tccasm.c might also be a better place */
        if ((ft & VT_BTYPE) == VT_STRUCT) {
            int align;
            switch (type_size(&sv->type, &align)) {
            case 1: ft = VT_BYTE; break;
            case 2: ft = VT_SHORT; break;
            case 4: ft = VT_INT; break;
            case 8: ft = VT_LLONG; break;
            default:
                tcc_error("invalid aggregate type for register load");
                break;
            }
        }
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT)
                   || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR)
                   || ((ft & VT_BTYPE) == VT_FUNC)
                   );
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.i);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            orex(0,r,0,0);
            if ((fc & ~0x100) != TOK_NE)
                oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            else
                oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
            if (fc & 0x100)
              {
                /* This was a float compare.  If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE.  */
                fc &= ~0x100;
                o(0x037a + (REX_BASE(r) << 8));
              }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(1,r,v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    fr = v->r & VT_VALMASK;
    ft = v->type.t;
    fc = v->c.i;
    if (fc != v->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in store");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif
    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: can we really get here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: can we really get here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) || (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant case */
        if (vtop->r & VT_SYM) {
            /* relocation case */
#ifdef TCC_TARGET_PE
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
#else
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
#endif
        } else {
            /* put an empty PC32 relocation */
            put_elf_reloca(symtab_section, cur_text_section,
                           ind + 1, R_X86_64_PC32, 0, (int)(vtop->c.i-4));
        }
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}
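
/* The relocations above are placed at ind + 1 because the 0xe8/0xe9
   opcode occupies one byte; the 32-bit displacement field starts right
   after it, and the addend folds the usual -4 field-size correction
   into any constant offset carried in vtop->c.i. */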
#if defined(CONFIG_TCC_BCHECK)
#ifndef TCC_TARGET_PE
static addr_t func_bound_offset;
static unsigned long func_bound_ind;
#endif

static void gen_static_call(int v)
{
    Sym *sym = external_global_sym(v, &func_old_type, 0);
    oad(0xe8, 0);
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
}

/* generate a bounded pointer addition */
ST_FUNC void gen_bounded_ptr_add(void)
{
    /* save all temporary registers */
    save_regs(0);

    /* prepare fast x86_64 function call */
    gv(RC_RAX);
    o(0xc68948); // mov  %rax,%rsi ## second arg in %rsi, this must be size
    vtop--;

    gv(RC_RAX);
    o(0xc78948); // mov  %rax,%rdi ## first arg in %rdi, this must be ptr
    vtop--;

    /* do a fast function call */
    gen_static_call(TOK___bound_ptr_add);

    /* returned pointer is in rax */
    vtop++;
    vtop->r = TREG_RAX | VT_BOUNDED;

    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
}
/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
ST_FUNC void gen_bounded_ptr_deref(void)
{
    addr_t func;
    int size, align;
    ElfW(Rela) *rel;
    Sym *sym;

    size = 0;
    /* XXX: put that code in generic part of tcc */
    if (!is_float(vtop->type.t)) {
        if (vtop->r & VT_LVAL_BYTE)
            size = 1;
        else if (vtop->r & VT_LVAL_SHORT)
            size = 2;
    }
    if (!size)
        size = type_size(&vtop->type, &align);
    switch(size) {
    case  1: func = TOK___bound_ptr_indir1; break;
    case  2: func = TOK___bound_ptr_indir2; break;
    case  4: func = TOK___bound_ptr_indir4; break;
    case  8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
    default:
        tcc_error("unhandled size when dereferencing bounded pointer");
        func = 0;
        break;
    }

    sym = external_global_sym(func, &func_old_type, 0);
    if (!sym->c)
        put_extern_sym(sym, NULL, 0, 0);

    /* patch relocation */
    /* XXX: find a better solution ? */

    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
}
#endif
#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}

static int func_scratch, func_alloca;
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

static void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}

static int using_regs(int size)
{
    return !(size > 8 || (size & (size - 1)));
}
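
/* (size & (size - 1)) is non-zero unless size is a power of two, so
   using_regs() accepts exactly the 1/2/4/8-byte values that the Win64
   convention passes (and returns) directly; larger or odd-sized
   aggregates travel by reference instead. */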
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    size = type_size(vt, &align);
    if (!using_regs(size))
        return 0;
    if (size == 8)
        ret->t = VT_LLONG;
    else if (size == 4)
        ret->t = VT_INT;
    else if (size == 2)
        ret->t = VT_SHORT;
    else
        ret->t = VT_BYTE;
    ret->ref = NULL;
    return 1;
}

static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

static int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}
void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (using_regs(size))
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;
    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (!using_regs(size)) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                if (tcc_state->nosse)
                    tcc_error("SSE disabled");
                gv(RC_XMM0); /* only use one float register */
                if (arg >= REGN) {
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (arg << 3));
                    d = arg_prepare_reg(arg);
                    /* mov %xmm0, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);

    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);

    if ((vtop->r & VT_SYM) && vtop->sym->v == TOK_alloca) {
        /* need to add the "func_scratch" area after alloca */
        o(0x0548), gen_le32(func_alloca), func_alloca = ind - 4;
    }

    /* other compilers don't clear the upper bits when returning char/short */
    bt = vtop->type.ref->type.t & (VT_BTYPE | VT_UNSIGNED);
    if (bt == (VT_BYTE | VT_UNSIGNED))
        o(0xc0b60f); /* movzbl %al, %eax */
    else if (bt == VT_BYTE)
        o(0xc0be0f); /* movsbl %al, %eax */
    else if (bt == VT_SHORT)
        o(0x98); /* cwtl */
    else if (bt == (VT_SHORT | VT_UNSIGNED))
        o(0xc0b70f); /* movzwl %ax, %eax */
#if 0 /* handled in gen_cast() */
    else if (bt == VT_INT)
        o(0x9848); /* cltq */
    else if (bt == (VT_INT | VT_UNSIGNED))
        o(0xc089); /* mov %eax,%eax */
#endif
    vtop--;
}
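
/* Note that args_size above is at least REGN * 8 = 32 bytes: Win64
   requires the caller to reserve "home" slots on the stack for the
   four register arguments even when every argument fits in registers. */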
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 0;
    func_alloca = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    func_var = (sym->f.func_type == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
    if (!using_regs(size)) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (!using_regs(size)) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LLOCAL | VT_LVAL, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    if (tcc_state->nosse)
                        tcc_error("SSE disabled");
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_type->ref->f.func_type == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    v = (func_scratch + -loc + 15) & -16;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */
        gen_le32(v);
    }

    /* add the "func_scratch" area after each alloca seen */
    while (func_alloca) {
        unsigned char *ptr = cur_text_section->data + func_alloca;
        func_alloca = read32le(ptr);
        write32le(ptr, func_scratch);
    }

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}
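
/* Frames of 4 KiB and larger go through __chkstk, which touches the
   frame one page at a time; Windows commits stack pages via a single
   guard page, so a plain "sub $big,%rsp" could otherwise skip past it. */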
#else

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}
typedef enum X86_64_Mode {
  x86_64_mode_none,
  x86_64_mode_memory,
  x86_64_mode_integer,
  x86_64_mode_sse,
  x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}
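
/* This is a simplified form of the SysV x86-64 field-merging rule:
   MEMORY absorbs everything, INTEGER wins over SSE and x87, and x87
   mixed with SSE degrades to MEMORY.  Unlike the full ABI, tcc merges
   a single class for the whole (<= 16 byte) aggregate instead of
   classifying each eightbyte separately. */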
static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
        return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }
    assert(0);
    return 0;
}
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_LLONG : VT_INT;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}
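
/* Some examples of what this classifier yields:
     struct { int a, b; }      -> integer, 1 register  (VT_LLONG)
     struct { double x, y; }   -> sse, 2 registers     (VT_QFLOAT)
     struct { char c[24]; }    -> memory (size > 16)
     long double               -> x87, kept on the fp stack */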
ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}
#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return arg_regs[idx];
}
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;
    char _onstack[nb_args], *onstack = _onstack;

    /* calculate the number of integer/float register arguments, remember
       arguments to be passed via stack (in onstack[]), and also remember
       if we have to align the stack pointer to 16 (onstack[i] == 2).  Needs
       to be done in a left-to-right pass over arguments. */
    stack_adjust = 0;
    for(i = nb_args - 1; i >= 0; i--) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse && nb_sse_args + reg_count <= 8) {
            nb_sse_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_integer && nb_reg_args + reg_count <= REGN) {
            nb_reg_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_none) {
            onstack[i] = 0;
        } else {
            if (align == 16 && (stack_adjust &= 15)) {
                onstack[i] = 2;
                stack_adjust = 0;
            } else
                onstack[i] = 1;
            stack_adjust += size;
        }
    }
    if (nb_sse_args && tcc_state->nosse)
        tcc_error("SSE disabled but floating point arguments passed");

    /* fetch cpu flag before generating any code */
    if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
        gv(RC_INT);

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    args_size = 0;
    stack_adjust &= 15;
    for (i = 0; i < nb_args;) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (!onstack[i]) {
            ++i;
            continue;
        }
        /* Possibly adjust stack to align SSE boundary.  We're processing
           args from right to left while allocating happens left to right
           (stack grows down), so the adjustment needs to happen _after_
           an argument that requires it. */
        if (stack_adjust) {
            o(0x50); /* push %rax; aka sub $8,%rsp */
            args_size += 8;
            stack_adjust = 0;
        }
        if (onstack[i] == 2)
            stack_adjust = 1;

        vrotb(i+1);

        switch (vtop->type.t & VT_BTYPE) {
        case VT_STRUCT:
            /* allocate the necessary size on stack */
            o(0x48);
            oad(0xec81, size); /* sub $xxx, %rsp */
            /* generate structure store */
            r = get_reg(RC_INT);
            orex(1, r, 0, 0x89); /* mov %rsp, r */
            o(0xe0 + REG_VALUE(r));
            vset(&vtop->type, r | VT_LVAL, 0);
            vswap();
            vstore();
            break;

        case VT_LDOUBLE:
            gv(RC_ST0);
            oad(0xec8148, size); /* sub $xxx, %rsp */
            o(0x7cdb); /* fstpt 0(%rsp) */
            g(0x24);
            g(0x00);
            break;

        case VT_FLOAT:
        case VT_DOUBLE:
            assert(mode == x86_64_mode_sse);
            r = gv(RC_FLOAT);
            o(0x50); /* push $rax */
            /* movq %xmmN, (%rsp) */
            o(0xd60f66);
            o(0x04 + REG_VALUE(r)*8);
            o(0x24);
            break;

        default:
            assert(mode == x86_64_mode_integer);
            /* simple type */
            /* XXX: implicit cast ? */
            r = gv(RC_INT);
            orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
            break;
        }
        args_size += size;

        vpop();
        --nb_args;
        onstack++;
    }
    /* XXX This should be superfluous. */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            int d;
            gen_reg -= reg_count;
            r = gv(RC_INT);
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here. */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    if (vtop->type.ref->f.func_type != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}
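
/* The "mov $n,%eax" just before the call implements the SysV rule that
   variadic (and old-style) callees receive in %al an upper bound on
   the number of vector registers used for arguments. */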
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}
/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (sym->f.func_type == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count > REGN)
                    goto stack_arg;
                seen_reg_num += reg_count;
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count > 8)
                    goto stack_arg;
                seen_sse_num += reg_count;
                break;
            }
        }

        loc -= 16;
        /* movl $0x????????, -0x10(%rbp) */
        o(0xf045c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        o(0xf445c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        o(0xf845c7);
        gen_le32(seen_stack_size);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            if (!tcc_state->nosse) {
                o(0xd60f66); /* movq */
                gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            }
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }
    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (tcc_state->nosse)
                tcc_error("SSE disabled but floating point arguments used");
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }

#ifdef CONFIG_TCC_BCHECK
    /* leave some room for bound checking code */
    if (tcc_state->do_bounds_check) {
        func_bound_offset = lbounds_section->data_offset;
        func_bound_ind = ind;
        oad(0xb8, 0); /* lbound section pointer */
        o(0xc78948); /* mov %rax,%rdi ## first arg in %rdi, this must be ptr */
        oad(0xb8, 0); /* call to function */
    }
#endif
}
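
/* The three 32-bit values stored at -0x10/-0xc/-0x8(%rbp) in the
   FUNC_ELLIPSIS path above seed the gp_offset/fp_offset/overflow
   bookkeeping of va_list; like classify_x86_64_va_arg(), that layout
   presumably has to stay in sync with tcc's stdarg implementation. */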
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check
        && func_bound_offset != lbounds_section->data_offset)
    {
        addr_t saved_ind;
        addr_t *bounds_ptr;
        Sym *sym_data;

        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
        *bounds_ptr = 0;

        /* generate bound local allocation */
        sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        saved_ind = ind;
        ind = func_bound_ind;
        greloca(cur_text_section, sym_data, ind + 1, R_X86_64_64, 0);
        ind = ind + 5 + 3;
        gen_static_call(TOK___bound_local_new);
        ind = saved_ind;

        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloca(cur_text_section, sym_data, ind + 1, R_X86_64_64, 0);
        oad(0xb8, 0); /* mov xxx, %rax */
        o(0xc78948); /* mov %rax,%rdi # first arg in %rdi, this must be ptr */
        gen_static_call(TOK___bound_local_delete);
        o(0x585a); /* restore returned value, if any */
    }
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */
/* generate a jump to a label */
int gjmp(int t)
{
    return gjmp2(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}
ST_FUNC void gtst_addr(int inv, int a)
{
    int v = vtop->r & VT_VALMASK;
    if (v == VT_CMP) {
        inv ^= (vtop--)->c.i;
        a -= ind + 2;
        if (a == (char)a) {
            g(inv - 32);
            g(a);
        } else {
            g(0x0f);
            oad(inv - 16, a - 4);
        }
    } else if ((v & ~1) == VT_JMP) {
        if ((v & 1) != inv) {
            gjmp_addr(a);
            gsym(vtop->c.i);
        } else {
            gsym(vtop->c.i);
            o(0x05eb);
            gjmp_addr(a);
        }
        vtop--;
    }
}
/* generate a test. set 'inv' to invert test. Stack entry is popped */
ST_FUNC int gtst(int inv, int t)
{
    int v = vtop->r & VT_VALMASK;

    if (nocode_wanted) {
        ;
    } else if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100)
          {
            /* This was a float compare.  If the parity flag is set
               the result was unordered.  For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test.  We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump.  */
            vtop->c.i &= ~0x100;
            if (inv == (vtop->c.i == TOK_NE))
                o(0x067a); /* jp +6 */
            else
              {
                g(0x0f);
                t = gjmp2(0x8a, t); /* jp t */
              }
          }
        g(0x0f);
        t = gjmp2((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            uint32_t n1, n = vtop->c.i;
            if (n) {
                while ((n1 = read32le(cur_text_section->data + n)))
                    n = n1;
                write32le(cur_text_section->data + n, t);
                t = vtop->c.i;
            }
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    }
    vtop--;
    return t;
}
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop->r = VT_CMP;
            vtop->c.i = op;
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}
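
/* For reference, the gen_op8 cases rely on the x86 "group 1" /digit
   encoding shared by the 0x83/0x81 immediate forms and the
   (opc << 3) | 0x01 register forms: add=0, or=1, adc=2, sbb=3, and=4,
   sub=5, xor=6, cmp=7 (the default case, which is how the comparison
   operators end up leaving a VT_CMP entry). */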
void gen_opl(int op)
{
    gen_opi(op);
}
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int float_type =
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op;
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.i;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op | 0x100;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
        }
    }
}
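
/* Encoding note: the scalar SSE arithmetic ops are 0x0f 0x58+a with an
   0xf3 (float) or 0xf2 (double) prefix, so a selects add=0, mul=1,
   sub=4, div=6; the x87 path reuses the same selector for the fxxxp
   family, where "swapped" switches between the fsubp/fsubrp and
   fdivp/fdivrp variants. */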
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}
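
/* cvtsi2ss/sd (0x0f 0x2a) sign-extends its integer source, so the
   unsigned-int and long-long cases force the REX.W 64-bit form: a
   zero-extended unsigned int is then converted exactly.  Unsigned
   long long is handled generically elsewhere, as noted above. */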
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}
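
/* cvttss2si/cvttsd2si truncate toward zero, matching C cast semantics;
   every target type except plain int takes the 64-bit form, so an
   unsigned int result also comes back correct in the low 32 bits. */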
/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}
/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}
/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    gfunc_call(1);
#else
    int r;
    r = gv(RC_INT); /* allocation size */
    /* sub r,%rsp */
    o(0x2b48);
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
    /* and ~15, %rsp */
    o(0xf0e48348);
    vpop();
#endif
}
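
/* On the non-PE path the "and $~15,%rsp" keeps the stack 16-byte
   aligned as the SysV ABI requires, which is why the requested 'align'
   argument is deliberately ignored when it is smaller. */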
/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/