/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS     25
#define NB_ASM_REGS 16

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which does
   assumptions on it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0080 /* only for long double */
#define RC_R8      0x0100
#define RC_R9      0x0200
#define RC_R10     0x0400
#define RC_R11     0x0800
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_LRET    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */
/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20
};

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)
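/* x86-64 encodes its 16 integer registers with 4 bits: the low 3 bits
   go into the ModRM/opcode byte (REG_VALUE) and the 4th bit is carried
   by a REX prefix bit (REX_BASE).  For example, for %r10 (register
   number 10 = 0b1010) REX_BASE gives 1 and REG_VALUE gives 2, i.e. the
   same low bits as %rdx plus a REX bit selecting the extended bank. */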
/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_LRET TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN     16
/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#include "tcc.h"
#include <assert.h>

ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0,
    0,
    0,
    0,
    0,
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};
static unsigned long func_sub_sp_offset;
static int func_ret_sub;

/* XXX: make it faster ? */
ST_FUNC void g(int c)
{
    int ind1;
    if (nocode_wanted)
        return;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}

ST_FUNC void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}

ST_FUNC void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

ST_FUNC void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

ST_FUNC void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}

static void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
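/* The REX prefix emitted by orex() has the layout 0100WR0B: bit 3 (W)
   selects 64-bit operand size, bit 2 (R) extends the ModRM reg field
   and bit 0 (B) extends the ModRM r/m (or opcode register) field.
   For example, "mov %rax,%r8" is encoded as 49 89 c0: REX = 0x40 | W
   (0x08) | B (0x01) = 0x49, then opcode 0x89 and ModRM 0xc0. */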
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
{
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a - t - 4);
        t = n;
    }
}

void gsym(int t)
{
    gsym_addr(t, ind);
}
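/* Forward references are kept as a linked list threaded through the
   not-yet-patched 32-bit displacement fields themselves: each pending
   jump site stores the offset of the previous pending site, and 0
   terminates the chain.  gsym_addr() walks that list and replaces each
   link with the real PC-relative displacement to the target. */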
static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}

/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)
{
    int t;
    if (nocode_wanted)
        return s;
    o(c);
    t = ind;
    gen_le32(s);
    return t;
}

/* generate jmp to a label */
#define gjmp2(instr,lbl) oad(instr,lbl)
ST_FUNC void gen_addr32(int r, Sym *sym, long c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, long c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
    gen_le32(c-4);
}

/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifndef TCC_TARGET_PE
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
#else
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
        get_tok_str(sym->v, NULL), c, r,
        cur_text_section->data[ind-3],
        cur_text_section->data[ind-2],
        cur_text_section->data[ind-1]
        );
    greloc(cur_text_section, sym, ind, R_X86_64_PC32);
#endif
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        o(0x05 | op_reg);
        if (is_got) {
            gen_gotpcrel(r, sym, c);
        } else {
            gen_addrpc32(r, sym, c);
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}
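/* The ModRM byte built above is mod(2)|reg(3)|r/m(3).  The cases map
   to the standard x86 addressing modes: 0x05 is mod=00, r/m=101, i.e.
   RIP-relative with a 32-bit displacement; 0x45/0x85 are mod=01/10
   with r/m=101 (%rbp base plus 8- or 32-bit displacement); and the
   TREG_MEM cases use mod=10 or mod=00 with a plain register base. */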
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.i;
    if (fc != sv->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in load");

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.i = fc;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        ll = 0;
        /* Like GCC we can load from small enough properly sized
           structs and unions as well.
           XXX maybe move to generic operand handling, but should
           occur only with asm, so tccasm.c might also be a better place */
        if ((ft & VT_BTYPE) == VT_STRUCT) {
            int align;
            switch (type_size(&sv->type, &align)) {
                case 1: ft = VT_BYTE; break;
                case 2: ft = VT_SHORT; break;
                case 4: ft = VT_INT; break;
                case 8: ft = VT_LLONG; break;
                default:
                    tcc_error("invalid aggregate type for register load");
                    break;
            }
        }
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT) || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR) || ((ft & VT_BTYPE) == VT_ENUM)
                   || ((ft & VT_BTYPE) == VT_FUNC));
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.i);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            orex(0,r,0,0);
            if ((fc & ~0x100) != TOK_NE)
                oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            else
                oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
            if (fc & 0x100)
            {
                /* This was a float compare. If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE. */
                fc &= ~0x100;
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(1,r,v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    fr = v->r & VT_VALMASK;
    ft = v->type.t;
    fc = v->c.i;
    if (fc != v->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in store");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: do we ever get here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: do we ever get here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) || (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant case */
        if (vtop->r & VT_SYM) {
            /* relocation case */
#ifdef TCC_TARGET_PE
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
#else
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
#endif
        } else {
            /* put an empty PC32 relocation */
            put_elf_reloca(symtab_section, cur_text_section,
                           ind + 1, R_X86_64_PC32, 0, (int)(vtop->c.i-4));
        }
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}
#if defined(CONFIG_TCC_BCHECK)
#ifndef TCC_TARGET_PE
static addr_t func_bound_offset;
static unsigned long func_bound_ind;
#endif

static void gen_static_call(int v)
{
    Sym *sym = external_global_sym(v, &func_old_type, 0);
    oad(0xe8, 0);
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
}

/* generate a bounded pointer addition */
ST_FUNC void gen_bounded_ptr_add(void)
{
    /* save all temporary registers */
    save_regs(0);

    /* prepare fast x86_64 function call */
    gv(RC_RAX);
    o(0xc68948); // mov  %rax,%rsi ## second arg in %rsi, this must be size
    vtop--;

    gv(RC_RAX);
    o(0xc78948); // mov  %rax,%rdi ## first arg in %rdi, this must be ptr
    vtop--;

    /* do a fast function call */
    gen_static_call(TOK___bound_ptr_add);

    /* returned pointer is in rax */
    vtop++;
    vtop->r = TREG_RAX | VT_BOUNDED;

    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
}
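/* Note that vtop->c.i above remembers where the relocation for the
   __bound_ptr_add call was emitted; gen_bounded_ptr_deref() below uses
   it to retarget that same call site to the size-specific
   __bound_ptr_indirN checker once the access size is known. */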
/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
ST_FUNC void gen_bounded_ptr_deref(void)
{
    addr_t func;
    int size, align;
    ElfW(Rela) *rel;
    Sym *sym;

    size = 0;
    /* XXX: put that code in generic part of tcc */
    if (!is_float(vtop->type.t)) {
        if (vtop->r & VT_LVAL_BYTE)
            size = 1;
        else if (vtop->r & VT_LVAL_SHORT)
            size = 2;
    }
    if (!size)
        size = type_size(&vtop->type, &align);
    switch(size) {
    case  1: func = TOK___bound_ptr_indir1; break;
    case  2: func = TOK___bound_ptr_indir2; break;
    case  4: func = TOK___bound_ptr_indir4; break;
    case  8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
    default:
        tcc_error("unhandled size when dereferencing bounded pointer");
        func = 0;
        break;
    }

    sym = external_global_sym(func, &func_old_type, 0);
    if (!sym->c)
        put_extern_sym(sym, NULL, 0, 0);

    /* patch relocation */
    /* XXX: find a better solution ? */

    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
}
#endif
#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}

static int func_scratch;

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    size = type_size(vt, &align);
    if (size > 8 || (size & (size - 1)))
        return 0;
    if (size == 8)
        ret->t = VT_LLONG;
    else if (size == 4)
        ret->t = VT_INT;
    else if (size == 2)
        ret->t = VT_SHORT;
    else
        ret->t = VT_BYTE;
    ret->ref = NULL;
    return 1;
}
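/* The (size & (size - 1)) test rejects any size that is not a power of
   two: following the Windows x64 convention, only aggregates of 1, 2,
   4 or 8 bytes come back in RAX; e.g. a 3-byte struct is instead
   returned through a hidden pointer argument. */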
static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}
void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (size <= 8)
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (size > 8) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                if (tcc_state->nosse)
                    tcc_error("SSE disabled");
                gv(RC_XMM0); /* only use one float register */
                if (arg >= REGN) {
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (arg << 3));
                    d = arg_prepare_reg(arg);
                    /* mov %xmm0, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);

    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);
    /* other compilers don't clear the upper bits when returning char/short */
    bt = vtop->type.ref->type.t & (VT_BTYPE | VT_UNSIGNED);
    if (bt == (VT_BYTE | VT_UNSIGNED))
        o(0xc0b60f);  /* movzbl %al, %eax */
    else if (bt == VT_BYTE)
        o(0xc0be0f); /* movsbl %al, %eax */
    else if (bt == VT_SHORT)
        o(0x98); /* cwtl */
    else if (bt == (VT_SHORT | VT_UNSIGNED))
        o(0xc0b70f);  /* movzwl %ax, %eax */
#if 0 /* handled in gen_cast() */
    else if (bt == VT_INT)
        o(0x9848); /* cltq */
    else if (bt == (VT_INT | VT_UNSIGNED))
        o(0xc089);  /* mov %eax,%eax */
#endif
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    func_var = (sym->c == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
    if (size > 8) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (size > 8) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL | VT_REF, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    if (tcc_state->nosse)
                        tcc_error("SSE disabled");
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_type->ref->c == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    v = (func_scratch + -loc + 15) & -16;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
        o(0xec8148);  /* sub rsp, stacksize */
        gen_le32(v);
    }

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}
#else

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}
typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}
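/* These merge rules follow the SysV AMD64 classification lattice:
   MEMORY beats everything, INTEGER beats SSE, and X87 mixed with SSE
   degrades to MEMORY.  For example, struct { long a; double b; }
   merges INTEGER with SSE and is classified INTEGER here.  Note that
   this is a simplification of the full ABI, which classifies each
   eightbyte of a struct separately rather than the struct as a whole. */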
static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
    case VT_ENUM: return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }
    assert(0);
    return 0;
}
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_LLONG : VT_INT;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}
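/* Examples of the resulting classification: a plain int gives
   (integer, reg_count 1, VT_INT); struct { long a, b; } (16 bytes)
   gives (integer, reg_count 2, VT_QLONG), i.e. a register pair;
   struct { double x, y; } gives (sse, reg_count 2, VT_QFLOAT); and a
   20-byte struct exceeds 16 bytes, is classified memory, and is
   therefore passed and returned via the stack. */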
ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}
#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return arg_regs[idx];
}

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, run_start, run_end, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;

    /* calculate the number of integer/float register arguments */
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse)
            nb_sse_args += reg_count;
        else if (mode == x86_64_mode_integer)
            nb_reg_args += reg_count;
    }

    if (nb_sse_args && tcc_state->nosse)
        tcc_error("SSE disabled but floating point arguments passed");

    /* arguments are collected in runs. Each run is a collection of 8-byte aligned arguments
       and ended by a 16-byte aligned argument. This is because, from the point of view of
       the callee, argument alignment is computed from the bottom up. */
    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    run_start = 0;
    args_size = 0;
    while (run_start != nb_args) {
        int run_gen_reg = gen_reg, run_sse_reg = sse_reg;

        run_end = nb_args;
        stack_adjust = 0;
        for(i = run_start; (i < nb_args) && (run_end == nb_args); i++) {
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            switch (mode) {
            case x86_64_mode_memory:
            case x86_64_mode_x87:
            stack_arg:
                if (align == 16)
                    run_end = i;
                else
                    stack_adjust += size;
                break;

            case x86_64_mode_sse:
                sse_reg -= reg_count;
                if (sse_reg + reg_count > 8) goto stack_arg;
                break;

            case x86_64_mode_integer:
                gen_reg -= reg_count;
                if (gen_reg + reg_count > REGN) goto stack_arg;
                break;
            default: break; /* nothing to be done for x86_64_mode_none */
            }
        }

        gen_reg = run_gen_reg;
        sse_reg = run_sse_reg;

        /* adjust stack to align SSE boundary */
        if (stack_adjust &= 15) {
            /* fetch cpu flag before the following sub will change the value */
            if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
                gv(RC_INT);

            stack_adjust = 16 - stack_adjust;
            o(0x48);
            oad(0xec81, stack_adjust); /* sub $xxx, %rsp */
            args_size += stack_adjust;
        }

        for(i = run_start; i < run_end;) {
            /* Swap argument to top, it will possibly be changed here,
               and might use more temps. At the end of the loop we keep
               it on the stack and swap it back to its original position
               if it is a register. */
            SValue tmp = vtop[0];
            int arg_stored = 1;

            vtop[0] = vtop[-i];
            vtop[-i] = tmp;
            mode = classify_x86_64_arg(&vtop->type, NULL, &size, &align, &reg_count);

            switch (vtop->type.t & VT_BTYPE) {
            case VT_STRUCT:
                if (mode == x86_64_mode_sse) {
                    if (sse_reg > 8)
                        sse_reg -= reg_count;
                    else
                        arg_stored = 0;
                } else if (mode == x86_64_mode_integer) {
                    if (gen_reg > REGN)
                        gen_reg -= reg_count;
                    else
                        arg_stored = 0;
                }

                if (arg_stored) {
                    /* allocate the necessary size on stack */
                    o(0x48);
                    oad(0xec81, size); /* sub $xxx, %rsp */
                    /* generate structure store */
                    r = get_reg(RC_INT);
                    orex(1, r, 0, 0x89); /* mov %rsp, r */
                    o(0xe0 + REG_VALUE(r));
                    vset(&vtop->type, r | VT_LVAL, 0);
                    vswap();
                    vstore();
                    args_size += size;
                }
                break;

            case VT_LDOUBLE:
                assert(0);
                break;

            case VT_FLOAT:
            case VT_DOUBLE:
                assert(mode == x86_64_mode_sse);
                if (sse_reg > 8) {
                    --sse_reg;
                    r = gv(RC_FLOAT);
                    o(0x50); /* push %rax */
                    /* movq %xmmN, (%rsp) */
                    o(0xd60f66);
                    o(0x04 + REG_VALUE(r)*8);
                    o(0x24);
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;

            default:
                assert(mode == x86_64_mode_integer);
                /* simple type */
                /* XXX: implicit cast ? */
                if (gen_reg > REGN) {
                    --gen_reg;
                    r = gv(RC_INT);
                    orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;
            }

            /* And swap the argument back to its original position. */
            tmp = vtop[0];
            vtop[0] = vtop[-i];
            vtop[-i] = tmp;

            if (arg_stored) {
                vrotb(i+1);
                assert((vtop->type.t == tmp.type.t) && (vtop->r == tmp.r));
                vpop();
                --nb_args;
                --run_end;
            } else {
                ++i;
            }
        }

        /* handle 16 byte aligned arguments at end of run */
        run_start = i = run_end;
        while (i < nb_args) {
            /* Rotate argument to top since it will always be popped */
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            if (align != 16)
                break;

            vrotb(i+1);

            if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
                gv(RC_ST0);
                oad(0xec8148, size); /* sub $xxx, %rsp */
                o(0x7cdb); /* fstpt 0(%rsp) */
                g(0x24);
                g(0x00);
                args_size += size;
            } else {
                assert(mode == x86_64_mode_memory);

                /* allocate the necessary size on stack */
                o(0x48);
                oad(0xec81, size); /* sub $xxx, %rsp */
                /* generate structure store */
                r = get_reg(RC_INT);
                orex(1, r, 0, 0x89); /* mov %rsp, r */
                o(0xe0 + REG_VALUE(r));
                vset(&vtop->type, r | VT_LVAL, 0);
                vswap();
                vstore();
                args_size += size;
            }

            vpop();
            --nb_args;
        }
    }

    /* XXX This should be superfluous. */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            int d;
            gen_reg -= reg_count;
            r = gv(RC_INT);
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here. */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    if (vtop->type.ref->c != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (func_type->ref->c == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count <= 8) {
                    seen_reg_num += reg_count;
                } else {
                    seen_reg_num = 8;
                    goto stack_arg;
                }
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count <= 8) {
                    seen_sse_num += reg_count;
                } else {
                    seen_sse_num = 8;
                    goto stack_arg;
                }
                break;
            }
        }

        loc -= 16;
        /* movl $0x????????, -0x10(%rbp) */
        o(0xf045c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        o(0xf445c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        o(0xf845c7);
        gen_le32(seen_stack_size);
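        /* These three 32-bit stores appear to precompute the initial
           gp_offset, fp_offset and stack-overflow offset that tcc's own
           stdarg.h va_start picks up (see the "must be synced with
           stdarg.h" note above): integer registers consume 8 bytes
           each, SSE registers 16 bytes each starting at offset 48 in
           the register save area, and the rest lives on the stack. */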
        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            if (!tcc_state->nosse) {
                o(0xd60f66); /* movq */
                gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            }
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (tcc_state->nosse)
                tcc_error("SSE disabled but floating point arguments used");
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }

#ifdef CONFIG_TCC_BCHECK
    /* leave some room for bound checking code */
    if (tcc_state->do_bounds_check) {
        func_bound_offset = lbounds_section->data_offset;
        func_bound_ind = ind;
        oad(0xb8, 0); /* lbound section pointer */
        o(0xc78948); /* mov  %rax,%rdi ## first arg in %rdi, this must be ptr */
        oad(0xb8, 0); /* call to function */
    }
#endif
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check
        && func_bound_offset != lbounds_section->data_offset)
    {
        addr_t saved_ind;
        addr_t *bounds_ptr;
        Sym *sym_data;

        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
        *bounds_ptr = 0;

        /* generate bound local allocation */
        sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        saved_ind = ind;
        ind = func_bound_ind;
        greloc(cur_text_section, sym_data, ind + 1, R_386_32);
        ind = ind + 5 + 3;
        gen_static_call(TOK___bound_local_new);
        ind = saved_ind;

        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloc(cur_text_section, sym_data, ind + 1, R_386_32);
        oad(0xb8, 0); /* mov xxx, %rax */
        o(0xc78948); /* mov %rax,%rdi # first arg in %rdi, this must be ptr */
        gen_static_call(TOK___bound_local_delete);
        o(0x585a); /* restore returned value, if any */
    }
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
    o(0xec8148);  /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */
/* generate a jump to a label */
int gjmp(int t)
{
    return gjmp2(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}
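/* The two branches correspond to the two jump encodings: "eb rel8" is
   2 bytes long, hence the displacement a - ind - 2, while "e9 rel32"
   is 5 bytes long, hence a - ind - 5; in both cases the displacement
   is relative to the end of the instruction. */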
ST_FUNC void gtst_addr(int inv, int a)
{
    int v = vtop->r & VT_VALMASK;
    if (v == VT_CMP) {
        inv ^= (vtop--)->c.i;
        a -= ind + 2;
        if (a == (char)a) {
            g(inv - 32);
            g(a);
        } else {
            g(0x0f);
            oad(inv - 16, a - 4);
        }
    } else if ((v & ~1) == VT_JMP) {
        if ((v & 1) != inv) {
            gjmp_addr(a);
            gsym(vtop->c.i);
        } else {
            gsym(vtop->c.i);
            o(0x05eb);
            gjmp_addr(a);
        }
        vtop--;
    }
}
/* generate a test. set 'inv' to invert test. Stack entry is popped */
ST_FUNC int gtst(int inv, int t)
{
    int v = vtop->r & VT_VALMASK;

    if (nocode_wanted) {
        ;
    } else if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100)
        {
            /* This was a float compare. If the parity flag is set
               the result was unordered. For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test. We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump. */
            vtop->c.i &= ~0x100;
            if (inv == (vtop->c.i == TOK_NE))
                o(0x067a); /* jp +6 */
            else
            {
                g(0x0f);
                t = gjmp2(0x8a, t); /* jp t */
            }
        }
        g(0x0f);
        t = gjmp2((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            uint32_t n1, n = vtop->c.i;
            if (n) {
                while ((n1 = read32le(cur_text_section->data + n)))
                    n = n1;
                write32le(cur_text_section->data + n, t);
                t = vtop->c.i;
            }
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    }
    vtop--;
    return t;
}
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop->r = VT_CMP;
            vtop->c.i = op;
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}
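/* The gen_op8 path relies on the fact that all eight classic ALU
   operations share one encoding family selected by a 3-bit /opc
   field: opc 0 is add, 1 or, 2 adc, 3 sbb, 4 and, 5 sub, 6 xor,
   7 cmp.  For example, "add $1, %rax" with an 8-bit immediate is
   48 83 c0 01 (REX.W, opcode 0x83, ModRM 0xc0|opc<<3|reg, imm8). */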
void gen_opl(int op)
{
    gen_opi(op);
}
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int float_type =
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op;
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.i;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op | 0x100;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
        }
    }
}
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}
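/* For the SSE path, note the trick used for 'unsigned int': there is
   no unsigned form of cvtsi2sd, so the value is converted with the
   64-bit (REX.W) form instead, treating the zero-extended 32-bit value
   as a small positive 64-bit integer, which gives the correctly
   rounded result. */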
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}
/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    gfunc_call(1);
#else
    int r;
    r = gv(RC_INT); /* allocation size */
    /* sub r,%rsp */
    o(0x2b48);
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
    /* and ~15, %rsp */
    o(0xf0e48348);
    vpop();
#endif
}

/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/