/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS         25
#define NB_ASM_REGS     16
#define CONFIG_TCC_ASM
/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which does
   assumptions on it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0080 /* only for long double */
#define RC_R8      0x0100
#define RC_R9      0x0200
#define RC_R10     0x0400
#define RC_R11     0x0800
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_IRE2    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_FRE2    RC_XMM1 /* function return: second float register */
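
/* Note: the single-register classes above let callers pin a value to one
   specific register when an instruction encoding demands it: gen_opi()
   requests RC_RCX to get a variable shift count into %cl, and
   gv(RC_IRET) / gv(RC_FRET) force function results into %rax / %xmm0. */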
/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20
};

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)

/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_IRE2 TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_FRE2 TREG_XMM1 /* second float return register */
/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN 16

/* define if return values need to be extended explicitly
   at caller side (for interfacing with non-TCC compilers) */
#define PROMOTE_RET
/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#define USING_GLOBALS
#include "tcc.h"
#include <assert.h>
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0,
    0,
    0,
    0,
    0,
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};
static unsigned long func_sub_sp_offset;
static int func_ret_sub;

/* XXX: make it faster ? */
ST_FUNC void g(int c)
{
    int ind1;
    if (nocode_wanted)
        return;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}
ST_FUNC void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}

ST_FUNC void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

ST_FUNC void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

ST_FUNC void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}
static void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
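
/* Example: a 64-bit register-to-register move into %r8 goes through
   orex(1, TREG_R8, TREG_RAX, 0x89). The prefix byte is
   0x40 | REX.B(1) | REX.R(0)<<2 | REX.W(1)<<3 = 0x49, emitted before
   the 0x89 opcode; with the ModRM byte 0xc0 that follows elsewhere
   this yields "49 89 c0", i.e. mov %rax,%r8. */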
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
{
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a < 0 ? -a : a - t - 4);
        t = n;
    }
}
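
/* Forward jumps are chained through their own operand slots: while a
   label is still unknown, each pending jump's 4-byte displacement field
   holds the offset of the previous pending jump (0 terminates the
   chain). gsym_addr() above walks that chain and overwrites every slot
   with the real PC-relative displacement once the target is known. */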
static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}

/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)
{
    int t;
    if (nocode_wanted)
        return s;
    o(c);
    t = ind;
    gen_le32(s);
    return t;
}

/* generate jmp to a label */
#define gjmp2(instr,lbl) oad(instr,lbl)

ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
    gen_le32(c-4);
}
/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifdef TCC_TARGET_PE
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
        get_tok_str(sym->v, NULL), c, r,
        cur_text_section->data[ind-3],
        cur_text_section->data[ind-2],
        cur_text_section->data[ind-1]
        );
#endif
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        if (!(r & VT_SYM)) {
            /* Absolute memory reference */
            o(0x04 | op_reg); /* [sib] | destreg */
            oad(0x25, c);     /* disp32 */
        } else {
            o(0x05 | op_reg); /* (%rip)+disp32 | destreg */
            if (is_got) {
                gen_gotpcrel(r, sym, c);
            } else {
                gen_addrpc32(r, sym, c);
            }
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}
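
/* The cases above map directly onto ModRM addressing modes:
   - VT_CONST without a symbol: mod=00 rm=100 plus a SIB byte (0x25),
     i.e. an absolute disp32 reference;
   - VT_CONST with a symbol: mod=00 rm=101, i.e. disp32(%rip), relocated
     either PC-relatively or through the GOT;
   - VT_LOCAL: mod=01 (0x45, disp8) or mod=10 (0x85, disp32) with
     rm=101, i.e. an %rbp-relative stack slot;
   - otherwise a plain register-indirect reference, with or without a
     disp32. */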
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.i;
    if (fc != sv->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in load");

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.i = fc;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        if (fc != sv->c.i) {
            /* If the addend doesn't fit into a 32bit signed
               we must use a 64bit move.  We've checked above
               that this doesn't have a sym associated. */
            v1.type.t = VT_LLONG;
            v1.r = VT_CONST;
            v1.c.i = sv->c.i;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
            fc = 0;
        }
        ll = 0;
        /* Like GCC we can load from small enough properly sized
           structs and unions as well.
           XXX maybe move to generic operand handling, but should
           occur only with asm, so tccasm.c might also be a better place */
        if ((ft & VT_BTYPE) == VT_STRUCT) {
            int align;
            switch (type_size(&sv->type, &align)) {
                case 1: ft = VT_BYTE; break;
                case 2: ft = VT_SHORT; break;
                case 4: ft = VT_INT; break;
                case 8: ft = VT_LLONG; break;
                default:
                    tcc_error("invalid aggregate type for register load");
                    break;
            }
        }
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT)
                   || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR)
                   || ((ft & VT_BTYPE) == VT_FUNC)
                );
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.i);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            if (fc & 0x100) {
                v = vtop->cmp_r;
                fc &= ~0x100;
                /* This was a float compare.  If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE. */
                orex(0, r, 0, 0xb0 + REG_VALUE(r)); /* mov $0/1,%al */
                g(v ^ fc ^ (v == TOK_NE));
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
            orex(0,r,0, 0x0f);
            o(0xc0b6 + REG_VALUE(r) * 0x900); /* movzbl %al, %eax */
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(is64_type(ft), r, v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}
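
/* Example: loading an int local stored at -8(%rbp) into %eax selects
   b = 0x8b (mov), needs no REX prefix, and gen_modrm() produces the
   short %rbp-relative form, giving "8b 45 f8", mov -0x8(%rbp),%eax. */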
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    fr = v->r & VT_VALMASK;
    ft = v->type.t;
    fc = v->c.i;
    if (fc != v->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in store");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, etc. */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            orex(1, fr, r, op64);
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) && (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant symbolic case -> simple relocation */
#ifdef TCC_TARGET_PE
        greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
#else
        greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
#endif
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}
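
/* %r11 is the natural scratch register for the indirect case: it is
   caller-saved and is not an argument register in either the SysV or
   the Win64 calling convention, so loading the call target into it
   cannot clobber any argument value prepared by gfunc_call(). */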
#if defined(CONFIG_TCC_BCHECK)

static addr_t func_bound_offset;
static unsigned long func_bound_ind;

static void gen_bounds_call(int v)
{
    Sym *sym = external_global_sym(v, &func_old_type);
    oad(0xe8, 0);
#ifdef TCC_TARGET_PE
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
#else
    greloca(cur_text_section, sym, ind-4, R_X86_64_PLT32, -4);
#endif
}
/* generate a bounded pointer addition */
ST_FUNC void gen_bounded_ptr_add(void)
{
    vpush_global_sym(&func_old_type, TOK___bound_ptr_add);
    vrott(3);
    gfunc_call(2);
    vpushi(0);
    /* returned pointer is in rax */
    vtop->r = TREG_RAX | VT_BOUNDED;
    if (nocode_wanted)
        return;
    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
}
/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
ST_FUNC void gen_bounded_ptr_deref(void)
{
    addr_t func;
    int size, align;
    ElfW(Rela) *rel;
    Sym *sym;

    if (nocode_wanted)
        return;

    size = type_size(&vtop->type, &align);
    switch(size) {
    case  1: func = TOK___bound_ptr_indir1; break;
    case  2: func = TOK___bound_ptr_indir2; break;
    case  4: func = TOK___bound_ptr_indir4; break;
    case  8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
    default:
        /* may happen with struct member access */
        return;
        //tcc_error("unhandled size when dereferencing bounded pointer");
        //func = 0;
        //break;
    }
    sym = external_global_sym(func, &func_old_type);
    if (!sym->c)
        put_extern_sym(sym, NULL, 0, 0);
    /* patch relocation */
    /* XXX: find a better solution ? */
    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
}
#endif
#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}

static int func_scratch, func_alloca;
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

static void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}

static int using_regs(int size)
{
    return !(size > 8 || (size & (size - 1)));
}
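
/* using_regs() is true exactly for sizes 1, 2, 4 and 8 (a power of two,
   at most 8 bytes): those are the aggregates the Win64 ABI passes and
   returns by value in a register; anything else goes through memory
   via a hidden pointer. */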
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    size = type_size(vt, &align);
    if (!using_regs(size))
        return 0;
    if (size == 8)
        ret->t = VT_LLONG;
    else if (size == 4)
        ret->t = VT_INT;
    else if (size == 2)
        ret->t = VT_SHORT;
    else
        ret->t = VT_BYTE;
    ret->ref = NULL;
    return 1;
}

static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

static int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}
void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);
#endif

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (using_regs(size))
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (!using_regs(size)) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                if (tcc_state->nosse)
                    tcc_error("SSE disabled");
                if (arg >= REGN) {
                    gv(RC_XMM0);
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* Load directly to xmmN register */
                    gv(RC_XMM0 << arg);
                    d = arg_prepare_reg(arg);
                    /* mov %xmmN, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + arg*8 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);
    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);

    if ((vtop->r & VT_SYM) && vtop->sym->v == TOK_alloca) {
        /* need to add the "func_scratch" area after alloca */
        o(0x48); func_alloca = oad(0x05, func_alloca); /* add $NN, %rax */
#ifdef CONFIG_TCC_BCHECK
        if (tcc_state->do_bounds_check)
            gen_bounds_call(TOK___bound_alloca_nr); /* new region */
#endif
    }
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(Sym *func_sym)
{
    CType *func_type = &func_sym->type;
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;
    int n_arg = 0;

    func_ret_sub = 0;
    func_scratch = 32;
    func_alloca = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    func_var = (sym->f.func_type == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
    if (!using_regs(size)) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        n_arg++;
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (!using_regs(size)) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LLOCAL | VT_LVAL, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    if (tcc_state->nosse)
                        tcc_error("SSE disabled");
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_type->ref->f.func_type == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
#ifdef CONFIG_TCC_BCHECK
    /* leave some room for bound checking code */
    if (tcc_state->do_bounds_check) {
        func_bound_offset = lbounds_section->data_offset;
        func_bound_ind = ind;
        o(0xb848); /* lbound section pointer */
        gen_le64 (0);
        o(0xc18948); /* mov %rax,%rcx ## first arg in %rdi, this must be ptr */
        o(0x20ec8348); /* sub $20, %rsp */
        oad(0xb8, 0); /* call to function */
        o(0x20c48348); /* add $20, %rsp */
        if (n_arg >= 2 && strcmp (get_tok_str(func_sym->v, NULL), "main") == 0) {
            o(0x184d8b48); /* mov 0x18(%rbp),%rcx */
            o(0x20ec8348); /* sub $20, %rsp */
            gen_bounds_call(TOK___bound_main_arg);
            o(0x20c48348); /* add $20, %rsp */
        }
    }
#endif
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    /* align local size to word & save local variables */
    func_scratch = (func_scratch + 15) & -16;
    loc = (loc & -16) - func_scratch;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check
        && func_bound_offset != lbounds_section->data_offset)
    {
        addr_t saved_ind;
        addr_t *bounds_ptr;
        Sym *sym_data;

        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
        *bounds_ptr = 0;

        /* generate bound local allocation */
        sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        saved_ind = ind;
        ind = func_bound_ind;
        greloca(cur_text_section, sym_data, ind + 2, R_X86_64_64, 0);
        ind = ind + 10 + 3 + 4;
        gen_bounds_call(TOK___bound_local_new);
        ind = saved_ind;

        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloca(cur_text_section, sym_data, ind + 2, R_X86_64_64, 0);
        o(0xb848); /* mov xxx, %rax */
        gen_le64 (0);
        o(0xc18948); /* mov %rax,%rcx # first arg in %rdi, this must be ptr */
        o(0x20ec8348); /* sub $20, %rsp */
        gen_bounds_call(TOK___bound_local_delete);
        o(0x20c48348); /* add $20, %rsp */
        o(0x585a); /* restore returned value, if any */
    }
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    v = -loc;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
        o(0xec8148);   /* sub rsp, stacksize */
        gen_le32(v);
    }

    /* add the "func_scratch" area after each alloca seen */
    gsym_addr(func_alloca, -func_scratch);

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}
#else

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}

typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;
static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}
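
/* These are the class-merge rules of the SysV AMD64 parameter
   classification: memory absorbs everything, x87 merged with a
   different class degrades to memory, and integer wins over sse. Note
   that a single merged class is computed for a whole (<= 16 byte)
   struct here rather than one class per eightbyte as the full ABI
   algorithm prescribes, so e.g. struct { double d; long l; } is passed
   entirely in integer registers by this compiler. */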
static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
        return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }
    assert(0);
    return 0;
}
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    if (size > 4)
                        ret_t = VT_LLONG;
                    else if (size > 2)
                        ret_t = VT_INT;
                    else if (size > 1)
                        ret_t = VT_SHORT;
                    else
                        ret_t = VT_BYTE;
                    if ((ty->t & VT_BTYPE) == VT_STRUCT || (ty->t & VT_UNSIGNED))
                        ret_t |= VT_UNSIGNED;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}
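
/* Examples: struct { int a, b; } has size 8 and classifies as integer
   with reg_count 1 (fetched as one VT_LLONG); struct { double a, b; }
   classifies as sse with reg_count 2 (VT_QFLOAT, passed in two XMM
   registers); anything larger than 16 bytes is x86_64_mode_memory and
   is passed on the stack and returned via a hidden pointer. */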
ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}
#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return arg_regs[idx];
}
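
/* Argument slots 2 and 3 (%rdx and %rcx) are staged in %r10 and %r11
   because evaluating the remaining arguments through gv() may still
   clobber %rdx/%rcx; gfunc_call() copies them into the real argument
   registers only after all arguments have been computed. */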
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;
    char _onstack[nb_args ? nb_args : 1], *onstack = _onstack;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);
#endif

    /* calculate the number of integer/float register arguments, remember
       arguments to be passed via stack (in onstack[]), and also remember
       if we have to align the stack pointer to 16 (onstack[i] == 2).  Needs
       to be done in a left-to-right pass over arguments. */
    stack_adjust = 0;
    for(i = nb_args - 1; i >= 0; i--) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse && nb_sse_args + reg_count <= 8) {
            nb_sse_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_integer && nb_reg_args + reg_count <= REGN) {
            nb_reg_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_none) {
            onstack[i] = 0;
        } else {
            if (align == 16 && (stack_adjust &= 15)) {
                onstack[i] = 2;
                stack_adjust = 0;
            } else
                onstack[i] = 1;
            stack_adjust += size;
        }
    }

    if (nb_sse_args && tcc_state->nosse)
        tcc_error("SSE disabled but floating point arguments passed");

    /* fetch cpu flag before generating any code */
    if ((vtop->r & VT_VALMASK) == VT_CMP)
        gv(RC_INT);

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    args_size = 0;
    stack_adjust &= 15;
    for (i = 0; i < nb_args;) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (!onstack[i]) {
            ++i;
            continue;
        }
        /* Possibly adjust stack to align SSE boundary.  We're processing
           args from right to left while allocating happens left to right
           (stack grows down), so the adjustment needs to happen _after_
           an argument that requires it.  */
        if (stack_adjust) {
            o(0x50); /* push %rax; aka sub $8,%rsp */
            args_size += 8;
            stack_adjust = 0;
        }
        if (onstack[i] == 2)
            stack_adjust = 1;

        vrotb(i+1);

        switch (vtop->type.t & VT_BTYPE) {
        case VT_STRUCT:
            /* allocate the necessary size on stack */
            o(0x48);
            oad(0xec81, size); /* sub $xxx, %rsp */
            /* generate structure store */
            r = get_reg(RC_INT);
            orex(1, r, 0, 0x89); /* mov %rsp, r */
            o(0xe0 + REG_VALUE(r));
            vset(&vtop->type, r | VT_LVAL, 0);
            vswap();
            vstore();
            break;

        case VT_LDOUBLE:
            gv(RC_ST0);
            oad(0xec8148, size); /* sub $xxx, %rsp */
            o(0x7cdb); /* fstpt 0(%rsp) */
            g(0x24);
            g(0x00);
            break;

        case VT_FLOAT:
        case VT_DOUBLE:
            assert(mode == x86_64_mode_sse);
            r = gv(RC_FLOAT);
            o(0x50); /* push $rax */
            /* movq %xmmN, (%rsp) */
            o(0xd60f66);
            o(0x04 + REG_VALUE(r)*8);
            o(0x24);
            break;

        default:
            assert(mode == x86_64_mode_integer);
            /* simple type */
            /* XXX: implicit cast ? */
            r = gv(RC_INT);
            orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
            break;
        }
        args_size += size;

        vpop();
        --nb_args;
        onstack++;
    }

    /* XXX This should be superfluous.  */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            int d;
            gen_reg -= reg_count;
            r = gv(RC_INT);
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here.  */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    if (vtop->type.ref->f.func_type != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}
/* generate function prolog of type 't' */
void gfunc_prolog(Sym *func_sym)
{
    CType *func_type = &func_sym->type;
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    int n_arg = 0;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (sym->f.func_type == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count > REGN)
                    goto stack_arg;
                seen_reg_num += reg_count;
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count > 8)
                    goto stack_arg;
                seen_sse_num += reg_count;
                break;
            }
        }

        loc -= 16;
        /* movl $0x????????, -0x10(%rbp) */
        o(0xf045c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        o(0xf445c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        o(0xf845c7);
        gen_le32(seen_stack_size);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            if (!tcc_state->nosse) {
                o(0xd60f66); /* movq */
                gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            }
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        n_arg++;
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (tcc_state->nosse)
                tcc_error("SSE disabled but floating point arguments used");
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }

#ifdef CONFIG_TCC_BCHECK
    /* leave some room for bound checking code */
    if (tcc_state->do_bounds_check) {
        func_bound_offset = lbounds_section->data_offset;
        func_bound_ind = ind;
        o(0xb848); /* lbound section pointer */
        gen_le64 (0);
        o(0xc78948); /* mov %rax,%rdi ## first arg in %rdi, this must be ptr */
        oad(0xb8, 0); /* call to function */
        if (n_arg >= 2 && strcmp (get_tok_str(func_sym->v, NULL), "main") == 0) {
            o(0xf07d8b48); /* mov -0x10(%rbp),%rdi */
            gen_bounds_call(TOK___bound_main_arg);
        }
    }
#endif
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check
        && func_bound_offset != lbounds_section->data_offset)
    {
        addr_t saved_ind;
        addr_t *bounds_ptr;
        Sym *sym_data;

        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
        *bounds_ptr = 0;

        /* generate bound local allocation */
        sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        saved_ind = ind;
        ind = func_bound_ind;
        greloca(cur_text_section, sym_data, ind + 2, R_X86_64_64, 0);
        ind = ind + 10 + 3;
        gen_bounds_call(TOK___bound_local_new);
        ind = saved_ind;

        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloca(cur_text_section, sym_data, ind + 2, R_X86_64_64, 0);
        o(0xb848); /* mov xxx, %rax */
        gen_le64 (0);
        o(0xc78948); /* mov %rax,%rdi # first arg in %rdi, this must be ptr */
        gen_bounds_call(TOK___bound_local_delete);
        o(0x585a); /* restore returned value, if any */
    }
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148);   /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */
ST_FUNC void gen_fill_nops(int bytes)
{
    while (bytes--)
        g(0x90);
}

/* generate a jump to a label */
int gjmp(int t)
{
    return gjmp2(0xe9, t);
}
/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}
ST_FUNC int gjmp_append(int n, int t)
{
    void *p;
    /* insert vtop->c jump list in t */
    if (n) {
        uint32_t n1 = n, n2;
        while ((n2 = read32le(p = cur_text_section->data + n1)))
            n1 = n2;
        write32le(p, t);
        t = n;
    }
    return t;
}
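
/* gjmp_append() splices two pending-jump lists: it walks list 'n'
   along the chain threaded through the 32-bit displacement slots (see
   gsym_addr()) to its last entry, stores 't' there, and returns the
   new head, so both lists get patched together later. */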
ST_FUNC int gjmp_cond(int op, int t)
{
    if (op & 0x100) {
        /* This was a float compare.  If the parity flag is set
           the result was unordered.  For anything except != this
           means false and we don't jump (anding both conditions).
           For != this means true (oring both).
           Take care about inverting the test.  We need to jump
           to our target if the result was unordered and test wasn't NE,
           otherwise if unordered we don't want to jump.  */
        int v = vtop->cmp_r;
        op &= ~0x100;
        if (op ^ v ^ (v != TOK_NE))
            o(0x067a); /* jp +6 */
        else {
            g(0x0f);
            t = gjmp2(0x8a, t); /* jp t */
        }
    }
    g(0x0f);
    t = gjmp2(op - 16, t);
    return t;
}
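
/* The comparison codes stored in 'op' match the setcc opcode bytes
   (0x90 + condition), so "op - 16" turns them into the second byte of
   the corresponding two-byte jcc instruction (0x0f 0x80 + condition). */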
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT)
            vset_VT_CMP(op);
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}
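
/* Example: adding the constant 1 to an int held in %ecx takes the
   8-bit-immediate path and emits "83 c1 01", add $0x1,%ecx (opcode
   0x83, ModRM 0xc0 | opc<<3 | reg, immediate byte). */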
void gen_opl(int op)
{
    gen_opi(op);
}
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int float_type =
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fc80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vset_VT_CMP(op);
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.i;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
                vtop->r = r = r | VT_LVAL;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vset_VT_CMP(op | 0x100);
            vtop->cmp_r = op;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
                vtop->r = r = r | VT_LVAL;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
        }
    }
}
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(TREG_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(TREG_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}
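
/* cvttss2si/cvttsd2si truncate toward zero, which is exactly C's
   float-to-integer conversion; long double values are first narrowed
   to double through gen_cvt_ftof() above before being converted. */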
// Generate sign extension from 32 to 64 bits:
ST_FUNC void gen_cvt_sxtw(void)
{
    int r = gv(RC_INT);
    /* x86_64 specific: movslq */
    o(0x6348);
    o(0xc0 + (REG_VALUE(r) << 3) + REG_VALUE(r));
}

/* char/short to int conversion */
ST_FUNC void gen_cvt_csti(int t)
{
    int r, sz, xl, ll;
    r = gv(RC_INT);
    sz = !(t & VT_UNSIGNED);
    xl = (t & VT_BTYPE) == VT_SHORT;
    ll = (vtop->type.t & VT_BTYPE) == VT_LLONG;
    orex(ll, r, 0, 0xc0b60f /* mov[sz] %a[xl], %eax */
        | (sz << 3 | xl) << 8
        | (REG_VALUE(r) << 3 | REG_VALUE(r)) << 16
        );
}
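
/* The packed opcode in gen_cvt_csti() decodes as 0x0f 0xb6+x modrm:
   the second byte becomes 0xb6 (movzbl), 0xb7 (movzwl), 0xbe (movsbl)
   or 0xbf (movswl) depending on the sz (signed) and xl (16-bit) bits,
   and the ModRM byte 0xc0 | r<<3 | r extends the register in place. */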
/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}

#ifdef TCC_TARGET_PE
/* Save result of gen_vla_alloc onto the stack */
ST_FUNC void gen_vla_result(int addr) {
    /* mov %rax,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RAX, VT_LOCAL, NULL, addr);
}
#endif
/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
    int use_call = 0;

#if defined(CONFIG_TCC_BCHECK)
    use_call = tcc_state->do_bounds_check;
#endif
#ifdef TCC_TARGET_PE  /* alloca does more than just adjust %rsp on Windows */
    use_call = 1;
#endif
    if (use_call)
    {
        vpush_global_sym(&func_old_type, TOK_alloca);
        vswap(); /* Move alloca ref past allocation size */
        gfunc_call(1);
    }
    else {
        int r;
        r = gv(RC_INT); /* allocation size */
        /* sub r,%rsp */
        o(0x2b48);
        o(0xe0 | REG_VALUE(r));
        /* We align to 16 bytes rather than align */
        /* and ~15, %rsp */
        o(0xf0e48348);
        vpop();
    }
}
/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/