1 /*
2 * x86-64 code generator for TCC
4 * Copyright (c) 2008 Shinichiro Hamaji
6 * Based on i386-gen.c by Fabrice Bellard
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
23 #ifdef TARGET_DEFS_ONLY
25 /* number of available registers */
26 #define NB_REGS 25
27 #define NB_ASM_REGS 16
28 #define CONFIG_TCC_ASM
30 /* a register can belong to several classes. The classes must be
31 sorted from more general to more precise (see gv2() code which makes
32 assumptions about this ordering). */
33 #define RC_INT 0x0001 /* generic integer register */
34 #define RC_FLOAT 0x0002 /* generic float register */
35 #define RC_RAX 0x0004
36 #define RC_RCX 0x0008
37 #define RC_RDX 0x0010
38 #define RC_ST0 0x0080 /* only for long double */
39 #define RC_R8 0x0100
40 #define RC_R9 0x0200
41 #define RC_R10 0x0400
42 #define RC_R11 0x0800
43 #define RC_XMM0 0x1000
44 #define RC_XMM1 0x2000
45 #define RC_XMM2 0x4000
46 #define RC_XMM3 0x8000
47 #define RC_XMM4 0x10000
48 #define RC_XMM5 0x20000
49 #define RC_XMM6 0x40000
50 #define RC_XMM7 0x80000
51 #define RC_IRET RC_RAX /* function return: integer register */
52 #define RC_IRE2 RC_RDX /* function return: second integer register */
53 #define RC_FRET RC_XMM0 /* function return: float register */
54 #define RC_FRE2 RC_XMM1 /* function return: second float register */
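/* Illustrative example: a register's entry in reg_classes[] below ORs
   its generic class with its specific one, e.g. RC_INT | RC_RAX for
   %rax.  Code that just needs some integer register calls
   get_reg(RC_INT) and may get %rax, %rcx or %rdx (see the table below),
   while code that needs one particular register, e.g. division needing
   %rax, asks for RC_RAX alone. */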
56 /* pretty names for the registers */
57 enum {
58 TREG_RAX = 0,
59 TREG_RCX = 1,
60 TREG_RDX = 2,
61 TREG_RSP = 4,
62 TREG_RSI = 6,
63 TREG_RDI = 7,
65 TREG_R8 = 8,
66 TREG_R9 = 9,
67 TREG_R10 = 10,
68 TREG_R11 = 11,
70 TREG_XMM0 = 16,
71 TREG_XMM1 = 17,
72 TREG_XMM2 = 18,
73 TREG_XMM3 = 19,
74 TREG_XMM4 = 20,
75 TREG_XMM5 = 21,
76 TREG_XMM6 = 22,
77 TREG_XMM7 = 23,
79 TREG_ST0 = 24,
81 TREG_MEM = 0x20
82 };
84 #define REX_BASE(reg) (((reg) >> 3) & 1)
85 #define REG_VALUE(reg) ((reg) & 7)
87 /* return registers for function */
88 #define REG_IRET TREG_RAX /* single word int return register */
89 #define REG_IRE2 TREG_RDX /* second word return register (for long long) */
90 #define REG_FRET TREG_XMM0 /* float return register */
91 #define REG_FRE2 TREG_XMM1 /* second float return register */
93 /* defined if function parameters must be evaluated in reverse order */
94 #define INVERT_FUNC_PARAMS
96 /* pointer size, in bytes */
97 #define PTR_SIZE 8
99 /* long double size and alignment, in bytes */
100 #define LDOUBLE_SIZE 16
101 #define LDOUBLE_ALIGN 16
102 /* maximum alignment (for aligned attribute support) */
103 #define MAX_ALIGN 16
105 /* define if return values need to be extended explicitly
106 at caller side (for interfacing with non-TCC compilers) */
107 #define PROMOTE_RET
108 /******************************************************/
109 #else /* ! TARGET_DEFS_ONLY */
110 /******************************************************/
111 #define USING_GLOBALS
112 #include "tcc.h"
113 #include <assert.h>
115 ST_DATA const int reg_classes[NB_REGS] = {
116 /* eax */ RC_INT | RC_RAX,
117 /* ecx */ RC_INT | RC_RCX,
118 /* edx */ RC_INT | RC_RDX,
124 RC_R8,
125 RC_R9,
126 RC_R10,
127 RC_R11,
132 /* xmm0 */ RC_FLOAT | RC_XMM0,
133 /* xmm1 */ RC_FLOAT | RC_XMM1,
134 /* xmm2 */ RC_FLOAT | RC_XMM2,
135 /* xmm3 */ RC_FLOAT | RC_XMM3,
136 /* xmm4 */ RC_FLOAT | RC_XMM4,
137 /* xmm5 */ RC_FLOAT | RC_XMM5,
138 /* xmm6 and xmm7 are included so gv() can be used on them,
139 but they are not tagged with RC_FLOAT because they are
140 callee saved on Windows */
141 RC_XMM6,
142 RC_XMM7,
143 /* st0 */ RC_ST0
144 };
146 static unsigned long func_sub_sp_offset;
147 static int func_ret_sub;
149 /* XXX: make it faster ? */
150 ST_FUNC void g(int c)
152 int ind1;
153 if (nocode_wanted)
154 return;
155 ind1 = ind + 1;
156 if (ind1 > cur_text_section->data_allocated)
157 section_realloc(cur_text_section, ind1);
158 cur_text_section->data[ind] = c;
159 ind = ind1;
162 ST_FUNC void o(unsigned int c)
164 while (c) {
165 g(c);
166 c = c >> 8;
170 ST_FUNC void gen_le16(int v)
172 g(v);
173 g(v >> 8);
176 ST_FUNC void gen_le32(int c)
178 g(c);
179 g(c >> 8);
180 g(c >> 16);
181 g(c >> 24);
184 ST_FUNC void gen_le64(int64_t c)
186 g(c);
187 g(c >> 8);
188 g(c >> 16);
189 g(c >> 24);
190 g(c >> 32);
191 g(c >> 40);
192 g(c >> 48);
193 g(c >> 56);
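/* Illustrative example: these helpers emit little-endian immediates one
   byte at a time through g(); gen_le32(0x12345678) appends the bytes
   78 56 34 12 to cur_text_section, and gen_le64() does the same over
   eight bytes. */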
196 static void orex(int ll, int r, int r2, int b)
198 if ((r & VT_VALMASK) >= VT_CONST)
199 r = 0;
200 if ((r2 & VT_VALMASK) >= VT_CONST)
201 r2 = 0;
202 if (ll || REX_BASE(r) || REX_BASE(r2))
203 o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
204 o(b);
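/* Illustrative example: orex() emits a REX prefix (0100WRXB) only when
   one is needed: 'll' sets REX.W (64-bit operand size), REX_BASE(r)
   sets REX.B and REX_BASE(r2) sets REX.R, then the opcode byte(s) 'b'
   follow.  orex(1, TREG_R8, TREG_RAX, 0x8b) emits 0x49 0x8b
   (REX.W+REX.B then the mov opcode), while orex(0, TREG_RAX, 0, 0x8b)
   emits just 0x8b since no prefix bit is set. */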
207 /* output a symbol and patch all calls to it */
208 ST_FUNC void gsym_addr(int t, int a)
210 while (t) {
211 unsigned char *ptr = cur_text_section->data + t;
212 uint32_t n = read32le(ptr); /* next value */
213 write32le(ptr, a < 0 ? -a : a - t - 4);
214 t = n;
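/* Illustrative example: forward jumps whose target is unknown are
   chained through their own 32-bit displacement slots; each pending
   slot stores the offset of the next one and 0 ends the chain.
   gsym_addr() walks the chain and rewrites every slot with the final
   PC-relative displacement 'a - t - 4', or with the absolute value -a
   when 'a' is negative (used by gsym_addr(func_alloca, -func_scratch)
   in the Win64 epilog). */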
218 static int is64_type(int t)
220 return ((t & VT_BTYPE) == VT_PTR ||
221 (t & VT_BTYPE) == VT_FUNC ||
222 (t & VT_BTYPE) == VT_LLONG);
225 /* instruction + 4 bytes data. Return the address of the data */
226 static int oad(int c, int s)
228 int t;
229 if (nocode_wanted)
230 return s;
231 o(c);
232 t = ind;
233 gen_le32(s);
234 return t;
237 /* generate jmp to a label */
238 #define gjmp2(instr,lbl) oad(instr,lbl)
240 ST_FUNC void gen_addr32(int r, Sym *sym, int c)
242 if (r & VT_SYM)
243 greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;
244 gen_le32(c);
247 /* output constant with relocation if 'r & VT_SYM' is true */
248 ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
250 if (r & VT_SYM)
251 greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
252 gen_le64(c);
255 /* output constant with relocation if 'r & VT_SYM' is true */
256 ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
258 if (r & VT_SYM)
259 greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
260 gen_le32(c-4);
263 /* output got address with relocation */
264 static void gen_gotpcrel(int r, Sym *sym, int c)
266 #ifdef TCC_TARGET_PE
267 tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
268 get_tok_str(sym->v, NULL), c, r,
269 cur_text_section->data[ind-3],
270 cur_text_section->data[ind-2],
271 cur_text_section->data[ind-1]
273 #endif
274 greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
275 gen_le32(0);
276 if (c) {
277 /* we use add c, %xxx for displacement */
278 orex(1, r, 0, 0x81);
279 o(0xc0 + REG_VALUE(r));
280 gen_le32(c);
284 static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
286 op_reg = REG_VALUE(op_reg) << 3;
287 if ((r & VT_VALMASK) == VT_CONST) {
288 /* constant memory reference */
289 if (!(r & VT_SYM)) {
290 /* Absolute memory reference */
291 o(0x04 | op_reg); /* [sib] | destreg */
292 oad(0x25, c); /* disp32 */
293 } else {
294 o(0x05 | op_reg); /* (%rip)+disp32 | destreg */
295 if (is_got) {
296 gen_gotpcrel(r, sym, c);
297 } else {
298 gen_addrpc32(r, sym, c);
301 } else if ((r & VT_VALMASK) == VT_LOCAL) {
302 /* currently, we use only ebp as base */
303 if (c == (char)c) {
304 /* short reference */
305 o(0x45 | op_reg);
306 g(c);
307 } else {
308 oad(0x85 | op_reg, c);
310 } else if ((r & VT_VALMASK) >= TREG_MEM) {
311 if (c) {
312 g(0x80 | op_reg | REG_VALUE(r));
313 gen_le32(c);
314 } else {
315 g(0x00 | op_reg | REG_VALUE(r));
317 } else {
318 g(0x00 | op_reg | REG_VALUE(r));
322 /* generate a modrm reference. 'op_reg' contains the additional 3
323 opcode bits */
324 static void gen_modrm(int op_reg, int r, Sym *sym, int c)
326 gen_modrm_impl(op_reg, r, sym, c, 0);
329 /* generate a modrm reference. 'op_reg' contains the additional 3
330 opcode bits */
331 static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
333 int is_got;
334 is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
335 orex(1, r, op_reg, opcode);
336 gen_modrm_impl(op_reg, r, sym, c, is_got);
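/* Illustrative examples of the ModRM forms produced above:
   - VT_LOCAL with a small offset c: 0x45|op_reg<<3 plus one byte,
     i.e. "c(%rbp)";
   - VT_CONST without VT_SYM: the SIB form 0x04|op_reg<<3, 0x25, disp32,
     an absolute address;
   - VT_CONST with VT_SYM: 0x05|op_reg<<3 plus a %rip-relative
     relocation (PC32, or GOTPCREL when is_got is set). */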
340 /* load 'r' from value 'sv' */
341 void load(int r, SValue *sv)
343 int v, t, ft, fc, fr;
344 SValue v1;
346 #ifdef TCC_TARGET_PE
347 SValue v2;
348 sv = pe_getimport(sv, &v2);
349 #endif
351 fr = sv->r;
352 ft = sv->type.t & ~VT_DEFSIGN;
353 fc = sv->c.i;
354 if (fc != sv->c.i && (fr & VT_SYM))
355 tcc_error("64 bit addend in load");
357 ft &= ~(VT_VOLATILE | VT_CONSTANT);
359 #ifndef TCC_TARGET_PE
360 /* we use indirect access via got */
361 if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
362 (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
363 /* use the result register as a temporary register */
364 int tr = r | TREG_MEM;
365 if (is_float(ft)) {
366 /* we cannot use a float register as a temporary register */
367 tr = get_reg(RC_INT) | TREG_MEM;
368 }
369 gen_modrm64(0x8b, tr, fr, sv->sym, 0);
371 /* load from the temporary register */
372 fr = tr | VT_LVAL;
374 #endif
376 v = fr & VT_VALMASK;
377 if (fr & VT_LVAL) {
378 int b, ll;
379 if (v == VT_LLOCAL) {
380 v1.type.t = VT_PTR;
381 v1.r = VT_LOCAL | VT_LVAL;
382 v1.c.i = fc;
383 fr = r;
384 if (!(reg_classes[fr] & (RC_INT|RC_R11)))
385 fr = get_reg(RC_INT);
386 load(fr, &v1);
388 if (fc != sv->c.i) {
389 /* If the addend doesn't fit into a 32-bit signed value
390 we must use a 64-bit move. We've checked above
391 that this doesn't have a sym associated. */
392 v1.type.t = VT_LLONG;
393 v1.r = VT_CONST;
394 v1.c.i = sv->c.i;
395 fr = r;
396 if (!(reg_classes[fr] & (RC_INT|RC_R11)))
397 fr = get_reg(RC_INT);
398 load(fr, &v1);
399 fc = 0;
401 ll = 0;
402 /* Like GCC we can load from small enough properly sized
403 structs and unions as well.
404 XXX maybe move to generic operand handling, but should
405 occur only with asm, so tccasm.c might also be a better place */
406 if ((ft & VT_BTYPE) == VT_STRUCT) {
407 int align;
408 switch (type_size(&sv->type, &align)) {
409 case 1: ft = VT_BYTE; break;
410 case 2: ft = VT_SHORT; break;
411 case 4: ft = VT_INT; break;
412 case 8: ft = VT_LLONG; break;
413 default:
414 tcc_error("invalid aggregate type for register load");
415 break;
418 if ((ft & VT_BTYPE) == VT_FLOAT) {
419 b = 0x6e0f66;
420 r = REG_VALUE(r); /* movd */
421 } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
422 b = 0x7e0ff3; /* movq */
423 r = REG_VALUE(r);
424 } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
425 b = 0xdb, r = 5; /* fldt */
426 } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
427 b = 0xbe0f; /* movsbl */
428 } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
429 b = 0xb60f; /* movzbl */
430 } else if ((ft & VT_TYPE) == VT_SHORT) {
431 b = 0xbf0f; /* movswl */
432 } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
433 b = 0xb70f; /* movzwl */
434 } else {
435 assert(((ft & VT_BTYPE) == VT_INT)
436 || ((ft & VT_BTYPE) == VT_LLONG)
437 || ((ft & VT_BTYPE) == VT_PTR)
438 || ((ft & VT_BTYPE) == VT_FUNC)
440 ll = is64_type(ft);
441 b = 0x8b;
443 if (ll) {
444 gen_modrm64(b, r, fr, sv->sym, fc);
445 } else {
446 orex(ll, fr, r, b);
447 gen_modrm(r, fr, sv->sym, fc);
449 } else {
450 if (v == VT_CONST) {
451 if (fr & VT_SYM) {
452 #ifdef TCC_TARGET_PE
453 orex(1,0,r,0x8d);
454 o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
455 gen_addrpc32(fr, sv->sym, fc);
456 #else
457 if (sv->sym->type.t & VT_STATIC) {
458 orex(1,0,r,0x8d);
459 o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
460 gen_addrpc32(fr, sv->sym, fc);
461 } else {
462 orex(1,0,r,0x8b);
463 o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
464 gen_gotpcrel(r, sv->sym, fc);
466 #endif
467 } else if (is64_type(ft)) {
468 orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
469 gen_le64(sv->c.i);
470 } else {
471 orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
472 gen_le32(fc);
474 } else if (v == VT_LOCAL) {
475 orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
476 gen_modrm(r, VT_LOCAL, sv->sym, fc);
477 } else if (v == VT_CMP) {
478 if (fc & 0x100)
480 v = vtop->cmp_r;
481 fc &= ~0x100;
482 /* This was a float compare. If the parity bit is
483 set the result was unordered, meaning false for everything
484 except TOK_NE, and true for TOK_NE. */
485 orex(0, r, 0, 0xb0 + REG_VALUE(r)); /* mov $0/1,%al */
486 g(v ^ fc ^ (v == TOK_NE));
487 o(0x037a + (REX_BASE(r) << 8));
489 orex(0,r,0, 0x0f); /* setxx %br */
490 o(fc);
491 o(0xc0 + REG_VALUE(r));
492 orex(0,r,0, 0x0f);
493 o(0xc0b6 + REG_VALUE(r) * 0x900); /* movzbl %al, %eax */
494 } else if (v == VT_JMP || v == VT_JMPI) {
495 t = v & 1;
496 orex(0,r,0,0);
497 oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
498 o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
499 gsym(fc);
500 orex(0,r,0,0);
501 oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
502 } else if (v != r) {
503 if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
504 if (v == TREG_ST0) {
505 /* gen_cvt_ftof(VT_DOUBLE); */
506 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
507 /* movsd -0x10(%rsp),%xmmN */
508 o(0x100ff2);
509 o(0x44 + REG_VALUE(r)*8); /* %xmmN */
510 o(0xf024);
511 } else {
512 assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
513 if ((ft & VT_BTYPE) == VT_FLOAT) {
514 o(0x100ff3);
515 } else {
516 assert((ft & VT_BTYPE) == VT_DOUBLE);
517 o(0x100ff2);
519 o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
521 } else if (r == TREG_ST0) {
522 assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
523 /* gen_cvt_ftof(VT_LDOUBLE); */
524 /* movsd %xmmN,-0x10(%rsp) */
525 o(0x110ff2);
526 o(0x44 + REG_VALUE(r)*8); /* %xmmN */
527 o(0xf024);
528 o(0xf02444dd); /* fldl -0x10(%rsp) */
529 } else {
530 orex(is64_type(ft), r, v, 0x89);
531 o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
537 /* store register 'r' in lvalue 'v' */
538 void store(int r, SValue *v)
540 int fr, bt, ft, fc;
541 int op64 = 0;
542 /* store the REX prefix in this variable when PIC is enabled */
543 int pic = 0;
545 #ifdef TCC_TARGET_PE
546 SValue v2;
547 v = pe_getimport(v, &v2);
548 #endif
550 fr = v->r & VT_VALMASK;
551 ft = v->type.t;
552 fc = v->c.i;
553 if (fc != v->c.i && (fr & VT_SYM))
554 tcc_error("64 bit addend in store");
555 ft &= ~(VT_VOLATILE | VT_CONSTANT);
556 bt = ft & VT_BTYPE;
558 #ifndef TCC_TARGET_PE
559 /* we need to access the variable via got */
560 if (fr == VT_CONST && (v->r & VT_SYM)) {
561 /* mov xx(%rip), %r11 */
562 o(0x1d8b4c);
563 gen_gotpcrel(TREG_R11, v->sym, v->c.i);
564 pic = is64_type(bt) ? 0x49 : 0x41;
566 #endif
568 /* XXX: incorrect if float reg to reg */
569 if (bt == VT_FLOAT) {
570 o(0x66);
571 o(pic);
572 o(0x7e0f); /* movd */
573 r = REG_VALUE(r);
574 } else if (bt == VT_DOUBLE) {
575 o(0x66);
576 o(pic);
577 o(0xd60f); /* movq */
578 r = REG_VALUE(r);
579 } else if (bt == VT_LDOUBLE) {
580 o(0xc0d9); /* fld %st(0) */
581 o(pic);
582 o(0xdb); /* fstpt */
583 r = 7;
584 } else {
585 if (bt == VT_SHORT)
586 o(0x66);
587 o(pic);
588 if (bt == VT_BYTE || bt == VT_BOOL)
589 orex(0, 0, r, 0x88);
590 else if (is64_type(bt))
591 op64 = 0x89;
592 else
593 orex(0, 0, r, 0x89);
595 if (pic) {
596 /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
597 if (op64)
598 o(op64);
599 o(3 + (r << 3));
600 } else if (op64) {
601 if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
602 gen_modrm64(op64, r, v->r, v->sym, fc);
603 } else if (fr != r) {
604 orex(1, fr, r, op64);
605 o(0xc0 + fr + r * 8); /* mov r, fr */
607 } else {
608 if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
609 gen_modrm(r, v->r, v->sym, fc);
610 } else if (fr != r) {
611 o(0xc0 + fr + r * 8); /* mov r, fr */
616 /* 'is_jmp' is '1' if it is a jump */
617 static void gcall_or_jmp(int is_jmp)
619 int r;
620 if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
621 ((vtop->r & VT_SYM) && (vtop->c.i-4) == (int)(vtop->c.i-4))) {
622 /* constant symbolic case -> simple relocation */
623 #ifdef TCC_TARGET_PE
624 greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
625 #else
626 greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
627 #endif
628 oad(0xe8 + is_jmp, 0); /* call/jmp im */
629 } else {
630 /* otherwise, indirect call */
631 r = TREG_R11;
632 load(r, vtop);
633 o(0x41); /* REX */
634 o(0xff); /* call/jmp *r */
635 o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
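/* Illustrative example: the direct case emits e8 (call) or e9 (jmp)
   with a PC32/PLT32 relocation against the symbol; the indirect case
   first loads the target into %r11 and then emits 41 ff d3
   ("call *%r11") or 41 ff e3 ("jmp *%r11"). */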
639 #if defined(CONFIG_TCC_BCHECK)
640 static addr_t func_bound_offset;
641 static unsigned long func_bound_ind;
643 static void gen_bounds_call(int v)
645 Sym *sym = external_global_sym(v, &func_old_type);
646 oad(0xe8, 0);
647 #ifdef TCC_TARGET_PE
648 greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
649 #else
650 greloca(cur_text_section, sym, ind-4, R_X86_64_PLT32, -4);
651 #endif
654 /* generate a bounded pointer addition */
655 ST_FUNC void gen_bounded_ptr_add(void)
657 vpush_global_sym(&func_old_type, TOK___bound_ptr_add);
658 vrott(3);
659 gfunc_call(2);
660 vpushi(0);
661 /* returned pointer is in rax */
662 vtop->r = TREG_RAX | VT_BOUNDED;
663 if (nocode_wanted)
664 return;
665 /* relocation offset of the bounding function call point */
666 vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
669 /* patch pointer addition in vtop so that pointer dereferencing is
670 also tested */
671 ST_FUNC void gen_bounded_ptr_deref(void)
673 addr_t func;
674 int size, align;
675 ElfW(Rela) *rel;
676 Sym *sym;
678 if (nocode_wanted)
679 return;
681 size = type_size(&vtop->type, &align);
682 switch(size) {
683 case 1: func = TOK___bound_ptr_indir1; break;
684 case 2: func = TOK___bound_ptr_indir2; break;
685 case 4: func = TOK___bound_ptr_indir4; break;
686 case 8: func = TOK___bound_ptr_indir8; break;
687 case 12: func = TOK___bound_ptr_indir12; break;
688 case 16: func = TOK___bound_ptr_indir16; break;
689 default:
690 /* may happen with struct member access */
691 return;
692 //tcc_error("unhandled size when dereferencing bounded pointer");
693 //func = 0;
694 //break;
696 sym = external_global_sym(func, &func_old_type);
697 if (!sym->c)
698 put_extern_sym(sym, NULL, 0, 0);
699 /* patch relocation */
700 /* XXX: find a better solution ? */
701 rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
702 rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
705 #ifdef TCC_TARGET_PE
706 # define TREG_FASTCALL_1 TREG_RCX
707 #else
708 # define TREG_FASTCALL_1 TREG_RDI
709 #endif
711 static void gen_bounds_prolog(void)
713 /* leave some room for bound checking code */
714 func_bound_offset = lbounds_section->data_offset;
715 func_bound_ind = ind;
716 o(0xb848 + TREG_FASTCALL_1 * 0x100); /*lbound section pointer */
717 gen_le64 (0);
718 oad(0xb8, 0); /* call to function */
721 static void gen_bounds_epilog(void)
723 addr_t saved_ind;
724 addr_t *bounds_ptr;
725 Sym *sym_data;
727 /* add end of table info */
728 bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
729 *bounds_ptr = 0;
731 /* generate bound local allocation */
732 sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
733 func_bound_offset, lbounds_section->data_offset);
734 saved_ind = ind;
735 ind = func_bound_ind;
736 greloca(cur_text_section, sym_data, ind + 2, R_X86_64_64, 0);
737 ind = ind + 10;
738 gen_bounds_call(TOK___bound_local_new);
739 ind = saved_ind;
741 /* generate bound check local freeing */
742 o(0x525051); /* save returned value, if any (+ scratch-space for windows) */
743 greloca(cur_text_section, sym_data, ind + 2, R_X86_64_64, 0);
744 o(0xb848 + TREG_FASTCALL_1 * 0x100); /* mov xxx, %rcx/di */
745 gen_le64 (0);
746 gen_bounds_call(TOK___bound_local_delete);
747 o(0x59585a); /* restore returned value, if any */
749 #endif
751 #ifdef TCC_TARGET_PE
753 static int func_scratch, func_alloca;
755 #define REGN 4
756 static const uint8_t arg_regs[REGN] = {
757 TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
758 };
760 /* Prepare arguments in R10 and R11 rather than RCX and RDX
761 because gv() will not ever use these */
762 static int arg_prepare_reg(int idx) {
763 if (idx == 0 || idx == 1)
764 /* idx=0: r10, idx=1: r11 */
765 return idx + 10;
766 else
767 return arg_regs[idx];
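/* Illustrative example: argument slot 0 is staged in %r10 and slot 1 in
   %r11 because gv() may still allocate %rcx/%rdx while later arguments
   are evaluated; they are copied into the real Win64 argument registers
   just before the call ("mov %r10, %rcx" / "mov %r11, %rdx" in
   gfunc_call() below), while slots 2 and 3 go straight to %r8/%r9. */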
770 /* Generate function call. The function address is pushed first, then
771 all the parameters in call order. This function pops all the
772 parameters and the function address. */
774 static void gen_offs_sp(int b, int r, int d)
776 orex(1,0,r & 0x100 ? 0 : r, b);
777 if (d == (char)d) {
778 o(0x2444 | (REG_VALUE(r) << 3));
779 g(d);
780 } else {
781 o(0x2484 | (REG_VALUE(r) << 3));
782 gen_le32(d);
786 static int using_regs(int size)
788 return !(size > 8 || (size & (size - 1)));
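/* Illustrative example: using_regs() is true only for sizes 1, 2, 4 and
   8 (a power of two no larger than 8 bytes); a 3-byte or 16-byte struct
   fails the test and is passed or returned through memory, matching the
   Win64 convention for aggregates. */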
791 /* Return the number of registers needed to return the struct, or 0 if
792 returning via struct pointer. */
793 ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
795 int size, align;
796 *ret_align = 1; // Never have to re-align return values for x86-64
797 *regsize = 8;
798 size = type_size(vt, &align);
799 if (!using_regs(size))
800 return 0;
801 if (size == 8)
802 ret->t = VT_LLONG;
803 else if (size == 4)
804 ret->t = VT_INT;
805 else if (size == 2)
806 ret->t = VT_SHORT;
807 else
808 ret->t = VT_BYTE;
809 ret->ref = NULL;
810 return 1;
813 static int is_sse_float(int t) {
814 int bt;
815 bt = t & VT_BTYPE;
816 return bt == VT_DOUBLE || bt == VT_FLOAT;
819 static int gfunc_arg_size(CType *type) {
820 int align;
821 if (type->t & (VT_ARRAY|VT_BITFIELD))
822 return 8;
823 return type_size(type, &align);
826 void gfunc_call(int nb_args)
828 int size, r, args_size, i, d, bt, struct_size;
829 int arg;
831 #ifdef CONFIG_TCC_BCHECK
832 if (tcc_state->do_bounds_check)
833 gbound_args(nb_args);
834 #endif
836 args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
837 arg = nb_args;
839 /* for struct arguments, we need to call memcpy, and that call would
840 clobber the register-passed arguments we are preparing.
841 So, we process the arguments that will be passed on the stack first. */
842 struct_size = args_size;
843 for(i = 0; i < nb_args; i++) {
844 SValue *sv;
846 --arg;
847 sv = &vtop[-i];
848 bt = (sv->type.t & VT_BTYPE);
849 size = gfunc_arg_size(&sv->type);
851 if (using_regs(size))
852 continue; /* arguments of size 1, 2, 4 or 8 bytes are passed in registers or directly on the stack */
854 if (bt == VT_STRUCT) {
855 /* align to stack align size */
856 size = (size + 15) & ~15;
857 /* generate structure store */
858 r = get_reg(RC_INT);
859 gen_offs_sp(0x8d, r, struct_size);
860 struct_size += size;
862 /* generate memcpy call */
863 vset(&sv->type, r | VT_LVAL, 0);
864 vpushv(sv);
865 vstore();
866 --vtop;
867 } else if (bt == VT_LDOUBLE) {
868 gv(RC_ST0);
869 gen_offs_sp(0xdb, 0x107, struct_size);
870 struct_size += 16;
874 if (func_scratch < struct_size)
875 func_scratch = struct_size;
877 arg = nb_args;
878 struct_size = args_size;
880 for(i = 0; i < nb_args; i++) {
881 --arg;
882 bt = (vtop->type.t & VT_BTYPE);
884 size = gfunc_arg_size(&vtop->type);
885 if (!using_regs(size)) {
886 /* align to stack align size */
887 size = (size + 15) & ~15;
888 if (arg >= REGN) {
889 d = get_reg(RC_INT);
890 gen_offs_sp(0x8d, d, struct_size);
891 gen_offs_sp(0x89, d, arg*8);
892 } else {
893 d = arg_prepare_reg(arg);
894 gen_offs_sp(0x8d, d, struct_size);
896 struct_size += size;
897 } else {
898 if (is_sse_float(vtop->type.t)) {
899 if (tcc_state->nosse)
900 tcc_error("SSE disabled");
901 if (arg >= REGN) {
902 gv(RC_XMM0);
903 /* movq %xmm0, j*8(%rsp) */
904 gen_offs_sp(0xd60f66, 0x100, arg*8);
905 } else {
906 /* Load directly to xmmN register */
907 gv(RC_XMM0 << arg);
908 d = arg_prepare_reg(arg);
909 /* mov %xmmN, %rxx */
910 o(0x66);
911 orex(1,d,0, 0x7e0f);
912 o(0xc0 + arg*8 + REG_VALUE(d));
914 } else {
915 if (bt == VT_STRUCT) {
916 vtop->type.ref = NULL;
917 vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
918 : size > 1 ? VT_SHORT : VT_BYTE;
921 r = gv(RC_INT);
922 if (arg >= REGN) {
923 gen_offs_sp(0x89, r, arg*8);
924 } else {
925 d = arg_prepare_reg(arg);
926 orex(1,d,r,0x89); /* mov */
927 o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
931 vtop--;
933 save_regs(0);
934 /* Copy R10 and R11 into RCX and RDX, respectively */
935 if (nb_args > 0) {
936 o(0xd1894c); /* mov %r10, %rcx */
937 if (nb_args > 1) {
938 o(0xda894c); /* mov %r11, %rdx */
942 gcall_or_jmp(0);
944 if ((vtop->r & VT_SYM) && vtop->sym->v == TOK_alloca) {
945 /* need to add the "func_scratch" area after alloca */
946 o(0x48); func_alloca = oad(0x05, func_alloca); /* add $NN, %rax */
947 #ifdef CONFIG_TCC_BCHECK
948 if (tcc_state->do_bounds_check)
949 gen_bounds_call(TOK___bound_alloca_nr); /* new region */
950 #endif
953 vtop--;
957 #define FUNC_PROLOG_SIZE 11
959 /* generate function prolog of type 't' */
960 void gfunc_prolog(Sym *func_sym)
962 CType *func_type = &func_sym->type;
963 int addr, reg_param_index, bt, size;
964 Sym *sym;
965 CType *type;
967 func_ret_sub = 0;
968 func_scratch = 32;
969 func_alloca = 0;
970 loc = 0;
972 addr = PTR_SIZE * 2;
973 ind += FUNC_PROLOG_SIZE;
974 func_sub_sp_offset = ind;
975 reg_param_index = 0;
977 sym = func_type->ref;
979 /* if the function returns a structure, then add an
980 implicit pointer parameter */
981 func_vt = sym->type;
982 func_var = (sym->f.func_type == FUNC_ELLIPSIS);
983 size = gfunc_arg_size(&func_vt);
984 if (!using_regs(size)) {
985 gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
986 func_vc = addr;
987 reg_param_index++;
988 addr += 8;
991 /* define parameters */
992 while ((sym = sym->next) != NULL) {
993 type = &sym->type;
994 bt = type->t & VT_BTYPE;
995 size = gfunc_arg_size(type);
996 if (!using_regs(size)) {
997 if (reg_param_index < REGN) {
998 gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
1000 sym_push(sym->v & ~SYM_FIELD, type,
1001 VT_LLOCAL | VT_LVAL, addr);
1002 } else {
1003 if (reg_param_index < REGN) {
1004 /* save arguments passed by register */
1005 if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
1006 if (tcc_state->nosse)
1007 tcc_error("SSE disabled");
1008 o(0xd60f66); /* movq */
1009 gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
1010 } else {
1011 gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
1014 sym_push(sym->v & ~SYM_FIELD, type,
1015 VT_LOCAL | VT_LVAL, addr);
1017 addr += 8;
1018 reg_param_index++;
1021 while (reg_param_index < REGN) {
1022 if (func_type->ref->f.func_type == FUNC_ELLIPSIS) {
1023 gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
1024 addr += 8;
1026 reg_param_index++;
1028 #ifdef CONFIG_TCC_BCHECK
1029 if (tcc_state->do_bounds_check)
1030 gen_bounds_prolog();
1031 #endif
1034 /* generate function epilog */
1035 void gfunc_epilog(void)
1037 int v, saved_ind;
1039 /* align local size to word & save local variables */
1040 func_scratch = (func_scratch + 15) & -16;
1041 loc = (loc & -16) - func_scratch;
1043 #ifdef CONFIG_TCC_BCHECK
1044 if (tcc_state->do_bounds_check)
1045 gen_bounds_epilog();
1046 #endif
1048 o(0xc9); /* leave */
1049 if (func_ret_sub == 0) {
1050 o(0xc3); /* ret */
1051 } else {
1052 o(0xc2); /* ret n */
1053 g(func_ret_sub);
1054 g(func_ret_sub >> 8);
1057 saved_ind = ind;
1058 ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
1059 v = -loc;
1061 if (v >= 4096) {
1062 Sym *sym = external_global_sym(TOK___chkstk, &func_old_type);
1063 oad(0xb8, v); /* mov stacksize, %eax */
1064 oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
1065 greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
1066 o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
1067 } else {
1068 o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
1069 o(0xec8148); /* sub rsp, stacksize */
1070 gen_le32(v);
1073 /* add the "func_scratch" area after each alloca seen */
1074 gsym_addr(func_alloca, -func_scratch);
1076 cur_text_section->data_offset = saved_ind;
1077 pe_add_unwind_data(ind, saved_ind, v);
1078 ind = cur_text_section->data_offset;
1081 #else
1083 static void gadd_sp(int val)
1085 if (val == (char)val) {
1086 o(0xc48348);
1087 g(val);
1088 } else {
1089 oad(0xc48148, val); /* add $xxx, %rsp */
1093 typedef enum X86_64_Mode {
1094 x86_64_mode_none,
1095 x86_64_mode_memory,
1096 x86_64_mode_integer,
1097 x86_64_mode_sse,
1098 x86_64_mode_x87
1099 } X86_64_Mode;
1101 static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
1103 if (a == b)
1104 return a;
1105 else if (a == x86_64_mode_none)
1106 return b;
1107 else if (b == x86_64_mode_none)
1108 return a;
1109 else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
1110 return x86_64_mode_memory;
1111 else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
1112 return x86_64_mode_integer;
1113 else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
1114 return x86_64_mode_memory;
1115 else
1116 return x86_64_mode_sse;
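/* Illustrative example of the merge rules: for struct { double d; int i; }
   the fields classify as sse and integer and merge to
   x86_64_mode_integer, so the 16-byte struct travels in general
   registers; any memory field, or an sse/x87 mix, merges to
   x86_64_mode_memory. */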
1119 static X86_64_Mode classify_x86_64_inner(CType *ty)
1121 X86_64_Mode mode;
1122 Sym *f;
1124 switch (ty->t & VT_BTYPE) {
1125 case VT_VOID: return x86_64_mode_none;
1127 case VT_INT:
1128 case VT_BYTE:
1129 case VT_SHORT:
1130 case VT_LLONG:
1131 case VT_BOOL:
1132 case VT_PTR:
1133 case VT_FUNC:
1134 return x86_64_mode_integer;
1136 case VT_FLOAT:
1137 case VT_DOUBLE: return x86_64_mode_sse;
1139 case VT_LDOUBLE: return x86_64_mode_x87;
1141 case VT_STRUCT:
1142 f = ty->ref;
1144 mode = x86_64_mode_none;
1145 for (f = f->next; f; f = f->next)
1146 mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));
1148 return mode;
1150 assert(0);
1151 return 0;
1154 static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
1156 X86_64_Mode mode;
1157 int size, align, ret_t = 0;
1159 if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
1160 *psize = 8;
1161 *palign = 8;
1162 *reg_count = 1;
1163 ret_t = ty->t;
1164 mode = x86_64_mode_integer;
1165 } else {
1166 size = type_size(ty, &align);
1167 *psize = (size + 7) & ~7;
1168 *palign = (align + 7) & ~7;
1170 if (size > 16) {
1171 mode = x86_64_mode_memory;
1172 } else {
1173 mode = classify_x86_64_inner(ty);
1174 switch (mode) {
1175 case x86_64_mode_integer:
1176 if (size > 8) {
1177 *reg_count = 2;
1178 ret_t = VT_QLONG;
1179 } else {
1180 *reg_count = 1;
1181 if (size > 4)
1182 ret_t = VT_LLONG;
1183 else if (size > 2)
1184 ret_t = VT_INT;
1185 else if (size > 1)
1186 ret_t = VT_SHORT;
1187 else
1188 ret_t = VT_BYTE;
1189 if ((ty->t & VT_BTYPE) == VT_STRUCT || (ty->t & VT_UNSIGNED))
1190 ret_t |= VT_UNSIGNED;
1192 break;
1194 case x86_64_mode_x87:
1195 *reg_count = 1;
1196 ret_t = VT_LDOUBLE;
1197 break;
1199 case x86_64_mode_sse:
1200 if (size > 8) {
1201 *reg_count = 2;
1202 ret_t = VT_QFLOAT;
1203 } else {
1204 *reg_count = 1;
1205 ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
1207 break;
1208 default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none*/
1213 if (ret) {
1214 ret->ref = NULL;
1215 ret->t = ret_t;
1218 return mode;
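/* Illustrative example: for struct { long a, b; } this reports size 16,
   mode x86_64_mode_integer and reg_count 2 (a VT_QLONG pair, i.e. two
   integer registers); for a plain double it reports size 8, mode
   x86_64_mode_sse, reg_count 1 and VT_DOUBLE. */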
1221 ST_FUNC int classify_x86_64_va_arg(CType *ty)
1223 /* This definition must be synced with stdarg.h */
1224 enum __va_arg_type {
1225 __va_gen_reg, __va_float_reg, __va_stack
1226 };
1227 int size, align, reg_count;
1228 X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
1229 switch (mode) {
1230 default: return __va_stack;
1231 case x86_64_mode_integer: return __va_gen_reg;
1232 case x86_64_mode_sse: return __va_float_reg;
1236 /* Return the number of registers needed to return the struct, or 0 if
1237 returning via struct pointer. */
1238 ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
1240 int size, align, reg_count;
1241 *ret_align = 1; // Never have to re-align return values for x86-64
1242 *regsize = 8;
1243 return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
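/* Illustrative example: a function returning struct { double x, y; }
   classifies as sse with reg_count 2, so gfunc_sret() returns nonzero
   and the value comes back in %xmm0/%xmm1 (RC_FRET/RC_FRE2); a 24-byte
   struct classifies as memory and is returned through a hidden pointer
   argument instead. */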
1246 #define REGN 6
1247 static const uint8_t arg_regs[REGN] = {
1248 TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
1249 };
1251 static int arg_prepare_reg(int idx) {
1252 if (idx == 2 || idx == 3)
1253 /* idx=2: r10, idx=3: r11 */
1254 return idx + 8;
1255 else
1256 return arg_regs[idx];
1259 /* Generate function call. The function address is pushed first, then
1260 all the parameters in call order. This function pops all the
1261 parameters and the function address. */
1262 void gfunc_call(int nb_args)
1264 X86_64_Mode mode;
1265 CType type;
1266 int size, align, r, args_size, stack_adjust, i, reg_count;
1267 int nb_reg_args = 0;
1268 int nb_sse_args = 0;
1269 int sse_reg, gen_reg;
1270 char _onstack[nb_args ? nb_args : 1], *onstack = _onstack;
1272 #ifdef CONFIG_TCC_BCHECK
1273 if (tcc_state->do_bounds_check)
1274 gbound_args(nb_args);
1275 #endif
1277 /* calculate the number of integer/float register arguments, remember
1278 arguments to be passed via stack (in onstack[]), and also remember
1279 if we have to align the stack pointer to 16 (onstack[i] == 2). Needs
1280 to be done in a left-to-right pass over arguments. */
1281 stack_adjust = 0;
1282 for(i = nb_args - 1; i >= 0; i--) {
1283 mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
1284 if (mode == x86_64_mode_sse && nb_sse_args + reg_count <= 8) {
1285 nb_sse_args += reg_count;
1286 onstack[i] = 0;
1287 } else if (mode == x86_64_mode_integer && nb_reg_args + reg_count <= REGN) {
1288 nb_reg_args += reg_count;
1289 onstack[i] = 0;
1290 } else if (mode == x86_64_mode_none) {
1291 onstack[i] = 0;
1292 } else {
1293 if (align == 16 && (stack_adjust &= 15)) {
1294 onstack[i] = 2;
1295 stack_adjust = 0;
1296 } else
1297 onstack[i] = 1;
1298 stack_adjust += size;
1302 if (nb_sse_args && tcc_state->nosse)
1303 tcc_error("SSE disabled but floating point arguments passed");
1305 /* fetch cpu flag before generating any code */
1306 if ((vtop->r & VT_VALMASK) == VT_CMP)
1307 gv(RC_INT);
1309 /* for struct arguments, we need to call memcpy, and that call would
1310 clobber the register-passed arguments we are preparing.
1311 So, we process the arguments that will be passed on the stack first. */
1312 gen_reg = nb_reg_args;
1313 sse_reg = nb_sse_args;
1314 args_size = 0;
1315 stack_adjust &= 15;
1316 for (i = 0; i < nb_args;) {
1317 mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
1318 if (!onstack[i]) {
1319 ++i;
1320 continue;
1322 /* Possibly adjust stack to align SSE boundary. We're processing
1323 args from right to left while allocating happens left to right
1324 (stack grows down), so the adjustment needs to happen _after_
1325 an argument that requires it. */
1326 if (stack_adjust) {
1327 o(0x50); /* push %rax; aka sub $8,%rsp */
1328 args_size += 8;
1329 stack_adjust = 0;
1331 if (onstack[i] == 2)
1332 stack_adjust = 1;
1334 vrotb(i+1);
1336 switch (vtop->type.t & VT_BTYPE) {
1337 case VT_STRUCT:
1338 /* allocate the necessary size on stack */
1339 o(0x48);
1340 oad(0xec81, size); /* sub $xxx, %rsp */
1341 /* generate structure store */
1342 r = get_reg(RC_INT);
1343 orex(1, r, 0, 0x89); /* mov %rsp, r */
1344 o(0xe0 + REG_VALUE(r));
1345 vset(&vtop->type, r | VT_LVAL, 0);
1346 vswap();
1347 vstore();
1348 break;
1350 case VT_LDOUBLE:
1351 gv(RC_ST0);
1352 oad(0xec8148, size); /* sub $xxx, %rsp */
1353 o(0x7cdb); /* fstpt 0(%rsp) */
1354 g(0x24);
1355 g(0x00);
1356 break;
1358 case VT_FLOAT:
1359 case VT_DOUBLE:
1360 assert(mode == x86_64_mode_sse);
1361 r = gv(RC_FLOAT);
1362 o(0x50); /* push $rax */
1363 /* movq %xmmN, (%rsp) */
1364 o(0xd60f66);
1365 o(0x04 + REG_VALUE(r)*8);
1366 o(0x24);
1367 break;
1369 default:
1370 assert(mode == x86_64_mode_integer);
1371 /* simple type */
1372 /* XXX: implicit cast ? */
1373 r = gv(RC_INT);
1374 orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
1375 break;
1377 args_size += size;
1379 vpop();
1380 --nb_args;
1381 onstack++;
1384 /* XXX This should be superfluous. */
1385 save_regs(0); /* save used temporary registers */
1387 /* then, we prepare register passing arguments.
1388 Note that we cannot set RDX and RCX in this loop because gv()
1389 may break these temporary registers. Let's use R10 and R11
1390 instead of them */
1391 assert(gen_reg <= REGN);
1392 assert(sse_reg <= 8);
1393 for(i = 0; i < nb_args; i++) {
1394 mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
1395 /* Alter stack entry type so that gv() knows how to treat it */
1396 vtop->type = type;
1397 if (mode == x86_64_mode_sse) {
1398 if (reg_count == 2) {
1399 sse_reg -= 2;
1400 gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
1401 if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
1402 /* movaps %xmm1, %xmmN */
1403 o(0x280f);
1404 o(0xc1 + ((sse_reg+1) << 3));
1405 /* movaps %xmm0, %xmmN */
1406 o(0x280f);
1407 o(0xc0 + (sse_reg << 3));
1409 } else {
1410 assert(reg_count == 1);
1411 --sse_reg;
1412 /* Load directly to register */
1413 gv(RC_XMM0 << sse_reg);
1415 } else if (mode == x86_64_mode_integer) {
1416 /* simple type */
1417 /* XXX: implicit cast ? */
1418 int d;
1419 gen_reg -= reg_count;
1420 r = gv(RC_INT);
1421 d = arg_prepare_reg(gen_reg);
1422 orex(1,d,r,0x89); /* mov */
1423 o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
1424 if (reg_count == 2) {
1425 d = arg_prepare_reg(gen_reg+1);
1426 orex(1,d,vtop->r2,0x89); /* mov */
1427 o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
1430 vtop--;
1432 assert(gen_reg == 0);
1433 assert(sse_reg == 0);
1435 /* We shouldn't have many operands on the stack anymore, but the
1436 call address itself is still there, and it might be in %eax
1437 (or edx/ecx) currently, which the below writes would clobber.
1438 So evict all remaining operands here. */
1439 save_regs(0);
1441 /* Copy R10 and R11 into RDX and RCX, respectively */
1442 if (nb_reg_args > 2) {
1443 o(0xd2894c); /* mov %r10, %rdx */
1444 if (nb_reg_args > 3) {
1445 o(0xd9894c); /* mov %r11, %rcx */
1449 if (vtop->type.ref->f.func_type != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
1450 oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
1451 gcall_or_jmp(0);
1452 if (args_size)
1453 gadd_sp(args_size);
1454 vtop--;
1457 #define FUNC_PROLOG_SIZE 11
1459 static void push_arg_reg(int i) {
1460 loc -= 8;
1461 gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
1464 /* generate function prolog of type 't' */
1465 void gfunc_prolog(Sym *func_sym)
1467 CType *func_type = &func_sym->type;
1468 X86_64_Mode mode;
1469 int i, addr, align, size, reg_count;
1470 int param_addr = 0, reg_param_index, sse_param_index;
1471 Sym *sym;
1472 CType *type;
1474 sym = func_type->ref;
1475 addr = PTR_SIZE * 2;
1476 loc = 0;
1477 ind += FUNC_PROLOG_SIZE;
1478 func_sub_sp_offset = ind;
1479 func_ret_sub = 0;
1481 if (sym->f.func_type == FUNC_ELLIPSIS) {
1482 int seen_reg_num, seen_sse_num, seen_stack_size;
1483 seen_reg_num = seen_sse_num = 0;
1484 /* frame pointer and return address */
1485 seen_stack_size = PTR_SIZE * 2;
1486 /* count the number of seen parameters */
1487 sym = func_type->ref;
1488 while ((sym = sym->next) != NULL) {
1489 type = &sym->type;
1490 mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
1491 switch (mode) {
1492 default:
1493 stack_arg:
1494 seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
1495 break;
1497 case x86_64_mode_integer:
1498 if (seen_reg_num + reg_count > REGN)
1499 goto stack_arg;
1500 seen_reg_num += reg_count;
1501 break;
1503 case x86_64_mode_sse:
1504 if (seen_sse_num + reg_count > 8)
1505 goto stack_arg;
1506 seen_sse_num += reg_count;
1507 break;
1511 loc -= 16;
1512 /* movl $0x????????, -0x10(%rbp) */
1513 o(0xf045c7);
1514 gen_le32(seen_reg_num * 8);
1515 /* movl $0x????????, -0xc(%rbp) */
1516 o(0xf445c7);
1517 gen_le32(seen_sse_num * 16 + 48);
1518 /* movl $0x????????, -0x8(%rbp) */
1519 o(0xf845c7);
1520 gen_le32(seen_stack_size);
1522 /* save all register passing arguments */
1523 for (i = 0; i < 8; i++) {
1524 loc -= 16;
1525 if (!tcc_state->nosse) {
1526 o(0xd60f66); /* movq */
1527 gen_modrm(7 - i, VT_LOCAL, NULL, loc);
1529 /* movq $0, loc+8(%rbp) */
1530 o(0x85c748);
1531 gen_le32(loc + 8);
1532 gen_le32(0);
1534 for (i = 0; i < REGN; i++) {
1535 push_arg_reg(REGN-1-i);
1539 sym = func_type->ref;
1540 reg_param_index = 0;
1541 sse_param_index = 0;
1543 /* if the function returns a structure, then add an
1544 implicit pointer parameter */
1545 func_vt = sym->type;
1546 mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
1547 if (mode == x86_64_mode_memory) {
1548 push_arg_reg(reg_param_index);
1549 func_vc = loc;
1550 reg_param_index++;
1552 /* define parameters */
1553 while ((sym = sym->next) != NULL) {
1554 type = &sym->type;
1555 mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
1556 switch (mode) {
1557 case x86_64_mode_sse:
1558 if (tcc_state->nosse)
1559 tcc_error("SSE disabled but floating point arguments used");
1560 if (sse_param_index + reg_count <= 8) {
1561 /* save arguments passed by register */
1562 loc -= reg_count * 8;
1563 param_addr = loc;
1564 for (i = 0; i < reg_count; ++i) {
1565 o(0xd60f66); /* movq */
1566 gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
1567 ++sse_param_index;
1569 } else {
1570 addr = (addr + align - 1) & -align;
1571 param_addr = addr;
1572 addr += size;
1574 break;
1576 case x86_64_mode_memory:
1577 case x86_64_mode_x87:
1578 addr = (addr + align - 1) & -align;
1579 param_addr = addr;
1580 addr += size;
1581 break;
1583 case x86_64_mode_integer: {
1584 if (reg_param_index + reg_count <= REGN) {
1585 /* save arguments passed by register */
1586 loc -= reg_count * 8;
1587 param_addr = loc;
1588 for (i = 0; i < reg_count; ++i) {
1589 gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
1590 ++reg_param_index;
1592 } else {
1593 addr = (addr + align - 1) & -align;
1594 param_addr = addr;
1595 addr += size;
1597 break;
1599 default: break; /* nothing to be done for x86_64_mode_none */
1601 sym_push(sym->v & ~SYM_FIELD, type,
1602 VT_LOCAL | VT_LVAL, param_addr);
1605 #ifdef CONFIG_TCC_BCHECK
1606 if (tcc_state->do_bounds_check)
1607 gen_bounds_prolog();
1608 #endif
1611 /* generate function epilog */
1612 void gfunc_epilog(void)
1614 int v, saved_ind;
1616 #ifdef CONFIG_TCC_BCHECK
1617 if (tcc_state->do_bounds_check)
1618 gen_bounds_epilog();
1619 #endif
1620 o(0xc9); /* leave */
1621 if (func_ret_sub == 0) {
1622 o(0xc3); /* ret */
1623 } else {
1624 o(0xc2); /* ret n */
1625 g(func_ret_sub);
1626 g(func_ret_sub >> 8);
1628 /* align local size to word & save local variables */
1629 v = (-loc + 15) & -16;
1630 saved_ind = ind;
1631 ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
1632 o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
1633 o(0xec8148); /* sub rsp, stacksize */
1634 gen_le32(v);
1635 ind = saved_ind;
1638 #endif /* not PE */
1640 ST_FUNC void gen_fill_nops(int bytes)
1642 while (bytes--)
1643 g(0x90);
1646 /* generate a jump to a label */
1647 int gjmp(int t)
1649 return gjmp2(0xe9, t);
1652 /* generate a jump to a fixed address */
1653 void gjmp_addr(int a)
1655 int r;
1656 r = a - ind - 2;
1657 if (r == (char)r) {
1658 g(0xeb);
1659 g(r);
1660 } else {
1661 oad(0xe9, a - ind - 5);
1665 ST_FUNC int gjmp_append(int n, int t)
1667 void *p;
1668 /* insert vtop->c jump list in t */
1669 if (n) {
1670 uint32_t n1 = n, n2;
1671 while ((n2 = read32le(p = cur_text_section->data + n1)))
1672 n1 = n2;
1673 write32le(p, t);
1674 t = n;
1676 return t;
1679 ST_FUNC int gjmp_cond(int op, int t)
1681 if (op & 0x100)
1683 /* This was a float compare. If the parity flag is set
1684 the result was unordered. For anything except != this
1685 means false and we don't jump (anding both conditions).
1686 For != this means true (oring both).
1687 Take care about inverting the test. We need to jump
1688 to our target if the result was unordered and test wasn't NE,
1689 otherwise if unordered we don't want to jump. */
1690 int v = vtop->cmp_r;
1691 op &= ~0x100;
1692 if (op ^ v ^ (v != TOK_NE))
1693 o(0x067a); /* jp +6 */
1694 else
1696 g(0x0f);
1697 t = gjmp2(0x8a, t); /* jp t */
1700 g(0x0f);
1701 t = gjmp2(op - 16, t);
1702 return t;
1705 /* generate an integer binary operation */
1706 void gen_opi(int op)
1708 int r, fr, opc, c;
1709 int ll, uu, cc;
1711 ll = is64_type(vtop[-1].type.t);
1712 uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
1713 cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
1715 switch(op) {
1716 case '+':
1717 case TOK_ADDC1: /* add with carry generation */
1718 opc = 0;
1719 gen_op8:
1720 if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
1721 /* constant case */
1722 vswap();
1723 r = gv(RC_INT);
1724 vswap();
1725 c = vtop->c.i;
1726 if (c == (char)c) {
1727 /* XXX: generate inc and dec for smaller code ? */
1728 orex(ll, r, 0, 0x83);
1729 o(0xc0 | (opc << 3) | REG_VALUE(r));
1730 g(c);
1731 } else {
1732 orex(ll, r, 0, 0x81);
1733 oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
1735 } else {
1736 gv2(RC_INT, RC_INT);
1737 r = vtop[-1].r;
1738 fr = vtop[0].r;
1739 orex(ll, r, fr, (opc << 3) | 0x01);
1740 o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
1742 vtop--;
1743 if (op >= TOK_ULT && op <= TOK_GT)
1744 vset_VT_CMP(op);
1745 break;
1746 case '-':
1747 case TOK_SUBC1: /* sub with carry generation */
1748 opc = 5;
1749 goto gen_op8;
1750 case TOK_ADDC2: /* add with carry use */
1751 opc = 2;
1752 goto gen_op8;
1753 case TOK_SUBC2: /* sub with carry use */
1754 opc = 3;
1755 goto gen_op8;
1756 case '&':
1757 opc = 4;
1758 goto gen_op8;
1759 case '^':
1760 opc = 6;
1761 goto gen_op8;
1762 case '|':
1763 opc = 1;
1764 goto gen_op8;
1765 case '*':
1766 gv2(RC_INT, RC_INT);
1767 r = vtop[-1].r;
1768 fr = vtop[0].r;
1769 orex(ll, fr, r, 0xaf0f); /* imul fr, r */
1770 o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
1771 vtop--;
1772 break;
1773 case TOK_SHL:
1774 opc = 4;
1775 goto gen_shift;
1776 case TOK_SHR:
1777 opc = 5;
1778 goto gen_shift;
1779 case TOK_SAR:
1780 opc = 7;
1781 gen_shift:
1782 opc = 0xc0 | (opc << 3);
1783 if (cc) {
1784 /* constant case */
1785 vswap();
1786 r = gv(RC_INT);
1787 vswap();
1788 orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
1789 o(opc | REG_VALUE(r));
1790 g(vtop->c.i & (ll ? 63 : 31));
1791 } else {
1792 /* we generate the shift in ecx */
1793 gv2(RC_INT, RC_RCX);
1794 r = vtop[-1].r;
1795 orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
1796 o(opc | REG_VALUE(r));
1798 vtop--;
1799 break;
1800 case TOK_UDIV:
1801 case TOK_UMOD:
1802 uu = 1;
1803 goto divmod;
1804 case '/':
1805 case '%':
1806 case TOK_PDIV:
1807 uu = 0;
1808 divmod:
1809 /* first operand must be in eax */
1810 /* XXX: need better constraint for second operand */
1811 gv2(RC_RAX, RC_RCX);
1812 r = vtop[-1].r;
1813 fr = vtop[0].r;
1814 vtop--;
1815 save_reg(TREG_RDX);
1816 orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
1817 orex(ll, fr, 0, 0xf7); /* div fr, %eax */
1818 o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
1819 if (op == '%' || op == TOK_UMOD)
1820 r = TREG_RDX;
1821 else
1822 r = TREG_RAX;
1823 vtop->r = r;
1824 break;
1825 default:
1826 opc = 7;
1827 goto gen_op8;
1831 void gen_opl(int op)
1833 gen_opi(op);
1836 /* generate a floating point operation 'v = t1 op t2' instruction. The
1837 two operands are guaranteed to have the same floating point type */
1838 /* XXX: need to use ST1 too */
1839 void gen_opf(int op)
1841 int a, ft, fc, swapped, r;
1842 int float_type =
1843 (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;
1845 /* convert constants to memory references */
1846 if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
1847 vswap();
1848 gv(float_type);
1849 vswap();
1851 if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
1852 gv(float_type);
1854 /* must put at least one value in the floating point register */
1855 if ((vtop[-1].r & VT_LVAL) &&
1856 (vtop[0].r & VT_LVAL)) {
1857 vswap();
1858 gv(float_type);
1859 vswap();
1861 swapped = 0;
1862 /* swap the stack if needed so that t1 is the register and t2 is
1863 the memory reference */
1864 if (vtop[-1].r & VT_LVAL) {
1865 vswap();
1866 swapped = 1;
1868 if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
1869 if (op >= TOK_ULT && op <= TOK_GT) {
1870 /* load on stack second operand */
1871 load(TREG_ST0, vtop);
1872 save_reg(TREG_RAX); /* eax is used by FP comparison code */
1873 if (op == TOK_GE || op == TOK_GT)
1874 swapped = !swapped;
1875 else if (op == TOK_EQ || op == TOK_NE)
1876 swapped = 0;
1877 if (swapped)
1878 o(0xc9d9); /* fxch %st(1) */
1879 if (op == TOK_EQ || op == TOK_NE)
1880 o(0xe9da); /* fucompp */
1881 else
1882 o(0xd9de); /* fcompp */
1883 o(0xe0df); /* fnstsw %ax */
1884 if (op == TOK_EQ) {
1885 o(0x45e480); /* and $0x45, %ah */
1886 o(0x40fC80); /* cmp $0x40, %ah */
1887 } else if (op == TOK_NE) {
1888 o(0x45e480); /* and $0x45, %ah */
1889 o(0x40f480); /* xor $0x40, %ah */
1890 op = TOK_NE;
1891 } else if (op == TOK_GE || op == TOK_LE) {
1892 o(0x05c4f6); /* test $0x05, %ah */
1893 op = TOK_EQ;
1894 } else {
1895 o(0x45c4f6); /* test $0x45, %ah */
1896 op = TOK_EQ;
1898 vtop--;
1899 vset_VT_CMP(op);
1900 } else {
1901 /* no memory reference possible for long double operations */
1902 load(TREG_ST0, vtop);
1903 swapped = !swapped;
1905 switch(op) {
1906 default:
1907 case '+':
1908 a = 0;
1909 break;
1910 case '-':
1911 a = 4;
1912 if (swapped)
1913 a++;
1914 break;
1915 case '*':
1916 a = 1;
1917 break;
1918 case '/':
1919 a = 6;
1920 if (swapped)
1921 a++;
1922 break;
1924 ft = vtop->type.t;
1925 fc = vtop->c.i;
1926 o(0xde); /* fxxxp %st, %st(1) */
1927 o(0xc1 + (a << 3));
1928 vtop--;
1930 } else {
1931 if (op >= TOK_ULT && op <= TOK_GT) {
1932 /* if saved lvalue, then we must reload it */
1933 r = vtop->r;
1934 fc = vtop->c.i;
1935 if ((r & VT_VALMASK) == VT_LLOCAL) {
1936 SValue v1;
1937 r = get_reg(RC_INT);
1938 v1.type.t = VT_PTR;
1939 v1.r = VT_LOCAL | VT_LVAL;
1940 v1.c.i = fc;
1941 load(r, &v1);
1942 fc = 0;
1943 vtop->r = r = r | VT_LVAL;
1946 if (op == TOK_EQ || op == TOK_NE) {
1947 swapped = 0;
1948 } else {
1949 if (op == TOK_LE || op == TOK_LT)
1950 swapped = !swapped;
1951 if (op == TOK_LE || op == TOK_GE) {
1952 op = 0x93; /* setae */
1953 } else {
1954 op = 0x97; /* seta */
1958 if (swapped) {
1959 gv(RC_FLOAT);
1960 vswap();
1962 assert(!(vtop[-1].r & VT_LVAL));
1964 if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
1965 o(0x66);
1966 if (op == TOK_EQ || op == TOK_NE)
1967 o(0x2e0f); /* ucomisd */
1968 else
1969 o(0x2f0f); /* comisd */
1971 if (vtop->r & VT_LVAL) {
1972 gen_modrm(vtop[-1].r, r, vtop->sym, fc);
1973 } else {
1974 o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
1977 vtop--;
1978 vset_VT_CMP(op | 0x100);
1979 vtop->cmp_r = op;
1980 } else {
1981 assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
1982 switch(op) {
1983 default:
1984 case '+':
1985 a = 0;
1986 break;
1987 case '-':
1988 a = 4;
1989 break;
1990 case '*':
1991 a = 1;
1992 break;
1993 case '/':
1994 a = 6;
1995 break;
1997 ft = vtop->type.t;
1998 fc = vtop->c.i;
1999 assert((ft & VT_BTYPE) != VT_LDOUBLE);
2001 r = vtop->r;
2002 /* if saved lvalue, then we must reload it */
2003 if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
2004 SValue v1;
2005 r = get_reg(RC_INT);
2006 v1.type.t = VT_PTR;
2007 v1.r = VT_LOCAL | VT_LVAL;
2008 v1.c.i = fc;
2009 load(r, &v1);
2010 fc = 0;
2011 vtop->r = r = r | VT_LVAL;
2014 assert(!(vtop[-1].r & VT_LVAL));
2015 if (swapped) {
2016 assert(vtop->r & VT_LVAL);
2017 gv(RC_FLOAT);
2018 vswap();
2021 if ((ft & VT_BTYPE) == VT_DOUBLE) {
2022 o(0xf2);
2023 } else {
2024 o(0xf3);
2026 o(0x0f);
2027 o(0x58 + a);
2029 if (vtop->r & VT_LVAL) {
2030 gen_modrm(vtop[-1].r, r, vtop->sym, fc);
2031 } else {
2032 o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
2035 vtop--;
2040 /* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
2041 and 'long long' cases. */
2042 void gen_cvt_itof(int t)
2044 if ((t & VT_BTYPE) == VT_LDOUBLE) {
2045 save_reg(TREG_ST0);
2046 gv(RC_INT);
2047 if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
2048 /* signed long long to float/double/long double (unsigned case
2049 is handled generically) */
2050 o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
2051 o(0x242cdf); /* fildll (%rsp) */
2052 o(0x08c48348); /* add $8, %rsp */
2053 } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
2054 (VT_INT | VT_UNSIGNED)) {
2055 /* unsigned int to float/double/long double */
2056 o(0x6a); /* push $0 */
2057 g(0x00);
2058 o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
2059 o(0x242cdf); /* fildll (%rsp) */
2060 o(0x10c48348); /* add $16, %rsp */
2061 } else {
2062 /* int to float/double/long double */
2063 o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
2064 o(0x2404db); /* fildl (%rsp) */
2065 o(0x08c48348); /* add $8, %rsp */
2067 vtop->r = TREG_ST0;
2068 } else {
2069 int r = get_reg(RC_FLOAT);
2070 gv(RC_INT);
2071 o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
2072 if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
2073 (VT_INT | VT_UNSIGNED) ||
2074 (vtop->type.t & VT_BTYPE) == VT_LLONG) {
2075 o(0x48); /* REX */
2077 o(0x2a0f);
2078 o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
2079 vtop->r = r;
2083 /* convert from one floating point type to another */
2084 void gen_cvt_ftof(int t)
2086 int ft, bt, tbt;
2088 ft = vtop->type.t;
2089 bt = ft & VT_BTYPE;
2090 tbt = t & VT_BTYPE;
2092 if (bt == VT_FLOAT) {
2093 gv(RC_FLOAT);
2094 if (tbt == VT_DOUBLE) {
2095 o(0x140f); /* unpcklps */
2096 o(0xc0 + REG_VALUE(vtop->r)*9);
2097 o(0x5a0f); /* cvtps2pd */
2098 o(0xc0 + REG_VALUE(vtop->r)*9);
2099 } else if (tbt == VT_LDOUBLE) {
2100 save_reg(RC_ST0);
2101 /* movss %xmm0,-0x10(%rsp) */
2102 o(0x110ff3);
2103 o(0x44 + REG_VALUE(vtop->r)*8);
2104 o(0xf024);
2105 o(0xf02444d9); /* flds -0x10(%rsp) */
2106 vtop->r = TREG_ST0;
2108 } else if (bt == VT_DOUBLE) {
2109 gv(RC_FLOAT);
2110 if (tbt == VT_FLOAT) {
2111 o(0x140f66); /* unpcklpd */
2112 o(0xc0 + REG_VALUE(vtop->r)*9);
2113 o(0x5a0f66); /* cvtpd2ps */
2114 o(0xc0 + REG_VALUE(vtop->r)*9);
2115 } else if (tbt == VT_LDOUBLE) {
2116 save_reg(RC_ST0);
2117 /* movsd %xmm0,-0x10(%rsp) */
2118 o(0x110ff2);
2119 o(0x44 + REG_VALUE(vtop->r)*8);
2120 o(0xf024);
2121 o(0xf02444dd); /* fldl -0x10(%rsp) */
2122 vtop->r = TREG_ST0;
2124 } else {
2125 int r;
2126 gv(RC_ST0);
2127 r = get_reg(RC_FLOAT);
2128 if (tbt == VT_DOUBLE) {
2129 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
2130 /* movsd -0x10(%rsp),%xmm0 */
2131 o(0x100ff2);
2132 o(0x44 + REG_VALUE(r)*8);
2133 o(0xf024);
2134 vtop->r = r;
2135 } else if (tbt == VT_FLOAT) {
2136 o(0xf0245cd9); /* fstps -0x10(%rsp) */
2137 /* movss -0x10(%rsp),%xmm0 */
2138 o(0x100ff3);
2139 o(0x44 + REG_VALUE(r)*8);
2140 o(0xf024);
2141 vtop->r = r;
2146 /* convert fp to int 't' type */
2147 void gen_cvt_ftoi(int t)
2149 int ft, bt, size, r;
2150 ft = vtop->type.t;
2151 bt = ft & VT_BTYPE;
2152 if (bt == VT_LDOUBLE) {
2153 gen_cvt_ftof(VT_DOUBLE);
2154 bt = VT_DOUBLE;
2157 gv(RC_FLOAT);
2158 if (t != VT_INT)
2159 size = 8;
2160 else
2161 size = 4;
2163 r = get_reg(RC_INT);
2164 if (bt == VT_FLOAT) {
2165 o(0xf3);
2166 } else if (bt == VT_DOUBLE) {
2167 o(0xf2);
2168 } else {
2169 assert(0);
2171 orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
2172 o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
2173 vtop->r = r;
2176 // Generate sign extension from 32 to 64 bits:
2177 ST_FUNC void gen_cvt_sxtw(void)
2179 int r = gv(RC_INT);
2180 /* x86_64 specific: movslq */
2181 o(0x6348);
2182 o(0xc0 + (REG_VALUE(r) << 3) + REG_VALUE(r));
2185 /* char/short to int conversion */
2186 ST_FUNC void gen_cvt_csti(int t)
2188 int r, sz, xl, ll;
2189 r = gv(RC_INT);
2190 sz = !(t & VT_UNSIGNED);
2191 xl = (t & VT_BTYPE) == VT_SHORT;
2192 ll = (vtop->type.t & VT_BTYPE) == VT_LLONG;
2193 orex(ll, r, 0, 0xc0b60f /* mov[sz] %a[xl], %eax */
2194 | (sz << 3 | xl) << 8
2195 | (REG_VALUE(r) << 3 | REG_VALUE(r)) << 16
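/* Illustrative example: the middle opcode byte is patched from the
   sz/xl bits above: a signed char (sz=1, xl=0) turns 0xb6 into 0xbe,
   i.e. movsbl; an unsigned short (sz=0, xl=1) gives 0xb7, i.e. movzwl;
   the same register is used as both source and destination. */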
2199 /* computed goto support */
2200 void ggoto(void)
2202 gcall_or_jmp(1);
2203 vtop--;
2206 /* Save the stack pointer onto the stack and return the location of its address */
2207 ST_FUNC void gen_vla_sp_save(int addr) {
2208 /* mov %rsp,addr(%rbp)*/
2209 gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
2212 /* Restore the SP from a location on the stack */
2213 ST_FUNC void gen_vla_sp_restore(int addr) {
2214 gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
2217 #ifdef TCC_TARGET_PE
2218 /* Save result of gen_vla_alloc onto the stack */
2219 ST_FUNC void gen_vla_result(int addr) {
2220 /* mov %rax,addr(%rbp)*/
2221 gen_modrm64(0x89, TREG_RAX, VT_LOCAL, NULL, addr);
2223 #endif
2225 /* Subtract from the stack pointer, and push the resulting value onto the stack */
2226 ST_FUNC void gen_vla_alloc(CType *type, int align) {
2227 int use_call = 0;
2229 #if defined(CONFIG_TCC_BCHECK)
2230 use_call = tcc_state->do_bounds_check;
2231 #endif
2232 #ifdef TCC_TARGET_PE /* alloca does more than just adjust %rsp on Windows */
2233 use_call = 1;
2234 #endif
2235 if (use_call)
2237 vpush_global_sym(&func_old_type, TOK_alloca);
2238 vswap(); /* Move alloca ref past allocation size */
2239 gfunc_call(1);
2241 else {
2242 int r;
2243 r = gv(RC_INT); /* allocation size */
2244 /* sub r,%rsp */
2245 o(0x2b48);
2246 o(0xe0 | REG_VALUE(r));
2247 /* We align to 16 bytes rather than to the requested 'align' */
2248 /* and ~15, %rsp */
2249 o(0xf0e48348);
2250 vpop();
2255 /* end of x86-64 code generator */
2256 /*************************************************************/
2257 #endif /* ! TARGET_DEFS_ONLY */
2258 /******************************************************/