/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS     25
#define NB_ASM_REGS 16
#define CONFIG_TCC_ASM
/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which does
   assumptions on it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0080 /* only for long double */
#define RC_R8      0x0100
#define RC_R9      0x0200
#define RC_R10     0x0400
#define RC_R11     0x0800
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_IRE2    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_FRE2    RC_XMM1 /* function return: second float register */
/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20
};
#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)

/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_IRE2 TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_FRE2 TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN     16
/* define if return values need to be extended explicitly
   at caller side (for interfacing with non-TCC compilers) */
#define PROMOTE_RET
/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#define USING_GLOBALS
#include "tcc.h"
#include <assert.h>

ST_DATA const char * const target_machine_defs =
    "__x86_64__\0"
    "__amd64__\0"
    ;
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0,
    0,
    0,
    0,
    0,
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};
static unsigned long func_sub_sp_offset;
static int func_ret_sub;

#if defined(CONFIG_TCC_BCHECK)
static addr_t func_bound_offset;
static unsigned long func_bound_ind;
ST_DATA int func_bound_add_epilog;
#endif

#ifdef TCC_TARGET_PE
static int func_scratch, func_alloca;
#endif
/* XXX: make it faster ? */
ST_FUNC void g(int c)
{
    int ind1;
    if (nocode_wanted)
        return;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}

ST_FUNC void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}
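
/* Editor's note on the encoding convention used throughout this file:
   o() emits the low byte first and stops once the remaining value is
   zero, so multi-byte opcode sequences are written "reversed" in the
   source.  E.g. o(0xc48348) emits the bytes 48 83 c4, the start of
   "add $imm8,%rsp".  A consequence is that o() cannot emit 0x00
   bytes; those go through g() or gen_le32() instead. */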
ST_FUNC void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

ST_FUNC void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

ST_FUNC void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}
static void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
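
/* Editor's note: the REX prefix is 0100WRXB, i.e.
   0x40 | W<<3 | R<<2 | X<<1 | B.  orex() sets W from 'll' (64-bit
   operand size), R from the high bit of the ModRM reg-field register
   ('r2') and B from the high bit of the r/m-field register ('r');
   X (SIB index extension) is never needed here.  For example
   orex(1, TREG_R8, 0, 0x8b) emits 49 8b: REX.W|REX.B, then the mov
   opcode. */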
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
{
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a < 0 ? -a : a - t - 4);
        t = n;
    }
}
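
/* Editor's note on forward-jump patching: oad() below returns the
   offset of the 4-byte displacement field it emits.  While a label's
   address is still unknown, those fields are chained into a linked
   list: each slot holds the offset of the previous patch site, with 0
   terminating the list.  gsym_addr() walks the chain and overwrites
   every slot with the real PC-relative displacement, or with the
   absolute value -a when 'a' is negative (used for the
   gsym_addr(func_alloca, -func_scratch) fixup in the PE epilog). */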
static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}

/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)
{
    int t;
    if (nocode_wanted)
        return s;
    o(c);
    t = ind;
    gen_le32(s);
    return t;
}

/* generate jmp to a label */
#define gjmp2(instr,lbl) oad(instr,lbl)
ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
    gen_le32(c-4);
}
/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifdef TCC_TARGET_PE
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
        get_tok_str(sym->v, NULL), c, r,
        cur_text_section->data[ind-3],
        cur_text_section->data[ind-2],
        cur_text_section->data[ind-1]
        );
#endif
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        if (!(r & VT_SYM)) {
            /* Absolute memory reference */
            o(0x04 | op_reg); /* [sib] | destreg */
            oad(0x25, c);     /* disp32 */
        } else {
            o(0x05 | op_reg); /* (%rip)+disp32 | destreg */
            if (is_got) {
                gen_gotpcrel(r, sym, c);
            } else {
                gen_addrpc32(r, sym, c);
            }
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}
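
/* ModRM cheat sheet for the cases above (editor's note): with mod=00,
   r/m=101 (the 0x05 | reg<<3 byte) means disp32(%rip); the 0x04 plus
   SIB byte 0x25 pair encodes an absolute disp32 with no base register;
   with %rbp as base, mod=01 (0x45) takes a disp8 and mod=10 (0x85) a
   disp32, which is why gen_modrm_impl() switches on c == (char)c. */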
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a 64 bit modrm reference: emit the REX prefix and the
   instruction opcode 'opcode', then the modrm bytes. 'op_reg'
   contains the additional 3 opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.i;
    if (fc != sv->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in load");

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.i = fc;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        if (fc != sv->c.i) {
            /* If the addend doesn't fit into a 32bit signed
               we must use a 64bit move.  We've checked above
               that this doesn't have a sym associated. */
            v1.type.t = VT_LLONG;
            v1.r = VT_CONST;
            v1.c.i = sv->c.i;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
            fc = 0;
        }
        ll = 0;
        /* Like GCC we can load from small enough properly sized
           structs and unions as well.
           XXX maybe move to generic operand handling, but should
           occur only with asm, so tccasm.c might also be a better place */
        if ((ft & VT_BTYPE) == VT_STRUCT) {
            int align;
            switch (type_size(&sv->type, &align)) {
                case 1: ft = VT_BYTE; break;
                case 2: ft = VT_SHORT; break;
                case 4: ft = VT_INT; break;
                case 8: ft = VT_LLONG; break;
                default:
                    tcc_error("invalid aggregate type for register load");
                    break;
            }
        }
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else if ((ft & VT_TYPE) == (VT_VOID)) {
            /* Can happen with zero size structs */
            return;
        } else {
            assert(((ft & VT_BTYPE) == VT_INT)
                   || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR)
                   || ((ft & VT_BTYPE) == VT_FUNC)
                );
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.i);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            if (fc & 0x100)
            {
                v = vtop->cmp_r;
                fc &= ~0x100;
                /* This was a float compare.  If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE. */
                orex(0, r, 0, 0xb0 + REG_VALUE(r)); /* mov $0/1,%al */
                g(v ^ fc ^ (v == TOK_NE));
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
            orex(0,r,0, 0x0f);
            o(0xc0b6 + REG_VALUE(r) * 0x900); /* movzbl %al, %eax */
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(is64_type(ft), r, v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}
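
/* Worked example (editor's note, not from the original source):
   loading a local int at -8(%rbp) into %eax goes through the
   "fr & VT_LVAL" path with b = 0x8b and ll = 0 and emits
       8b 45 f8        mov -0x8(%rbp),%eax
   i.e. the opcode, then gen_modrm() picks the disp8-off-%rbp form. */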
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    fr = v->r & VT_VALMASK;
    ft = v->type.t;
    fc = v->c.i;
    if (fc != v->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in store");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST
        && (v->r & VT_SYM)
        && !(v->sym->type.t & VT_STATIC)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, etc. */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            orex(1, fr, r, op64);
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) && (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant symbolic case -> simple relocation */
#ifdef TCC_TARGET_PE
        greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
#else
        greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
#endif
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}
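
/* Editor's note on the -4 addends used with R_X86_64_PC32/PLT32 in
   this file: the relocation is resolved as S + A - P, where P is the
   address of the 4-byte field itself, while the CPU computes the
   branch target relative to the end of the instruction, 4 bytes
   later; the -4 in the addend makes up the difference. */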
#if defined(CONFIG_TCC_BCHECK)

static void gen_bounds_call(int v)
{
    Sym *sym = external_helper_sym(v);
    oad(0xe8, 0);
#ifdef TCC_TARGET_PE
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
#else
    greloca(cur_text_section, sym, ind-4, R_X86_64_PLT32, -4);
#endif
}

#ifdef TCC_TARGET_PE
# define TREG_FASTCALL_1 TREG_RCX
#else
# define TREG_FASTCALL_1 TREG_RDI
#endif
static void gen_bounds_prolog(void)
{
    /* leave some room for bound checking code */
    func_bound_offset = lbounds_section->data_offset;
    func_bound_ind = ind;
    func_bound_add_epilog = 0;
    o(0x0d8d48 + ((TREG_FASTCALL_1 == TREG_RDI) * 0x300000)); /* lbound section pointer */
    gen_le32 (0);
    oad(0xb8, 0); /* call to function */
}
static void gen_bounds_epilog(void)
{
    addr_t saved_ind;
    addr_t *bounds_ptr;
    Sym *sym_data;
    int offset_modified = func_bound_offset != lbounds_section->data_offset;

    if (!offset_modified && !func_bound_add_epilog)
        return;

    /* add end of table info */
    bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
    *bounds_ptr = 0;

    sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                           func_bound_offset, lbounds_section->data_offset);

    /* generate bound local allocation */
    if (offset_modified) {
        saved_ind = ind;
        ind = func_bound_ind;
        greloca(cur_text_section, sym_data, ind + 3, R_X86_64_PC32, -4);
        ind = ind + 7;
        gen_bounds_call(TOK___bound_local_new);
        ind = saved_ind;
    }

    /* generate bound check local freeing */
    o(0x5250); /* save returned value, if any */
    greloca(cur_text_section, sym_data, ind + 3, R_X86_64_PC32, -4);
    o(0x0d8d48 + ((TREG_FASTCALL_1 == TREG_RDI) * 0x300000)); /* lea xxx(%rip), %rcx/rdi */
    gen_le32 (0);
    gen_bounds_call(TOK___bound_local_delete);
    o(0x585a); /* restore returned value, if any */
}
#endif
#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return idx >= 0 && idx < REGN ? arg_regs[idx] : 0;
}
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

static void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}
static int using_regs(int size)
{
    return !(size > 8 || (size & (size - 1)));
}
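
/* Editor's note: this encodes the Windows x64 rule that aggregates are
   passed by value only when their size is 1, 2, 4 or 8 bytes (a power
   of two that fits a register); e.g. using_regs(8) != 0 but
   using_regs(3) == 0 and using_regs(16) == 0.  Everything else is
   passed by reference to a copy made in the caller's scratch area. */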
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    size = type_size(vt, &align);
    if (!using_regs(size))
        return 0;
    if (size == 8)
        ret->t = VT_LLONG;
    else if (size == 4)
        ret->t = VT_INT;
    else if (size == 2)
        ret->t = VT_SHORT;
    else
        ret->t = VT_BYTE;
    ret->ref = NULL;
    return 1;
}
static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

static int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}
void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);
#endif

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (using_regs(size))
            continue; /* arguments of 8 bytes or less are passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (!using_regs(size)) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                if (tcc_state->nosse)
                    tcc_error("SSE disabled");
                if (arg >= REGN) {
                    gv(RC_XMM0);
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* Load directly to xmmN register */
                    gv(RC_XMM0 << arg);
                    d = arg_prepare_reg(arg);
                    /* mov %xmmN, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + arg*8 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);
    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);

    if ((vtop->r & VT_SYM) && vtop->sym->v == TOK_alloca) {
        /* need to add the "func_scratch" area after alloca */
        o(0x48); func_alloca = oad(0x05, func_alloca); /* add $NN, %rax */
#ifdef CONFIG_TCC_BCHECK
        if (tcc_state->do_bounds_check)
            gen_bounds_call(TOK___bound_alloca_nr); /* new region */
#endif
    }
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(Sym *func_sym)
{
    CType *func_type = &func_sym->type;
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 32;
    func_alloca = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    size = gfunc_arg_size(&func_vt);
    if (!using_regs(size)) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (!using_regs(size)) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LLOCAL | VT_LVAL, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    if (tcc_state->nosse)
                        tcc_error("SSE disabled");
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_var) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_prolog();
#endif
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    /* align local size to word & save local variables */
    func_scratch = (func_scratch + 15) & -16;
    loc = (loc & -16) - func_scratch;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_epilog();
#endif

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    v = -loc;

    if (v >= 4096) {
        Sym *sym = external_helper_sym(TOK___chkstk);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
        o(0xec8148);  /* sub rsp, stacksize */
        gen_le32(v);
    }

    /* add the "func_scratch" area after each alloca seen */
    gsym_addr(func_alloca, -func_scratch);

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}

#else
static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}
typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;
static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}
static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
        return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }
    assert(0);
    return 0;
}
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    if (size > 4)
                        ret_t = VT_LLONG;
                    else if (size > 2)
                        ret_t = VT_INT;
                    else if (size > 1)
                        ret_t = VT_SHORT;
                    else
                        ret_t = VT_BYTE;
                    if ((ty->t & VT_BTYPE) == VT_STRUCT || (ty->t & VT_UNSIGNED))
                        ret_t |= VT_UNSIGNED;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}
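
/* Editor's note: this is a simplified, whole-type version of the SysV
   classification algorithm (the full ABI classifies each eightbyte
   separately).  Some examples under this scheme:
     struct { long a, b; }    -> integer, reg_count 2 (VT_QLONG)
     struct { double a, b; }  -> sse, reg_count 2 (VT_QFLOAT)
     struct { char c[24]; }   -> memory (size > 16, passed on stack)
     long double              -> x87 (lives in %st0)
   A struct mixing an int and a double eightbyte is classified integer
   as a whole, which matches the ABI only for the common cases. */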
ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}
#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return idx >= 0 && idx < REGN ? arg_regs[idx] : 0;
}
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, i, reg_count, k;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;
    char *onstack = tcc_malloc((nb_args + 1) * sizeof (char));

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);
#endif

    /* calculate the number of integer/float register arguments, remember
       arguments to be passed via stack (in onstack[]), and also remember
       if we have to align the stack pointer to 16 (onstack[i] == 2).  Needs
       to be done in a left-to-right pass over arguments.  */
    stack_adjust = 0;
    for(i = nb_args - 1; i >= 0; i--) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (size == 0) continue;
        if (mode == x86_64_mode_sse && nb_sse_args + reg_count <= 8) {
            nb_sse_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_integer && nb_reg_args + reg_count <= REGN) {
            nb_reg_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_none) {
            onstack[i] = 0;
        } else {
            if (align == 16 && (stack_adjust &= 15)) {
                onstack[i] = 2;
                stack_adjust = 0;
            } else
                onstack[i] = 1;
            stack_adjust += size;
        }
    }

    if (nb_sse_args && tcc_state->nosse)
        tcc_error("SSE disabled but floating point arguments passed");

    /* fetch cpu flag before generating any code */
    if ((vtop->r & VT_VALMASK) == VT_CMP)
        gv(RC_INT);

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    args_size = 0;
    stack_adjust &= 15;
    for (i = k = 0; i < nb_args;) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (size) {
            if (!onstack[i + k]) {
                ++i;
                continue;
            }
            /* Possibly adjust stack to align SSE boundary.  We're processing
               args from right to left while allocating happens left to right
               (stack grows down), so the adjustment needs to happen _after_
               an argument that requires it.  */
            if (stack_adjust) {
                o(0x50); /* push %rax; aka sub $8,%rsp */
                args_size += 8;
                stack_adjust = 0;
            }
            if (onstack[i + k] == 2)
                stack_adjust = 1;
        }

        vrotb(i+1);

        switch (vtop->type.t & VT_BTYPE) {
        case VT_STRUCT:
            /* allocate the necessary size on stack */
            o(0x48);
            oad(0xec81, size); /* sub $xxx, %rsp */
            /* generate structure store */
            r = get_reg(RC_INT);
            orex(1, r, 0, 0x89); /* mov %rsp, r */
            o(0xe0 + REG_VALUE(r));
            vset(&vtop->type, r | VT_LVAL, 0);
            vswap();
            vstore();
            break;

        case VT_LDOUBLE:
            gv(RC_ST0);
            oad(0xec8148, size); /* sub $xxx, %rsp */
            o(0x7cdb); /* fstpt 0(%rsp) */
            g(0x24);
            g(0x00);
            break;

        case VT_FLOAT:
        case VT_DOUBLE:
            assert(mode == x86_64_mode_sse);
            r = gv(RC_FLOAT);
            o(0x50); /* push $rax */
            /* movq %xmmN, (%rsp) */
            o(0xd60f66);
            o(0x04 + REG_VALUE(r)*8);
            o(0x24);
            break;

        default:
            assert(mode == x86_64_mode_integer);
            /* simple type */
            /* XXX: implicit cast ? */
            r = gv(RC_INT);
            orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
            break;
        }
        args_size += size;

        vpop();
        --nb_args;
        k++;
    }

    tcc_free(onstack);

    /* XXX This should be superfluous.  */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        if (size == 0) continue;
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            int d;
            gen_reg -= reg_count;
            r = gv(RC_INT);
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here.  */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    if (vtop->type.ref->f.func_type != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}
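
/* Editor's note: the "mov nb_sse_args, %eax" above implements the
   SysV varargs convention: when calling an unprototyped or variadic
   function, %al must hold an upper bound on the number of vector
   registers actually used for arguments, so the callee's prologue
   knows whether it needs to spill the XMM registers. */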
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}
/* generate function prolog of type 't' */
void gfunc_prolog(Sym *func_sym)
{
    CType *func_type = &func_sym->type;
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (func_var) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count > REGN)
                    goto stack_arg;
                seen_reg_num += reg_count;
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count > 8)
                    goto stack_arg;
                seen_sse_num += reg_count;
                break;
            }
        }

        loc -= 24;
        /* movl $0x????????, -0x18(%rbp) */
        o(0xe845c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0x14(%rbp) */
        o(0xec45c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* leaq $0x????????, %r11 */
        o(0x9d8d4c);
        gen_le32(seen_stack_size);
        /* movq %r11, -0x10(%rbp) */
        o(0xf05d894c);
        /* leaq -200(%rbp), %r11 */
        o(0x9d8d4c);
        gen_le32(-176 - 24);
        /* movq %r11, -0x8(%rbp) */
        o(0xf85d894c);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            if (!tcc_state->nosse) {
                o(0xd60f66); /* movq */
                gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            }
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (tcc_state->nosse)
                tcc_error("SSE disabled but floating point arguments used");
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_prolog();
#endif
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_epilog();
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
    o(0xec8148);  /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */
ST_FUNC void gen_fill_nops(int bytes)
{
    while (bytes--)
        g(0x90);
}

/* generate a jump to a label */
int gjmp(int t)
{
    return gjmp2(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}
ST_FUNC int gjmp_append(int n, int t)
{
    void *p;
    /* insert vtop->c jump list in t */
    if (n) {
        uint32_t n1 = n, n2;
        while ((n2 = read32le(p = cur_text_section->data + n1)))
            n1 = n2;
        write32le(p, t);
        t = n;
    }
    return t;
}
ST_FUNC int gjmp_cond(int op, int t)
{
    if (op & 0x100)
    {
        /* This was a float compare.  If the parity flag is set
           the result was unordered.  For anything except != this
           means false and we don't jump (anding both conditions).
           For != this means true (oring both).
           Take care about inverting the test.  We need to jump
           to our target if the result was unordered and test wasn't NE,
           otherwise if unordered we don't want to jump.  */
        int v = vtop->cmp_r;
        op &= ~0x100;
        if (op ^ v ^ (v != TOK_NE))
            o(0x067a);  /* jp +6 */
        else
        {
            g(0x0f);
            t = gjmp2(0x8a, t); /* jp t */
        }
    }
    g(0x0f);
    t = gjmp2(op - 16, t);
    return t;
}
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT)
            vset_VT_CMP(op);
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}
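
/* Editor's note: 'opc' above is the x86 /digit opcode extension used
   by both the one-byte ALU group and the 0x81/0x83 immediate group:
   0=add 1=or 2=adc 3=sbb 4=and 5=sub 6=xor 7=cmp.  The default case
   (opc = 7) is how the comparison operators reach gen_op8: a cmp is
   emitted and vset_VT_CMP(op) records which condition code to test. */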
void gen_opl(int op)
{
    gen_opi(op);
}
void vpush_const(int t, int v)
{
    CType ctype = { t | VT_CONSTANT, 0 };
    vpushsym(&ctype, external_global_sym(v, &ctype));
    vtop->r |= VT_LVAL;
}
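
/* Editor's note: vpush_const() is used below by gen_opf() to fetch
   the __mzerosf/__mzerodf constants (negative zero, defined in
   libtcc1.c); xoring with them flips only the sign bit, which is how
   SSE float negation is done without a dedicated instruction. */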
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int bt = vtop->type.t & VT_BTYPE;
    int float_type = bt == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    if (op == TOK_NEG) { /* unary minus */
        gv(float_type);
        if (float_type == RC_ST0) {
            o(0xe0d9); /* fchs */
        } else {
            /* -0.0, in libtcc1.c */
            vpush_const(bt, bt == VT_FLOAT ? TOK___mzerosf : TOK___mzerodf);
            gv(RC_FLOAT);
            if (bt == VT_DOUBLE)
                o(0x66);
            /* xorp[sd] %xmm1, %xmm0 */
            o(0xc0570f | (REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8) << 16);
            vtop--;
        }
        return;
    }

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fc80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vset_VT_CMP(op);
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.i;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
                vtop->r = r = r | VT_LVAL;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vset_VT_CMP(op | 0x100);
            vtop->cmp_r = op;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
                vtop->r = r = r | VT_LVAL;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }
            vtop--;
        }
    }
}
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(RC_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}
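
/* Editor's note: cvttss2si/cvttsd2si truncate toward zero regardless
   of the current MXCSR rounding mode, which is exactly the C
   semantics for float-to-integer conversion; out-of-range inputs
   yield the "integer indefinite" value (0x80000000 or
   0x8000000000000000). */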
// Generate sign extension from 32 to 64 bits:
ST_FUNC void gen_cvt_sxtw(void)
{
    int r = gv(RC_INT);
    /* x86_64 specific: movslq */
    o(0x6348);
    o(0xc0 + (REG_VALUE(r) << 3) + REG_VALUE(r));
}
/* char/short to int conversion */
ST_FUNC void gen_cvt_csti(int t)
{
    int r, sz, xl, ll;
    r = gv(RC_INT);
    sz = !(t & VT_UNSIGNED);
    xl = (t & VT_BTYPE) == VT_SHORT;
    ll = (vtop->type.t & VT_BTYPE) == VT_LLONG;
    orex(ll, r, 0, 0xc0b60f /* mov[sz] %a[xl], %eax */
        | (sz << 3 | xl) << 8
        | (REG_VALUE(r) << 3 | REG_VALUE(r)) << 16
        );
}
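
/* Editor's note on the packed literal above: the base 0f b6 is
   movzbl, and the two flag bits patch the second opcode byte:
   +8 ('sz', signed source) turns b6/b7 into be/bf (movs instead of
   movz), +1 ('xl', short source) turns b6/be into b7/bf (word
   instead of byte source).  The third byte is the register-to-itself
   ModRM. */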
/* increment tcov counter */
ST_FUNC void gen_increment_tcov (SValue *sv)
{
    o(0x058348); /* addq $1, xxx(%rip) */
    greloca(cur_text_section, sv->sym, ind, R_X86_64_PC32, -5);
    gen_le32(0);
    o(1);
}
/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}

#ifdef TCC_TARGET_PE
/* Save result of gen_vla_alloc onto the stack */
ST_FUNC void gen_vla_result(int addr) {
    /* mov %rax,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RAX, VT_LOCAL, NULL, addr);
}
#endif
/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
    int use_call = 0;

#if defined(CONFIG_TCC_BCHECK)
    use_call = tcc_state->do_bounds_check;
#endif
#ifdef TCC_TARGET_PE  /* alloca does more than just adjust %rsp on Windows */
    use_call = 1;
#endif
    if (use_call)
    {
        vpush_helper_func(TOK_alloca);
        vswap(); /* Move alloca ref past allocation size */
        gfunc_call(1);
    }
    else {
        int r;
        r = gv(RC_INT); /* allocation size */
        /* sub r,%rsp */
        o(0x2b48);
        o(0xe0 | REG_VALUE(r));
        /* We align to 16 bytes rather than align */
        /* and ~15, %rsp */
        o(0xf0e48348);
        vpop();
    }
}
/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/