/* [tinycc.git] i386-gen.c — gitweb blob header ("renamed registers" commit) */
1 /*
2 * X86 code generator for TCC
3 *
4 * Copyright (c) 2001, 2002 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/* number of available registers */
#define NB_REGS 4

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which does
   assumptions on it). */
#define RC_INT   0x0001 /* generic integer register */
#define RC_FLOAT 0x0002 /* generic float register */
#define RC_EAX   0x0004 /* %eax only (div/mul operands, return value) */
#define RC_ST0   0x0008 /* FPU stack top only */
#define RC_ECX   0x0010 /* %ecx only (shift counts via %cl) */
#define RC_EDX   0x0020 /* %edx only (div/mul high word) */
#define RC_IRET  RC_EAX /* function return: integer register */
#define RC_LRET  RC_EDX /* function return: second integer register */
#define RC_FRET  RC_ST0 /* function return: float register */
/* pretty names for the registers; indices into reg_classes[] and the
   values stored in SValue.r/r2 by the register allocator */
enum {
    TREG_EAX = 0,
    TREG_ECX,
    TREG_EDX,
    TREG_ST0,
};
45 int reg_classes[NB_REGS] = {
46 /* eax */ RC_INT | RC_EAX,
47 /* ecx */ RC_INT | RC_ECX,
48 /* edx */ RC_INT | RC_EDX,
49 /* st0 */ RC_FLOAT | RC_ST0,
/* return registers for function */
#define REG_IRET TREG_EAX /* single word int return register */
#define REG_LRET TREG_EDX /* second word return register (for long long) */
#define REG_FRET TREG_ST0 /* float return register */

/* defined if function parameters must be evaluated in reverse order
   (cdecl pushes the last argument first) */
#define INVERT_FUNC_PARAMS

/* defined if structures are passed as pointers. Otherwise structures
   are directly pushed on stack. */
//#define FUNC_STRUCT_PARAM_AS_PTR

/* pointer size, in bytes */
#define PTR_SIZE 4

/* long double size and alignment, in bytes (x87 80-bit format padded
   to 12 bytes on i386) */
#define LDOUBLE_SIZE 12
#define LDOUBLE_ALIGN 4
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN 8

/* relocation type for 32 bit data relocation */
#define R_DATA_32 R_386_32
/* function call context: tracks argument bytes pushed so the caller
   knows how much stack to pop (cdecl) or skip (stdcall) */
typedef struct GFuncContext {
    int args_size;   /* total bytes pushed for arguments so far */
    int func_call;   /* func call type (FUNC_STDCALL or FUNC_CDECL) */
} GFuncContext;
/******************************************************/

/* offset of the 'sub $xxx, %esp' immediate in the prolog, patched by
   gfunc_epilog once the local frame size is known */
static unsigned long func_sub_sp_offset;
/* lbounds section offset at prolog time (bound-checking bookkeeping) */
static unsigned long func_bound_offset;
/* bytes the callee must pop on return (stdcall); 0 for cdecl */
static int func_ret_sub;
88 /* XXX: make it faster ? */
89 void g(int c)
91 int ind1;
92 ind1 = ind + 1;
93 if (ind1 > cur_text_section->data_allocated)
94 section_realloc(cur_text_section, ind1);
95 cur_text_section->data[ind] = c;
96 ind = ind1;
/* emit a multi-byte opcode, least significant byte first; stops at the
   first zero byte, so a 0x00 byte cannot be encoded this way (use g()) */
void o(int c)
{
    while (c) {
        g(c);
        c = c / 256; /* assumes c >= 0; opcodes passed here are non-negative */
    }
}
/* emit a 32 bit little-endian constant (always exactly 4 bytes) */
void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}
115 /* output a symbol and patch all calls to it */
116 void gsym_addr(int t, int a)
118 int n, *ptr;
119 while (t) {
120 ptr = (int *)(cur_text_section->data + t);
121 n = *ptr; /* next value */
122 *ptr = a - t - 4;
123 t = n;
127 void gsym(int t)
129 gsym_addr(t, ind);
132 /* psym is used to put an instruction with a data field which is a
133 reference to a symbol. It is in fact the same as oad ! */
134 #define psym oad
136 /* instruction + 4 bytes data. Return the address of the data */
137 static int oad(int c, int s)
139 int ind1;
141 o(c);
142 ind1 = ind + 4;
143 if (ind1 > cur_text_section->data_allocated)
144 section_realloc(cur_text_section, ind1);
145 *(int *)(cur_text_section->data + ind) = s;
146 s = ind;
147 ind = ind1;
148 return s;
151 /* output constant with relocation if 'r & VT_SYM' is true */
152 static void gen_addr32(int r, Sym *sym, int c)
154 if (r & VT_SYM)
155 greloc(cur_text_section, sym, ind, R_386_32);
156 gen_le32(c);
159 /* generate a modrm reference. 'op_reg' contains the addtionnal 3
160 opcode bits */
161 static void gen_modrm(int op_reg, int r, Sym *sym, int c)
163 op_reg = op_reg << 3;
164 if ((r & VT_VALMASK) == VT_CONST) {
165 /* constant memory reference */
166 o(0x05 | op_reg);
167 gen_addr32(r, sym, c);
168 } else if ((r & VT_VALMASK) == VT_LOCAL) {
169 /* currently, we use only ebp as base */
170 if (c == (char)c) {
171 /* short reference */
172 o(0x45 | op_reg);
173 g(c);
174 } else {
175 oad(0x85 | op_reg, c);
177 } else {
178 g(0x00 | op_reg | (r & VT_VALMASK));
183 /* load 'r' from value 'sv' */
184 void load(int r, SValue *sv)
186 int v, t, ft, fc, fr;
187 SValue v1;
189 fr = sv->r;
190 ft = sv->type.t;
191 fc = sv->c.ul;
193 v = fr & VT_VALMASK;
194 if (fr & VT_LVAL) {
195 if (v == VT_LLOCAL) {
196 v1.type.t = VT_INT;
197 v1.r = VT_LOCAL | VT_LVAL;
198 v1.c.ul = fc;
199 load(r, &v1);
200 fr = r;
202 if ((ft & VT_BTYPE) == VT_FLOAT) {
203 o(0xd9); /* flds */
204 r = 0;
205 } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
206 o(0xdd); /* fldl */
207 r = 0;
208 } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
209 o(0xdb); /* fldt */
210 r = 5;
211 } else if ((ft & VT_TYPE) == VT_BYTE) {
212 o(0xbe0f); /* movsbl */
213 } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
214 o(0xb60f); /* movzbl */
215 } else if ((ft & VT_TYPE) == VT_SHORT) {
216 o(0xbf0f); /* movswl */
217 } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
218 o(0xb70f); /* movzwl */
219 } else {
220 o(0x8b); /* movl */
222 gen_modrm(r, fr, sv->sym, fc);
223 } else {
224 if (v == VT_CONST) {
225 o(0xb8 + r); /* mov $xx, r */
226 gen_addr32(fr, sv->sym, fc);
227 } else if (v == VT_LOCAL) {
228 o(0x8d); /* lea xxx(%ebp), r */
229 gen_modrm(r, VT_LOCAL, sv->sym, fc);
230 } else if (v == VT_CMP) {
231 oad(0xb8 + r, 0); /* mov $0, r */
232 o(0x0f); /* setxx %br */
233 o(fc);
234 o(0xc0 + r);
235 } else if (v == VT_JMP || v == VT_JMPI) {
236 t = v & 1;
237 oad(0xb8 + r, t); /* mov $1, r */
238 o(0x05eb); /* jmp after */
239 gsym(fc);
240 oad(0xb8 + r, t ^ 1); /* mov $0, r */
241 } else if (v != r) {
242 o(0x89);
243 o(0xc0 + r + v * 8); /* mov v, r */
248 /* store register 'r' in lvalue 'v' */
249 void store(int r, SValue *v)
251 int fr, bt, ft, fc;
253 ft = v->type.t;
254 fc = v->c.ul;
255 fr = v->r & VT_VALMASK;
256 bt = ft & VT_BTYPE;
257 /* XXX: incorrect if float reg to reg */
258 if (bt == VT_FLOAT) {
259 o(0xd9); /* fsts */
260 r = 2;
261 } else if (bt == VT_DOUBLE) {
262 o(0xdd); /* fstpl */
263 r = 2;
264 } else if (bt == VT_LDOUBLE) {
265 o(0xc0d9); /* fld %st(0) */
266 o(0xdb); /* fstpt */
267 r = 7;
268 } else {
269 if (bt == VT_SHORT)
270 o(0x66);
271 if (bt == VT_BYTE)
272 o(0x88);
273 else
274 o(0x89);
276 if (fr == VT_CONST ||
277 fr == VT_LOCAL ||
278 (v->r & VT_LVAL)) {
279 gen_modrm(r, v->r, v->sym, fc);
280 } else if (fr != r) {
281 o(0xc0 + fr + r * 8); /* mov r, fr */
285 /* start function call and return function call context */
286 void gfunc_start(GFuncContext *c, int func_call)
288 c->args_size = 0;
289 c->func_call = func_call;
292 /* push function parameter which is in (vtop->t, vtop->c). Stack entry
293 is then popped. */
294 void gfunc_param(GFuncContext *c)
296 int size, align, r;
298 if ((vtop->type.t & VT_BTYPE) == VT_STRUCT) {
299 size = type_size(&vtop->type, &align);
300 /* align to stack align size */
301 size = (size + 3) & ~3;
302 /* allocate the necessary size on stack */
303 oad(0xec81, size); /* sub $xxx, %esp */
304 /* generate structure store */
305 r = get_reg(RC_INT);
306 o(0x89); /* mov %esp, r */
307 o(0xe0 + r);
308 vset(&vtop->type, r | VT_LVAL, 0);
309 vswap();
310 vstore();
311 c->args_size += size;
312 } else if (is_float(vtop->type.t)) {
313 gv(RC_FLOAT); /* only one float register */
314 if ((vtop->type.t & VT_BTYPE) == VT_FLOAT)
315 size = 4;
316 else if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
317 size = 8;
318 else
319 size = 12;
320 oad(0xec81, size); /* sub $xxx, %esp */
321 if (size == 12)
322 o(0x7cdb);
323 else
324 o(0x5cd9 + size - 4); /* fstp[s|l] 0(%esp) */
325 g(0x24);
326 g(0x00);
327 c->args_size += size;
328 } else {
329 /* simple type (currently always same size) */
330 /* XXX: implicit cast ? */
331 r = gv(RC_INT);
332 if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
333 size = 8;
334 o(0x50 + vtop->r2); /* push r */
335 } else {
336 size = 4;
338 o(0x50 + r); /* push r */
339 c->args_size += size;
341 vtop--;
/* add 'val' to %esp, using the short imm8 form when it fits */
static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc483); /* add $imm8, %esp */
        g(val);
    } else {
        oad(0xc481, val); /* add $xxx, %esp */
    }
}
354 /* 'is_jmp' is '1' if it is a jump */
355 static void gcall_or_jmp(int is_jmp)
357 int r;
358 if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
359 /* constant case */
360 if (vtop->r & VT_SYM) {
361 /* relocation case */
362 greloc(cur_text_section, vtop->sym,
363 ind + 1, R_386_PC32);
364 } else {
365 /* put an empty PC32 relocation */
366 put_elf_reloc(symtab_section, cur_text_section,
367 ind + 1, R_386_PC32, 0);
369 oad(0xe8 + is_jmp, vtop->c.ul - 4); /* call/jmp im */
370 } else {
371 /* otherwise, indirect call */
372 r = gv(RC_INT);
373 o(0xff); /* call/jmp *r */
374 o(0xd0 + r + (is_jmp << 4));
378 /* generate function call with address in (vtop->t, vtop->c) and free function
379 context. Stack entry is popped */
380 void gfunc_call(GFuncContext *c)
382 gcall_or_jmp(0);
383 if (c->args_size && c->func_call == FUNC_CDECL)
384 gadd_sp(c->args_size);
385 vtop--;
388 /* generate function prolog of type 't' */
389 void gfunc_prolog(CType *func_type)
391 int addr, align, size, func_call;
392 Sym *sym;
393 CType *type;
395 sym = func_type->ref;
396 func_call = sym->r;
397 addr = 8;
398 /* if the function returns a structure, then add an
399 implicit pointer parameter */
400 func_vt = sym->type;
401 if ((func_vt.t & VT_BTYPE) == VT_STRUCT) {
402 func_vc = addr;
403 addr += 4;
405 /* define parameters */
406 while ((sym = sym->next) != NULL) {
407 type = &sym->type;
408 sym_push(sym->v & ~SYM_FIELD, type,
409 VT_LOCAL | VT_LVAL, addr);
410 size = type_size(type, &align);
411 size = (size + 3) & ~3;
412 #ifdef FUNC_STRUCT_PARAM_AS_PTR
413 /* structs are passed as pointer */
414 if ((type->t & VT_BTYPE) == VT_STRUCT) {
415 size = 4;
417 #endif
418 addr += size;
420 func_ret_sub = 0;
421 /* pascal type call ? */
422 if (func_call == FUNC_STDCALL)
423 func_ret_sub = addr - 8;
424 o(0xe58955); /* push %ebp, mov %esp, %ebp */
425 func_sub_sp_offset = oad(0xec81, 0); /* sub $xxx, %esp */
426 /* leave some room for bound checking code */
427 if (do_bounds_check) {
428 oad(0xb8, 0); /* lbound section pointer */
429 oad(0xb8, 0); /* call to function */
430 func_bound_offset = lbounds_section->data_offset;
434 /* generate function epilog */
435 void gfunc_epilog(void)
437 #ifdef CONFIG_TCC_BCHECK
438 if (do_bounds_check && func_bound_offset != lbounds_section->data_offset) {
439 int saved_ind;
440 int *bounds_ptr;
441 Sym *sym, *sym_data;
442 /* add end of table info */
443 bounds_ptr = section_ptr_add(lbounds_section, sizeof(int));
444 *bounds_ptr = 0;
445 /* generate bound local allocation */
446 saved_ind = ind;
447 ind = func_sub_sp_offset + 4;
448 sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
449 func_bound_offset, lbounds_section->data_offset);
450 greloc(cur_text_section, sym_data,
451 ind + 1, R_386_32);
452 oad(0xb8, 0); /* mov %eax, xxx */
453 sym = external_global_sym(TOK___bound_local_new, &func_old_type, 0);
454 greloc(cur_text_section, sym,
455 ind + 1, R_386_PC32);
456 oad(0xe8, -4);
457 ind = saved_ind;
458 /* generate bound check local freeing */
459 o(0x5250); /* save returned value, if any */
460 greloc(cur_text_section, sym_data,
461 ind + 1, R_386_32);
462 oad(0xb8, 0); /* mov %eax, xxx */
463 sym = external_global_sym(TOK___bound_local_delete, &func_old_type, 0);
464 greloc(cur_text_section, sym,
465 ind + 1, R_386_PC32);
466 oad(0xe8, -4);
467 o(0x585a); /* restore returned value, if any */
469 #endif
470 o(0xc9); /* leave */
471 if (func_ret_sub == 0) {
472 o(0xc3); /* ret */
473 } else {
474 o(0xc2); /* ret n */
475 g(func_ret_sub);
476 g(func_ret_sub >> 8);
478 /* align local size to word & save local variables */
479 *(int *)(cur_text_section->data + func_sub_sp_offset) = (-loc + 3) & -4;
/* generate a jump to a label */
int gjmp(int t)
{
    return psym(0xe9, t);
}
488 /* generate a jump to a fixed address */
489 void gjmp_addr(int a)
491 int r;
492 r = a - ind - 2;
493 if (r == (char)r) {
494 g(0xeb);
495 g(r);
496 } else {
497 oad(0xe9, a - ind - 5);
501 /* generate a test. set 'inv' to invert test. Stack entry is popped */
502 int gtst(int inv, int t)
504 int v, *p;
506 v = vtop->r & VT_VALMASK;
507 if (v == VT_CMP) {
508 /* fast case : can jump directly since flags are set */
509 g(0x0f);
510 t = psym((vtop->c.i - 16) ^ inv, t);
511 } else if (v == VT_JMP || v == VT_JMPI) {
512 /* && or || optimization */
513 if ((v & 1) == inv) {
514 /* insert vtop->c jump list in t */
515 p = &vtop->c.i;
516 while (*p != 0)
517 p = (int *)(cur_text_section->data + *p);
518 *p = t;
519 t = vtop->c.i;
520 } else {
521 t = gjmp(t);
522 gsym(vtop->c.i);
524 } else {
525 if (is_float(vtop->type.t)) {
526 vpushi(0);
527 gen_op(TOK_NE);
529 if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
530 /* constant jmp optimization */
531 if ((vtop->c.i != 0) != inv)
532 t = gjmp(t);
533 } else {
534 v = gv(RC_INT);
535 o(0x85);
536 o(0xc0 + v * 9);
537 g(0x0f);
538 t = psym(0x85 ^ inv, t);
541 vtop--;
542 return t;
545 /* generate an integer binary operation */
546 void gen_opi(int op)
548 int r, fr, opc, c;
550 switch(op) {
551 case '+':
552 case TOK_ADDC1: /* add with carry generation */
553 opc = 0;
554 gen_op8:
555 if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
556 /* constant case */
557 vswap();
558 r = gv(RC_INT);
559 vswap();
560 c = vtop->c.i;
561 if (c == (char)c) {
562 /* XXX: generate inc and dec for smaller code ? */
563 o(0x83);
564 o(0xc0 | (opc << 3) | r);
565 g(c);
566 } else {
567 o(0x81);
568 oad(0xc0 | (opc << 3) | r, c);
570 } else {
571 gv2(RC_INT, RC_INT);
572 r = vtop[-1].r;
573 fr = vtop[0].r;
574 o((opc << 3) | 0x01);
575 o(0xc0 + r + fr * 8);
577 vtop--;
578 if (op >= TOK_ULT && op <= TOK_GT) {
579 vtop->r = VT_CMP;
580 vtop->c.i = op;
582 break;
583 case '-':
584 case TOK_SUBC1: /* sub with carry generation */
585 opc = 5;
586 goto gen_op8;
587 case TOK_ADDC2: /* add with carry use */
588 opc = 2;
589 goto gen_op8;
590 case TOK_SUBC2: /* sub with carry use */
591 opc = 3;
592 goto gen_op8;
593 case '&':
594 opc = 4;
595 goto gen_op8;
596 case '^':
597 opc = 6;
598 goto gen_op8;
599 case '|':
600 opc = 1;
601 goto gen_op8;
602 case '*':
603 gv2(RC_INT, RC_INT);
604 r = vtop[-1].r;
605 fr = vtop[0].r;
606 vtop--;
607 o(0xaf0f); /* imul fr, r */
608 o(0xc0 + fr + r * 8);
609 break;
610 case TOK_SHL:
611 opc = 4;
612 goto gen_shift;
613 case TOK_SHR:
614 opc = 5;
615 goto gen_shift;
616 case TOK_SAR:
617 opc = 7;
618 gen_shift:
619 opc = 0xc0 | (opc << 3);
620 if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
621 /* constant case */
622 vswap();
623 r = gv(RC_INT);
624 vswap();
625 c = vtop->c.i & 0x1f;
626 o(0xc1); /* shl/shr/sar $xxx, r */
627 o(opc | r);
628 g(c);
629 } else {
630 /* we generate the shift in ecx */
631 gv2(RC_INT, RC_ECX);
632 r = vtop[-1].r;
633 o(0xd3); /* shl/shr/sar %cl, r */
634 o(opc | r);
636 vtop--;
637 break;
638 case '/':
639 case TOK_UDIV:
640 case TOK_PDIV:
641 case '%':
642 case TOK_UMOD:
643 case TOK_UMULL:
644 /* first operand must be in eax */
645 /* XXX: need better constraint for second operand */
646 gv2(RC_EAX, RC_ECX);
647 r = vtop[-1].r;
648 fr = vtop[0].r;
649 vtop--;
650 save_reg(TREG_EDX);
651 if (op == TOK_UMULL) {
652 o(0xf7); /* mul fr */
653 o(0xe0 + fr);
654 vtop->r2 = TREG_EDX;
655 r = TREG_EAX;
656 } else {
657 if (op == TOK_UDIV || op == TOK_UMOD) {
658 o(0xf7d231); /* xor %edx, %edx, div fr, %eax */
659 o(0xf0 + fr);
660 } else {
661 o(0xf799); /* cltd, idiv fr, %eax */
662 o(0xf8 + fr);
664 if (op == '%' || op == TOK_UMOD)
665 r = TREG_EDX;
666 else
667 r = TREG_EAX;
669 vtop->r = r;
670 break;
671 default:
672 opc = 7;
673 goto gen_op8;
677 /* generate a floating point operation 'v = t1 op t2' instruction. The
678 two operands are guaranted to have the same floating point type */
679 /* XXX: need to use ST1 too */
680 void gen_opf(int op)
682 int a, ft, fc, swapped, r;
684 /* convert constants to memory references */
685 if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
686 vswap();
687 gv(RC_FLOAT);
688 vswap();
690 if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
691 gv(RC_FLOAT);
693 /* must put at least one value in the floating point register */
694 if ((vtop[-1].r & VT_LVAL) &&
695 (vtop[0].r & VT_LVAL)) {
696 vswap();
697 gv(RC_FLOAT);
698 vswap();
700 swapped = 0;
701 /* swap the stack if needed so that t1 is the register and t2 is
702 the memory reference */
703 if (vtop[-1].r & VT_LVAL) {
704 vswap();
705 swapped = 1;
707 if (op >= TOK_ULT && op <= TOK_GT) {
708 /* load on stack second operand */
709 load(TREG_ST0, vtop);
710 save_reg(TREG_EAX); /* eax is used by FP comparison code */
711 if (op == TOK_GE || op == TOK_GT)
712 swapped = !swapped;
713 else if (op == TOK_EQ || op == TOK_NE)
714 swapped = 0;
715 if (swapped)
716 o(0xc9d9); /* fxch %st(1) */
717 o(0xe9da); /* fucompp */
718 o(0xe0df); /* fnstsw %ax */
719 if (op == TOK_EQ) {
720 o(0x45e480); /* and $0x45, %ah */
721 o(0x40fC80); /* cmp $0x40, %ah */
722 } else if (op == TOK_NE) {
723 o(0x45e480); /* and $0x45, %ah */
724 o(0x40f480); /* xor $0x40, %ah */
725 op = TOK_NE;
726 } else if (op == TOK_GE || op == TOK_LE) {
727 o(0x05c4f6); /* test $0x05, %ah */
728 op = TOK_EQ;
729 } else {
730 o(0x45c4f6); /* test $0x45, %ah */
731 op = TOK_EQ;
733 vtop--;
734 vtop->r = VT_CMP;
735 vtop->c.i = op;
736 } else {
737 /* no memory reference possible for long double operations */
738 if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
739 load(TREG_ST0, vtop);
740 swapped = !swapped;
743 switch(op) {
744 default:
745 case '+':
746 a = 0;
747 break;
748 case '-':
749 a = 4;
750 if (swapped)
751 a++;
752 break;
753 case '*':
754 a = 1;
755 break;
756 case '/':
757 a = 6;
758 if (swapped)
759 a++;
760 break;
762 ft = vtop->type.t;
763 fc = vtop->c.ul;
764 if ((ft & VT_BTYPE) == VT_LDOUBLE) {
765 o(0xde); /* fxxxp %st, %st(1) */
766 o(0xc1 + (a << 3));
767 } else {
768 /* if saved lvalue, then we must reload it */
769 r = vtop->r;
770 if ((r & VT_VALMASK) == VT_LLOCAL) {
771 SValue v1;
772 r = get_reg(RC_INT);
773 v1.type.t = VT_INT;
774 v1.r = VT_LOCAL | VT_LVAL;
775 v1.c.ul = fc;
776 load(r, &v1);
777 fc = 0;
780 if ((ft & VT_BTYPE) == VT_DOUBLE)
781 o(0xdc);
782 else
783 o(0xd8);
784 gen_modrm(a, r, vtop->sym, fc);
786 vtop--;
790 /* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
791 and 'long long' cases. */
792 void gen_cvt_itof(int t)
794 save_reg(TREG_ST0);
795 gv(RC_INT);
796 if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
797 /* signed long long to float/double/long double (unsigned case
798 is handled generically) */
799 o(0x50 + vtop->r2); /* push r2 */
800 o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
801 o(0x242cdf); /* fildll (%esp) */
802 o(0x08c483); /* add $8, %esp */
803 } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
804 (VT_INT | VT_UNSIGNED)) {
805 /* unsigned int to float/double/long double */
806 o(0x6a); /* push $0 */
807 g(0x00);
808 o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
809 o(0x242cdf); /* fildll (%esp) */
810 o(0x08c483); /* add $8, %esp */
811 } else {
812 /* int to float/double/long double */
813 o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
814 o(0x2404db); /* fildl (%esp) */
815 o(0x04c483); /* add $4, %esp */
817 vtop->r = TREG_ST0;
820 /* convert fp to int 't' type */
821 /* XXX: handle long long case */
822 void gen_cvt_ftoi(int t)
824 int r, r2, size;
825 Sym *sym;
826 CType ushort_type;
828 ushort_type.t = VT_SHORT | VT_UNSIGNED;
830 gv(RC_FLOAT);
831 if (t != VT_INT)
832 size = 8;
833 else
834 size = 4;
836 o(0x2dd9); /* ldcw xxx */
837 sym = external_global_sym(TOK___tcc_int_fpu_control,
838 &ushort_type, VT_LVAL);
839 greloc(cur_text_section, sym,
840 ind, R_386_32);
841 gen_le32(0);
843 oad(0xec81, size); /* sub $xxx, %esp */
844 if (size == 4)
845 o(0x1cdb); /* fistpl */
846 else
847 o(0x3cdf); /* fistpll */
848 o(0x24);
849 o(0x2dd9); /* ldcw xxx */
850 sym = external_global_sym(TOK___tcc_fpu_control,
851 &ushort_type, VT_LVAL);
852 greloc(cur_text_section, sym,
853 ind, R_386_32);
854 gen_le32(0);
856 r = get_reg(RC_INT);
857 o(0x58 + r); /* pop r */
858 if (size == 8) {
859 if (t == VT_LLONG) {
860 vtop->r = r; /* mark reg as used */
861 r2 = get_reg(RC_INT);
862 o(0x58 + r2); /* pop r2 */
863 vtop->r2 = r2;
864 } else {
865 o(0x04c483); /* add $4, %esp */
868 vtop->r = r;
871 /* convert from one floating point type to another */
872 void gen_cvt_ftof(int t)
874 /* all we have to do on i386 is to put the float in a register */
875 gv(RC_FLOAT);
878 /* computed goto support */
879 void ggoto(void)
881 gcall_or_jmp(1);
882 vtop--;
885 /* bound check support functions */
886 #ifdef CONFIG_TCC_BCHECK
888 /* generate a bounded pointer addition */
889 void gen_bounded_ptr_add(void)
891 Sym *sym;
893 /* prepare fast i386 function call (args in eax and edx) */
894 gv2(RC_EAX, RC_EDX);
895 /* save all temporary registers */
896 vtop -= 2;
897 save_regs(0);
898 /* do a fast function call */
899 sym = external_global_sym(TOK___bound_ptr_add, &func_old_type, 0);
900 greloc(cur_text_section, sym,
901 ind + 1, R_386_PC32);
902 oad(0xe8, -4);
903 /* returned pointer is in eax */
904 vtop++;
905 vtop->r = TREG_EAX | VT_BOUNDED;
906 /* address of bounding function call point */
907 vtop->c.ul = (cur_text_section->reloc->data_offset - sizeof(Elf32_Rel));
910 /* patch pointer addition in vtop so that pointer dereferencing is
911 also tested */
912 void gen_bounded_ptr_deref(void)
914 int func;
915 int size, align;
916 Elf32_Rel *rel;
917 Sym *sym;
919 size = 0;
920 /* XXX: put that code in generic part of tcc */
921 if (!is_float(vtop->type.t)) {
922 if (vtop->r & VT_LVAL_BYTE)
923 size = 1;
924 else if (vtop->r & VT_LVAL_SHORT)
925 size = 2;
927 if (!size)
928 size = type_size(&vtop->type, &align);
929 switch(size) {
930 case 1: func = TOK___bound_ptr_indir1; break;
931 case 2: func = TOK___bound_ptr_indir2; break;
932 case 4: func = TOK___bound_ptr_indir4; break;
933 case 8: func = TOK___bound_ptr_indir8; break;
934 case 12: func = TOK___bound_ptr_indir12; break;
935 case 16: func = TOK___bound_ptr_indir16; break;
936 default:
937 error("unhandled size when derefencing bounded pointer");
938 func = 0;
939 break;
942 /* patch relocation */
943 /* XXX: find a better solution ? */
944 rel = (Elf32_Rel *)(cur_text_section->reloc->data + vtop->c.ul);
945 sym = external_global_sym(func, &func_old_type, 0);
946 if (!sym->c)
947 put_extern_sym(sym, NULL, 0, 0);
948 rel->r_info = ELF32_R_INFO(sym->c, ELF32_R_TYPE(rel->r_info));
950 #endif
952 /* end of X86 code generator */
953 /*************************************************************/