/*
 *  X86 code generator for TCC
 *
 *  Copyright (c) 2001, 2002 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* number of available registers */
#define NB_REGS             4

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_EAX     0x0004
#define RC_ST0     0x0008
#define RC_ECX     0x0010
#define RC_EDX     0x0020
#define RC_IRET    RC_EAX /* function return: integer register */
#define RC_LRET    RC_EDX /* function return: second integer register */
#define RC_FRET    RC_ST0 /* function return: float register */

/* pretty names for the registers */
enum {
    REG_EAX = 0,
    REG_ECX,
    REG_EDX,
    REG_ST0,
};

int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_EAX,
    /* ecx */ RC_INT | RC_ECX,
    /* edx */ RC_INT | RC_EDX,
    /* st0 */ RC_FLOAT | RC_ST0,
};

/* return registers for function */
#define REG_IRET REG_EAX /* single word int return register */
#define REG_LRET REG_EDX /* second word return register (for long long) */
#define REG_FRET REG_ST0 /* float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* defined if structures are passed as pointers. Otherwise structures
   are directly pushed on stack. */
//#define FUNC_STRUCT_PARAM_AS_PTR

/* pointer size, in bytes */
#define PTR_SIZE 4

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  12
#define LDOUBLE_ALIGN 4

/* relocation type for 32 bit data relocation */
#define R_DATA_32 R_386_32

/* function call context */
typedef struct GFuncContext {
    int args_size;
    int func_call; /* func call type (FUNC_STDCALL or FUNC_CDECL) */
} GFuncContext;

/******************************************************/

static unsigned long func_sub_sp_offset;
static unsigned long func_bound_offset;
static int func_ret_sub;

/* XXX: make it faster ? */
void g(int c)
{
    int ind1;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}

void o(int c)
{
    while (c) {
        g(c);
        c = c / 256;
    }
}

void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

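/* Illustration of the output helpers above (a sketch for exposition, not
   part of the generator itself): o() emits the value low byte first and
   stops at the first zero remainder, so it only suits opcode patterns whose
   trailing bytes are non-zero; gen_le32() always emits exactly four bytes,
   zeros included, which is why immediates and displacements go through it. */
#if 0
    o(0xb60f);   /* emits 0x0f 0xb6           (the movzbl opcode pair) */
    o(0xb8 + 1); /* emits 0xb9                (mov $imm32, %ecx)       */
    gen_le32(5); /* emits 0x05 0x00 0x00 0x00 (the 32 bit immediate)   */
#endif
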
/* output a symbol and patch all calls to it */
void gsym_addr(int t, int a)
{
    int n, *ptr;
    while (t) {
        ptr = (int *)(cur_text_section->data + t);
        n = *ptr; /* next value */
        *ptr = a - t - 4;
        t = n;
    }
}

void gsym(int t)
{
    gsym_addr(t, ind);
}

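/* Sketch of how the jump backpatching above is used (hypothetical control
   flow, for exposition): a forward jump is emitted through psym()/gjmp(),
   which stores the previous head of the patch list in the 4-byte
   displacement field and returns the offset of that field; gsym_addr()
   then walks this in-buffer linked list and rewrites each link with the
   final PC-relative displacement 'a - t - 4'. */
#if 0
    int l;
    l = gjmp(0);   /* jump to a not yet known target; 0 terminates the list */
    /* ... emit the code that is jumped over ... */
    gsym(l);       /* patch every jump chained in 'l' to the current ind */
#endif
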
/* psym is used to put an instruction with a data field which is a
   reference to a symbol. It is in fact the same as oad ! */
#define psym oad

/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)
{
    int ind1;

    o(c);
    ind1 = ind + 4;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    *(int *)(cur_text_section->data + ind) = s;
    s = ind;
    ind = ind1;
    return s;
}

/* output constant with relocation if 'r & VT_SYM' is true */
static void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_386_32);
    gen_le32(c);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    op_reg = op_reg << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        o(0x05 | op_reg);
        gen_addr32(r, sym, c);
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else {
        g(0x00 | op_reg | (r & VT_VALMASK));
    }
}

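/* Worked example of the modrm forms chosen above (derived from the code,
   for exposition): loading an int local at -8(%ebp) into %eax takes the
   short (disp8) form:
       o(0x8b); gen_modrm(REG_EAX, VT_LOCAL | VT_LVAL, NULL, -8);
   which emits "8b 45 f8", i.e. movl -8(%ebp), %eax. Offsets that do not
   fit in a signed byte use the 0x85 | op_reg form with a 32-bit
   displacement, and VT_CONST references use the absolute 0x05 form,
   relocated by gen_addr32() when VT_SYM is set. */
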
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

    fr = sv->r;
    ft = sv->t;
    fc = sv->c.ul;

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        if (v == VT_LLOCAL) {
            v1.t = VT_INT;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.ul = fc;
            load(r, &v1);
            fr = r;
        }
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            o(0xd9); /* flds */
            r = 0;
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            o(0xdd); /* fldl */
            r = 0;
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            o(0xdb); /* fldt */
            r = 5;
        } else if ((ft & VT_TYPE) == VT_BYTE) {
            o(0xbe0f); /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            o(0xb60f); /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            o(0xbf0f); /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            o(0xb70f); /* movzwl */
        } else {
            o(0x8b); /* movl */
        }
        gen_modrm(r, fr, sv->sym, fc);
    } else {
        if (v == VT_CONST) {
            o(0xb8 + r); /* mov $xx, r */
            gen_addr32(fr, sv->sym, fc);
        } else if (v == VT_LOCAL) {
            o(0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            oad(0xb8 + r, 0); /* mov $0, r */
            o(0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + r);
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            oad(0xb8 + r, t); /* mov $1, r */
            o(0x05eb); /* jmp after */
            gsym(fc);
            oad(0xb8 + r, t ^ 1); /* mov $0, r */
        } else if (v != r) {
            o(0x89);
            o(0xc0 + r + v * 8); /* mov v, r */
        }
    }
}

/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;

    ft = v->t;
    fc = v->c.ul;
    fr = v->r & VT_VALMASK;
    bt = ft & VT_BTYPE;
    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0xd9); /* fsts */
        r = 2;
    } else if (bt == VT_DOUBLE) {
        o(0xdd); /* fstpl */
        r = 2;
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        if (bt == VT_BYTE)
            o(0x88);
        else
            o(0x89);
    }
    if (fr == VT_CONST ||
        fr == VT_LOCAL ||
        (v->r & VT_LVAL)) {
        gen_modrm(r, v->r, v->sym, fc);
    } else if (fr != r) {
        o(0xc0 + fr + r * 8); /* mov r, fr */
    }
}

/* start function call and return function call context */
void gfunc_start(GFuncContext *c, int func_call)
{
    c->args_size = 0;
    c->func_call = func_call;
}

/* push function parameter which is in (vtop->t, vtop->c). Stack entry
   is then popped. */
void gfunc_param(GFuncContext *c)
{
    int size, align, r;

    if ((vtop->t & VT_BTYPE) == VT_STRUCT) {
        size = type_size(vtop->t, &align);
        /* align to stack align size */
        size = (size + 3) & ~3;
        /* allocate the necessary size on stack */
        oad(0xec81, size); /* sub $xxx, %esp */
        /* generate structure store */
        r = get_reg(RC_INT);
        o(0x89); /* mov %esp, r */
        o(0xe0 + r);
        vset(vtop->t, r | VT_LVAL, 0);
        vswap();
        vstore();
        c->args_size += size;
    } else if (is_float(vtop->t)) {
        gv(RC_FLOAT); /* only one float register */
        if ((vtop->t & VT_BTYPE) == VT_FLOAT)
            size = 4;
        else if ((vtop->t & VT_BTYPE) == VT_DOUBLE)
            size = 8;
        else
            size = 12;
        oad(0xec81, size); /* sub $xxx, %esp */
        if (size == 12)
            o(0x7cdb);
        else
            o(0x5cd9 + size - 4); /* fstp[s|l] 0(%esp) */
        g(0x24);
        g(0x00);
        c->args_size += size;
    } else {
        /* simple type (currently always same size) */
        /* XXX: implicit cast ? */
        r = gv(RC_INT);
        if ((vtop->t & VT_BTYPE) == VT_LLONG) {
            size = 8;
            o(0x50 + vtop->r2); /* push r */
        } else {
            size = 4;
        }
        o(0x50 + r); /* push r */
        c->args_size += size;
    }
    vtop--;
}

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc483);
        g(val);
    } else {
        oad(0xc481, val); /* add $xxx, %esp */
    }
}

/* generate function call with address in (vtop->t, vtop->c) and free function
   context. Stack entry is popped */
void gfunc_call(GFuncContext *c)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        /* constant case */
        if (vtop->r & VT_SYM) {
            /* relocation case */
            greloc(cur_text_section, vtop->sym,
                   ind + 1, R_386_PC32);
        } else {
            /* put an empty PC32 relocation */
            put_elf_reloc(symtab_section, cur_text_section,
                          ind + 1, R_386_PC32, 0);
        }
        oad(0xe8, vtop->c.ul - 4);
    } else {
        /* otherwise, indirect call */
        r = gv(RC_INT);
        o(0xff); /* call *r */
        o(0xd0 + r);
    }
    if (c->args_size && c->func_call == FUNC_CDECL)
        gadd_sp(c->args_size);
    vtop--;
}

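/* Sketch of the resulting calling sequence (as implied by the code above,
   for exposition). With INVERT_FUNC_PARAMS the arguments are evaluated and
   pushed last first, so a cdecl call f(a, b) comes out roughly as:
       push <b>
       push <a>
       call f          ; e8 rel32, relocation placed at ind + 1
       add  $8, %esp   ; gadd_sp(), emitted for FUNC_CDECL only
   For FUNC_STDCALL the stack adjustment is omitted here and the callee pops
   its own arguments with "ret $n" (see func_ret_sub in the epilog). */
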
/* generate function prolog of type 't' */
void gfunc_prolog(int t)
{
    int addr, align, size, u, func_call;
    Sym *sym;

    sym = sym_find((unsigned)t >> VT_STRUCT_SHIFT);
    func_call = sym->r;
    addr = 8;
    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->t;
    if ((func_vt & VT_BTYPE) == VT_STRUCT) {
        func_vc = addr;
        addr += 4;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        u = sym->t;
        sym_push(sym->v & ~SYM_FIELD, u,
                 VT_LOCAL | VT_LVAL, addr);
        size = type_size(u, &align);
        size = (size + 3) & ~3;
#ifdef FUNC_STRUCT_PARAM_AS_PTR
        /* structs are passed as pointer */
        if ((u & VT_BTYPE) == VT_STRUCT) {
            size = 4;
        }
#endif
        addr += size;
    }
    func_ret_sub = 0;
    /* pascal type call ? */
    if (func_call == FUNC_STDCALL)
        func_ret_sub = addr - 8;
    o(0xe58955); /* push %ebp, mov %esp, %ebp */
    func_sub_sp_offset = oad(0xec81, 0); /* sub $xxx, %esp */
    /* leave some room for bound checking code */
    if (do_bounds_check) {
        oad(0xb8, 0); /* lbound section pointer */
        oad(0xb8, 0); /* call to function */
        func_bound_offset = lbounds_section->data_offset;
    }
}

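/* Frame layout set up by the prolog above (a reading of the code, for
   reference):
       8(%ebp)    first parameter, or the struct return pointer when the
                  function returns a structure (parameters then start at 12)
       4(%ebp)    return address
       0(%ebp)    saved %ebp
     loc(%ebp)    local variables, at negative offsets
   The "sub $xxx, %esp" is emitted with a zero placeholder; gfunc_epilog()
   patches in the final, word-aligned frame size at func_sub_sp_offset. */
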
/* generate function epilog */
void gfunc_epilog(void)
{
#ifdef CONFIG_TCC_BCHECK
    if (do_bounds_check && func_bound_offset != lbounds_section->data_offset) {
        int saved_ind;
        int *bounds_ptr;
        Sym *sym, *sym_data;
        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(int));
        *bounds_ptr = 0;
        /* generate bound local allocation */
        saved_ind = ind;
        ind = func_sub_sp_offset + 4;
        sym_data = get_sym_ref(char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        greloc(cur_text_section, sym_data,
               ind + 1, R_386_32);
        oad(0xb8, 0); /* mov %eax, xxx */
        sym = external_global_sym(TOK___bound_local_new, func_old_type, 0);
        greloc(cur_text_section, sym,
               ind + 1, R_386_PC32);
        oad(0xe8, -4);
        ind = saved_ind;
        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloc(cur_text_section, sym_data,
               ind + 1, R_386_32);
        oad(0xb8, 0); /* mov %eax, xxx */
        sym = external_global_sym(TOK___bound_local_delete, func_old_type, 0);
        greloc(cur_text_section, sym,
               ind + 1, R_386_PC32);
        oad(0xe8, -4);
        o(0x585a); /* restore returned value, if any */
    }
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    *(int *)(cur_text_section->data + func_sub_sp_offset) = (-loc + 3) & -4;
}

/* generate a jump to a label */
int gjmp(int t)
{
    return psym(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}

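/* Note (a reading of the code above): since gjmp_addr() targets an address
   that is already known, it can choose the jump size: a displacement that
   fits in a signed byte gets the 2-byte "eb rel8" short form, anything else
   the 5-byte "e9 rel32" near form. gjmp() always uses the long form because
   its target is still unknown and the displacement field doubles as the
   backpatch list link. */
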
/* generate a test. set 'inv' to invert test. Stack entry is popped */
int gtst(int inv, int t)
{
    int v, *p;
    v = vtop->r & VT_VALMASK;
    if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        g(0x0f);
        t = psym((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            p = &vtop->c.i;
            while (*p != 0)
                p = (int *)(cur_text_section->data + *p);
            *p = t;
            t = vtop->c.i;
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    } else {
        if (is_float(vtop->t)) {
            vpushi(0);
            gen_op(TOK_NE);
        }
        if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            /* constant jmp optimization */
            if ((vtop->c.i != 0) != inv)
                t = gjmp(t);
        } else {
            v = gv(RC_INT);
            o(0x85);
            o(0xc0 + v * 9);
            g(0x0f);
            t = psym(0x85 ^ inv, t);
        }
    }
    vtop--;
    return t;
}

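/* Note (a reading of the code above, assuming the usual tcc token layout):
   the VT_CMP case relies on the comparison tokens (TOK_ULT .. TOK_GT) being
   numerically equal to the i386 "setcc" opcodes (0x90 + condition code), as
   used directly in load(); subtracting 16 turns them into the long-form
   "jcc" opcode (0x80 + cc), and xoring with 'inv' flips the low bit of the
   condition code, which inverts the test. The VT_JMP/VT_JMPI case merges
   pending jump lists instead of emitting a new test, which is what makes
   the && and || lowering cheap. */
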
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                o(0x83);
                o(0xc0 | (opc << 3) | r);
                g(c);
            } else {
                o(0x81);
                oad(0xc0 | (opc << 3) | r, c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            o((opc << 3) | 0x01);
            o(0xc0 + r + fr * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop--;
            vset(VT_INT, VT_CMP, op);
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        o(0xaf0f); /* imul fr, r */
        o(0xc0 + fr + r * 8);
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i & 0x1f;
            o(0xc1); /* shl/shr/sar $xxx, r */
            o(opc | r);
            g(c);
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_ECX);
            r = vtop[-1].r;
            o(0xd3); /* shl/shr/sar %cl, r */
            o(opc | r);
        }
        vtop--;
        break;
    case '/':
    case TOK_UDIV:
    case TOK_PDIV:
    case '%':
    case TOK_UMOD:
    case TOK_UMULL:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_EAX, RC_ECX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(REG_EDX);
        if (op == TOK_UMULL) {
            o(0xf7); /* mul fr */
            o(0xe0 + fr);
            vtop->r2 = REG_EDX;
            r = REG_EAX;
        } else {
            if (op == TOK_UDIV || op == TOK_UMOD) {
                o(0xf7d231); /* xor %edx, %edx, div fr, %eax */
                o(0xf0 + fr);
            } else {
                o(0xf799); /* cltd, idiv fr, %eax */
                o(0xf8 + fr);
            }
            if (op == '%' || op == TOK_UMOD)
                r = REG_EDX;
            else
                r = REG_EAX;
        }
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}

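/* Worked example of the constant path in gen_op8 above (derived from the
   code, for exposition): adding 5 to %ecx uses the sign-extended imm8 form
       83 c1 05              add $5, %ecx
   while a constant such as 0x12345 falls back to the imm32 form
       81 c1 45 23 01 00     add $0x12345, %ecx
   The 3-bit 'opc' field selects the ALU operation (0 add, 1 or, 2 adc,
   3 sbb, 4 and, 5 sub, 6 xor, 7 cmp), matching the opc values assigned in
   the cases above. */
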
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(RC_FLOAT);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(RC_FLOAT);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(RC_FLOAT);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if (op >= TOK_ULT && op <= TOK_GT) {
        /* load on stack second operand */
        load(REG_ST0, vtop);
        save_reg(REG_EAX); /* eax is used by FP comparison code */
        if (op == TOK_GE || op == TOK_GT)
            swapped = !swapped;
        else if (op == TOK_EQ || op == TOK_NE)
            swapped = 0;
        if (swapped)
            o(0xc9d9); /* fxch %st(1) */
        o(0xe9da); /* fucompp */
        o(0xe0df); /* fnstsw %ax */
        if (op == TOK_EQ) {
            o(0x45e480); /* and $0x45, %ah */
            o(0x40fC80); /* cmp $0x40, %ah */
        } else if (op == TOK_NE) {
            o(0x45e480); /* and $0x45, %ah */
            o(0x40f480); /* xor $0x40, %ah */
            op = TOK_NE;
        } else if (op == TOK_GE || op == TOK_LE) {
            o(0x05c4f6); /* test $0x05, %ah */
            op = TOK_EQ;
        } else {
            o(0x45c4f6); /* test $0x45, %ah */
            op = TOK_EQ;
        }
        vtop--;
        vtop->r = VT_CMP;
        vtop->c.i = op;
    } else {
        /* no memory reference possible for long double operations */
        if ((vtop->t & VT_BTYPE) == VT_LDOUBLE) {
            load(REG_ST0, vtop);
            swapped = !swapped;
        }

        switch(op) {
        default:
        case '+':
            a = 0;
            break;
        case '-':
            a = 4;
            if (swapped)
                a++;
            break;
        case '*':
            a = 1;
            break;
        case '/':
            a = 6;
            if (swapped)
                a++;
            break;
        }
        ft = vtop->t;
        fc = vtop->c.ul;
        if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
        } else {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.t = VT_INT;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.ul = fc;
                load(r, &v1);
                fc = 0;
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE)
                o(0xdc);
            else
                o(0xd8);
            gen_modrm(a, r, vtop->sym, fc);
        }
        vtop--;
    }
}

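/* Note on the comparison branch above (a reading of the code, for
   exposition): fnstsw copies the FPU status word into %ax, so the condition
   flags C0, C2 and C3 land in bits 0, 2 and 6 of %ah (mask 0x45).
   "and $0x45; cmp $0x40" tests for exact equality (C3 set, C0/C2 clear),
   the "test $0x05" / "test $0x45" forms test the orderings, and the result
   is left as a VT_CMP value so gtst() can branch on it like any integer
   comparison. */
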
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    save_reg(REG_ST0);
    gv(RC_INT);
    if ((vtop->t & VT_BTYPE) == VT_LLONG) {
        /* signed long long to float/double/long double (unsigned case
           is handled generically) */
        o(0x50 + vtop->r2); /* push r2 */
        o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
        o(0x242cdf); /* fildll (%esp) */
        o(0x08c483); /* add $8, %esp */
    } else if ((vtop->t & (VT_BTYPE | VT_UNSIGNED)) ==
               (VT_INT | VT_UNSIGNED)) {
        /* unsigned int to float/double/long double */
        o(0x6a); /* push $0 */
        g(0x00);
        o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
        o(0x242cdf); /* fildll (%esp) */
        o(0x08c483); /* add $8, %esp */
    } else {
        /* int to float/double/long double */
        o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
        o(0x2404db); /* fildl (%esp) */
        o(0x04c483); /* add $4, %esp */
    }
    vtop->r = REG_ST0;
}

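/* Note on the unsigned case above (a reading of the code, for exposition):
   pushing a zero high word and then the 32-bit value lets fildll load the
   pair as a 64-bit signed integer whose upper half is zero, which yields
   the correct unsigned 32-bit value without any range fixup; the signed
   long long case pushes r2 (high word) then r (low word) and uses the same
   fildll. */
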
/* convert fp to int 't' type */
/* XXX: handle long long case */
void gen_cvt_ftoi(int t)
{
    int r, r2, size;
    Sym *sym;

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    o(0x2dd9); /* ldcw xxx */
    sym = external_global_sym(TOK___tcc_int_fpu_control,
                              VT_SHORT | VT_UNSIGNED, VT_LVAL);
    greloc(cur_text_section, sym,
           ind, R_386_32);
    gen_le32(0);

    oad(0xec81, size); /* sub $xxx, %esp */
    if (size == 4)
        o(0x1cdb); /* fistpl */
    else
        o(0x3cdf); /* fistpll */
    o(0x24);
    o(0x2dd9); /* ldcw xxx */
    sym = external_global_sym(TOK___tcc_fpu_control,
                              VT_SHORT | VT_UNSIGNED, VT_LVAL);
    greloc(cur_text_section, sym,
           ind, R_386_32);
    gen_le32(0);

    r = get_reg(RC_INT);
    o(0x58 + r); /* pop r */
    if (size == 8) {
        if (t == VT_LLONG) {
            vtop->r = r; /* mark reg as used */
            r2 = get_reg(RC_INT);
            o(0x58 + r2); /* pop r2 */
            vtop->r2 = r2;
        } else {
            o(0x04c483); /* add $4, %esp */
        }
    }
    vtop->r = r;
}

/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    /* all we have to do on i386 is to put the float in a register */
    gv(RC_FLOAT);
}

/* bound check support functions */
#ifdef CONFIG_TCC_BCHECK

/* generate a bounded pointer addition */
void gen_bounded_ptr_add(void)
{
    Sym *sym;

    /* prepare fast i386 function call (args in eax and edx) */
    gv2(RC_EAX, RC_EDX);
    /* save all temporary registers */
    vtop -= 2;
    save_regs(0);
    /* do a fast function call */
    sym = external_global_sym(TOK___bound_ptr_add, func_old_type, 0);
    greloc(cur_text_section, sym,
           ind + 1, R_386_PC32);
    oad(0xe8, -4);
    /* returned pointer is in eax */
    vtop++;
    vtop->r = REG_EAX | VT_BOUNDED;
    /* address of bounding function call point */
    vtop->c.ul = (cur_text_section->reloc->data_offset - sizeof(Elf32_Rel));
}

/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
void gen_bounded_ptr_deref(void)
{
    int func;
    int size, align;
    Elf32_Rel *rel;
    Sym *sym;

    size = 0;
    /* XXX: put that code in generic part of tcc */
    if (!is_float(vtop->t)) {
        if (vtop->r & VT_LVAL_BYTE)
            size = 1;
        else if (vtop->r & VT_LVAL_SHORT)
            size = 2;
    }
    if (!size)
        size = type_size(vtop->t, &align);
    switch(size) {
    case 1: func = TOK___bound_ptr_indir1; break;
    case 2: func = TOK___bound_ptr_indir2; break;
    case 4: func = TOK___bound_ptr_indir4; break;
    case 8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
    default:
        error("unhandled size when dereferencing bounded pointer");
        func = 0;
        break;
    }

    /* patch relocation */
    /* XXX: find a better solution ? */
    rel = (Elf32_Rel *)(cur_text_section->reloc->data + vtop->c.ul);
    sym = external_global_sym(func, func_old_type, 0);
    if (!sym->c)
        put_extern_sym(sym, NULL, 0, 0);
    rel->r_info = ELF32_R_INFO(sym->c, ELF32_R_TYPE(rel->r_info));
}
#endif

/* end of X86 code generator */
/*************************************************************/