/*
 *  X86 code generator for TCC
 *
 *  Copyright (c) 2001, 2002 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* number of available registers */
#define NB_REGS    4

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_EAX     0x0004
#define RC_ST0     0x0008
#define RC_ECX     0x0010
#define RC_EDX     0x0020
#define RC_IRET    RC_EAX /* function return: integer register */
#define RC_LRET    RC_EDX /* function return: second integer register */
#define RC_FRET    RC_ST0 /* function return: float register */

/* pretty names for the registers */
enum {
    REG_EAX = 0,
    REG_ECX,
    REG_EDX,
    REG_ST0,
};

int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_EAX,
    /* ecx */ RC_INT | RC_ECX,
    /* edx */ RC_INT | RC_EDX,
    /* st0 */ RC_FLOAT | RC_ST0,
};

/* return registers for function */
#define REG_IRET REG_EAX /* single word int return register */
#define REG_LRET REG_EDX /* second word return register (for long long) */
#define REG_FRET REG_ST0 /* float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* defined if structures are passed as pointers. Otherwise structures
   are directly pushed on stack. */
//#define FUNC_STRUCT_PARAM_AS_PTR

/* pointer size, in bytes */
#define PTR_SIZE 4

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  12
#define LDOUBLE_ALIGN 4

/* relocation type for 32 bit data relocation */
#define R_DATA_32 R_386_32

/* function call context */
typedef struct GFuncContext {
    int args_size;
    int func_call; /* func call type (FUNC_STDCALL or FUNC_CDECL) */
} GFuncContext;

/******************************************************/

static unsigned long func_sub_sp_offset;
static unsigned long func_bound_offset;
static int func_ret_sub;

/* XXX: make it faster ? */
void g(int c)
{
    int ind1;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}

void o(int c)
{
    while (c) {
        g(c);
        c = c / 256;
    }
}

void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

/* output a symbol and patch all calls to it */
void gsym_addr(int t, int a)
{
    int n, *ptr;
    while (t) {
        ptr = (int *)(cur_text_section->data + t);
        n = *ptr; /* next value */
        *ptr = a - t - 4;
        t = n;
    }
}

void gsym(int t)
{
    gsym_addr(t, ind);
}

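/* Note: forward references are threaded through the generated code
   itself.  Each unresolved jmp/call stores, in its 32 bit displacement
   field at code offset 't', the offset of the previous unresolved
   reference to the same label (0 terminates the list).  gsym_addr()
   walks that list and rewrites each link with the final pc-relative
   displacement 'a - t - 4' (counted from the end of the 4 byte
   operand).  Sketch of a typical use:

       t = gjmp(0);
       t = gjmp(t);
       ...
       gsym(t);

   after gsym(), both jumps target the then-current value of 'ind'. */
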
/* psym is used to put an instruction with a data field which is a
   reference to a symbol. It is in fact the same as oad ! */
#define psym oad

/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)
{
    int ind1;

    o(c);
    ind1 = ind + 4;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    *(int *)(cur_text_section->data + ind) = s;
    s = ind;
    ind = ind1;
    return s;
}

/* output constant with relocation if 'r & VT_SYM' is true */
static void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_386_32);
    gen_le32(c);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    op_reg = op_reg << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        o(0x05 | op_reg);
        gen_addr32(r, sym, c);
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else {
        g(0x00 | op_reg | (r & VT_VALMASK));
    }
}

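/* Reminder on the ModR/M bytes emitted above (mod:2 reg:3 r/m:3):
     0x05 | op_reg       mod=00, r/m=101  ->  disp32 absolute address
     0x45 | op_reg       mod=01, r/m=101  ->  disp8(%ebp)
     0x85 | op_reg       mod=10, r/m=101  ->  disp32(%ebp)
     0x00 | op_reg | r   mod=00, r/m=reg  ->  (%reg) indirect
   'op_reg' is either a register number or the /digit opcode extension
   of the instruction being generated. */
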
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

    fr = sv->r;
    ft = sv->t;
    fc = sv->c.ul;

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        if (v == VT_LLOCAL) {
            v1.t = VT_INT;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.ul = fc;
            load(r, &v1);
            fr = r;
        }
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            o(0xd9); /* flds */
            r = 0;
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            o(0xdd); /* fldl */
            r = 0;
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            o(0xdb); /* fldt */
            r = 5;
        } else if ((ft & VT_TYPE) == VT_BYTE) {
            o(0xbe0f);   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            o(0xb60f);   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            o(0xbf0f);   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            o(0xb70f);   /* movzwl */
        } else {
            o(0x8b);     /* movl */
        }
        gen_modrm(r, fr, sv->sym, fc);
    } else {
        if (v == VT_CONST) {
            o(0xb8 + r); /* mov $xx, r */
            gen_addr32(fr, sv->sym, fc);
        } else if (v == VT_LOCAL) {
            o(0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            oad(0xb8 + r, 0); /* mov $0, r */
            o(0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + r);
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            oad(0xb8 + r, t); /* mov $1, r */
            o(0x05eb); /* jmp after */
            gsym(fc);
            oad(0xb8 + r, t ^ 1); /* mov $0, r */
        } else if (v != r) {
            o(0x89);
            o(0xc0 + r + v * 8); /* mov v, r */
        }
    }
}

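/* Note on the non-lvalue cases in load(): a VT_CMP value exists only in
   the CPU flags, so it is materialized as "mov $0, r" followed by a
   setcc on the low byte of r ('fc' holds the setcc condition byte).
   VT_JMP/VT_JMPI values are pending jump lists: one constant is loaded,
   a short jmp skips the alternative, gsym(fc) resolves the pending
   jumps onto the second load, so r ends up holding the boolean result
   of the test ('t = v & 1' selects which constant each branch loads). */
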
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;

    ft = v->t;
    fc = v->c.ul;
    fr = v->r & VT_VALMASK;
    bt = ft & VT_BTYPE;
    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0xd9); /* fsts */
        r = 2;
    } else if (bt == VT_DOUBLE) {
        o(0xdd); /* fstpl */
        r = 2;
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        if (bt == VT_BYTE)
            o(0x88);
        else
            o(0x89);
    }
    if (fr == VT_CONST ||
        fr == VT_LOCAL ||
        (v->r & VT_LVAL)) {
        gen_modrm(r, v->r, v->sym, fc);
    } else if (fr != r) {
        o(0xc0 + fr + r * 8); /* mov r, fr */
    }
}

/* start function call and return function call context */
void gfunc_start(GFuncContext *c, int func_call)
{
    c->args_size = 0;
    c->func_call = func_call;
}

/* push function parameter which is in (vtop->t, vtop->c). Stack entry
   is then popped. */
void gfunc_param(GFuncContext *c)
{
    int size, align, r;

    if ((vtop->t & VT_BTYPE) == VT_STRUCT) {
        size = type_size(vtop->t, &align);
        /* align to stack align size */
        size = (size + 3) & ~3;
        /* allocate the necessary size on stack */
        oad(0xec81, size); /* sub $xxx, %esp */
        /* generate structure store */
        r = get_reg(RC_INT);
        o(0x89); /* mov %esp, r */
        o(0xe0 + r);
        vset(vtop->t, r | VT_LVAL, 0);
        vswap();
        vstore();
        c->args_size += size;
    } else if (is_float(vtop->t)) {
        gv(RC_FLOAT); /* only one float register */
        if ((vtop->t & VT_BTYPE) == VT_FLOAT)
            size = 4;
        else if ((vtop->t & VT_BTYPE) == VT_DOUBLE)
            size = 8;
        else
            size = 12;
        oad(0xec81, size); /* sub $xxx, %esp */
        if (size == 12)
            o(0x7cdb);
        else
            o(0x5cd9 + size - 4); /* fstp[s|l] 0(%esp) */
        g(0x24);
        g(0x00);
        c->args_size += size;
    } else {
        /* simple type (currently always same size) */
        /* XXX: implicit cast ? */
        r = gv(RC_INT);
        if ((vtop->t & VT_BTYPE) == VT_LLONG) {
            size = 8;
            o(0x50 + vtop->r2); /* push r */
        } else {
            size = 4;
        }
        o(0x50 + r); /* push r */
        c->args_size += size;
    }
    vtop--;
}

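/* Note on the float case above: the operand is spilled into the newly
   reserved stack slot with fstps/fstpl/fstpt 0(%esp).  The expression
   'o(0x5cd9 + size - 4)' relies on the opcodes for fstps (d9 /3) and
   fstpl (dd /3) differing only in their first byte (d9 vs dd); the
   80 bit case (db /7) is emitted explicitly.  'args_size' accumulates
   the pushed bytes so that gfunc_call() can pop the arguments again
   for cdecl calls. */
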
static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc483);
        g(val);
    } else {
        oad(0xc481, val); /* add $xxx, %esp */
    }
}

/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        /* constant case */
        if (vtop->r & VT_SYM) {
            /* relocation case */
            greloc(cur_text_section, vtop->sym,
                   ind + 1, R_386_PC32);
        } else {
            /* put an empty PC32 relocation */
            put_elf_reloc(symtab_section, cur_text_section,
                          ind + 1, R_386_PC32, 0);
        }
        oad(0xe8 + is_jmp, vtop->c.ul - 4); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = gv(RC_INT);
        o(0xff); /* call/jmp *r */
        o(0xd0 + r + (is_jmp << 4));
    }
}

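/* Note: in the constant case the R_386_PC32 relocation points at the
   4 byte displacement of the call/jmp (ind + 1, just after the opcode
   byte).  The stored addend 'vtop->c.ul - 4' compensates for the
   displacement being relative to the end of the instruction while the
   relocation is resolved relative to its own address, and keeps any
   constant offset that was added to the symbol. */
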
/* generate function call with address in (vtop->t, vtop->c) and free function
   context. Stack entry is popped */
void gfunc_call(GFuncContext *c)
{
    gcall_or_jmp(0);
    if (c->args_size && c->func_call == FUNC_CDECL)
        gadd_sp(c->args_size);
    vtop--;
}

/* generate function prolog of type 't' */
void gfunc_prolog(int t)
{
    int addr, align, size, u, func_call;
    Sym *sym;

    sym = sym_find((unsigned)t >> VT_STRUCT_SHIFT);
    func_call = sym->r;
    addr = 8;
    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->t;
    if ((func_vt & VT_BTYPE) == VT_STRUCT) {
        func_vc = addr;
        addr += 4;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        u = sym->t;
        sym_push(sym->v & ~SYM_FIELD, u,
                 VT_LOCAL | VT_LVAL, addr);
        size = type_size(u, &align);
        size = (size + 3) & ~3;
#ifdef FUNC_STRUCT_PARAM_AS_PTR
        /* structs are passed as pointer */
        if ((u & VT_BTYPE) == VT_STRUCT) {
            size = 4;
        }
#endif
        addr += size;
    }
    func_ret_sub = 0;
    /* pascal type call ? */
    if (func_call == FUNC_STDCALL)
        func_ret_sub = addr - 8;
    o(0xe58955); /* push %ebp, mov %esp, %ebp */
    func_sub_sp_offset = oad(0xec81, 0); /* sub $xxx, %esp */
    /* leave some room for bound checking code */
    if (do_bounds_check) {
        oad(0xb8, 0); /* lbound section pointer */
        oad(0xb8, 0); /* call to function */
        func_bound_offset = lbounds_section->data_offset;
    }
}

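/* Note on the prolog: parameters are addressed relative to %ebp after
   "push %ebp; mov %esp, %ebp", so 0(%ebp) holds the saved %ebp,
   4(%ebp) the return address and the first parameter starts at 8(%ebp)
   (hence addr = 8).  For stdcall functions the callee removes its own
   arguments, so 'func_ret_sub' records the total parameter size used
   by the "ret $n" of the epilog.  The "sub $xxx, %esp" is emitted with
   a zero placeholder and patched in gfunc_epilog() once the size of
   the locals is known. */
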
/* generate function epilog */
void gfunc_epilog(void)
{
#ifdef CONFIG_TCC_BCHECK
    if (do_bounds_check && func_bound_offset != lbounds_section->data_offset) {
        int saved_ind;
        int *bounds_ptr;
        Sym *sym, *sym_data;
        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(int));
        *bounds_ptr = 0;
        /* generate bound local allocation */
        saved_ind = ind;
        ind = func_sub_sp_offset + 4;
        sym_data = get_sym_ref(char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        greloc(cur_text_section, sym_data,
               ind + 1, R_386_32);
        oad(0xb8, 0); /* mov $xxx, %eax */
        sym = external_global_sym(TOK___bound_local_new, func_old_type, 0);
        greloc(cur_text_section, sym,
               ind + 1, R_386_PC32);
        oad(0xe8, -4);
        ind = saved_ind;
        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloc(cur_text_section, sym_data,
               ind + 1, R_386_32);
        oad(0xb8, 0); /* mov $xxx, %eax */
        sym = external_global_sym(TOK___bound_local_delete, func_old_type, 0);
        greloc(cur_text_section, sym,
               ind + 1, R_386_PC32);
        oad(0xe8, -4);
        o(0x585a); /* restore returned value, if any */
    }
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    *(int *)(cur_text_section->data + func_sub_sp_offset) = (-loc + 3) & -4;
}

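/* Note: locals grow downwards from %ebp ('loc' is negative), so the
   frame size patched into the prolog's "sub $xxx, %esp" is
   (-loc + 3) & -4, i.e. the locals size rounded up to a 4 byte
   multiple.  When bound checking is enabled, the room reserved in the
   prolog (two 5 byte oad(0xb8, 0) slots) is filled here with a
   "mov $lbounds, %eax; call __bound_local_new" sequence. */
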
/* generate a jump to a label */
int gjmp(int t)
{
    return psym(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}

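/* Note: 0xeb is the 2 byte short jump (8 bit displacement) and 0xe9
   the 5 byte near jump (32 bit displacement); the subtracted constants
   are the respective instruction lengths, since both displacements are
   relative to the address of the following instruction. */
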
/* generate a test. set 'inv' to invert test. Stack entry is popped */
int gtst(int inv, int t)
{
    int v, *p;
    v = vtop->r & VT_VALMASK;
    if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        g(0x0f);
        t = psym((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            p = &vtop->c.i;
            while (*p != 0)
                p = (int *)(cur_text_section->data + *p);
            *p = t;
            t = vtop->c.i;
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    } else {
        if (is_float(vtop->t)) {
            vpushi(0);
            gen_op(TOK_NE);
        }
        if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            /* constant jmp optimization */
            if ((vtop->c.i != 0) != inv)
                t = gjmp(t);
        } else {
            v = gv(RC_INT);
            o(0x85);
            o(0xc0 + v * 9);
            g(0x0f);
            t = psym(0x85 ^ inv, t);
        }
    }
    vtop--;
    return t;
}

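/* Note on gtst(): for a VT_CMP value, 'vtop->c.i' holds the setcc
   opcode byte (0x90 + cc); subtracting 16 turns it into the matching
   "jcc rel32" opcode byte (0x80 + cc), and xoring with 'inv' inverts
   the condition.  The VT_JMP/VT_JMPI case either appends the jump list
   't' to the pending list of the value (when the jump sense matches
   'inv') or emits a jump and resolves the pending list here. */
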
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                o(0x83);
                o(0xc0 | (opc << 3) | r);
                g(c);
            } else {
                o(0x81);
                oad(0xc0 | (opc << 3) | r, c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            o((opc << 3) | 0x01);
            o(0xc0 + r + fr * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop--;
            vset(VT_INT, VT_CMP, op);
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        o(0xaf0f); /* imul fr, r */
        o(0xc0 + fr + r * 8);
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i & 0x1f;
            o(0xc1); /* shl/shr/sar $xxx, r */
            o(opc | r);
            g(c);
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_ECX);
            r = vtop[-1].r;
            o(0xd3); /* shl/shr/sar %cl, r */
            o(opc | r);
        }
        vtop--;
        break;
    case '/':
    case TOK_UDIV:
    case TOK_PDIV:
    case '%':
    case TOK_UMOD:
    case TOK_UMULL:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_EAX, RC_ECX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(REG_EDX);
        if (op == TOK_UMULL) {
            o(0xf7); /* mul fr */
            o(0xe0 + fr);
            vtop->r2 = REG_EDX;
            r = REG_EAX;
        } else {
            if (op == TOK_UDIV || op == TOK_UMOD) {
                o(0xf7d231); /* xor %edx, %edx, div fr, %eax */
                o(0xf0 + fr);
            } else {
                o(0xf799); /* cltd, idiv fr, %eax */
                o(0xf8 + fr);
            }
            if (op == '%' || op == TOK_UMOD)
                r = REG_EDX;
            else
                r = REG_EAX;
        }
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}

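/* Note on 'opc' in gen_opi(): for the arithmetic cases it is the
   /digit opcode extension of the 0x83/0x81 immediate group
   (0=add, 1=or, 2=adc, 3=sbb, 4=and, 5=sub, 6=xor, 7=cmp); the
   register-register form reuses it as '(opc << 3) | 0x01', giving the
   corresponding "op r/m32, r32" opcode (0x01 add, 0x09 or, 0x11 adc,
   0x19 sbb, 0x21 and, 0x29 sub, 0x31 xor, 0x39 cmp).  For the shifts
   it is the /digit of the 0xc1/0xd3 group (4=shl, 5=shr, 7=sar). */
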
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(RC_FLOAT);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(RC_FLOAT);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(RC_FLOAT);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if (op >= TOK_ULT && op <= TOK_GT) {
        /* load on stack second operand */
        load(REG_ST0, vtop);
        save_reg(REG_EAX); /* eax is used by FP comparison code */
        if (op == TOK_GE || op == TOK_GT)
            swapped = !swapped;
        else if (op == TOK_EQ || op == TOK_NE)
            swapped = 0;
        if (swapped)
            o(0xc9d9); /* fxch %st(1) */
        o(0xe9da); /* fucompp */
        o(0xe0df); /* fnstsw %ax */
        if (op == TOK_EQ) {
            o(0x45e480); /* and $0x45, %ah */
            o(0x40fC80); /* cmp $0x40, %ah */
        } else if (op == TOK_NE) {
            o(0x45e480); /* and $0x45, %ah */
            o(0x40f480); /* xor $0x40, %ah */
            op = TOK_NE;
        } else if (op == TOK_GE || op == TOK_LE) {
            o(0x05c4f6); /* test $0x05, %ah */
            op = TOK_EQ;
        } else {
            o(0x45c4f6); /* test $0x45, %ah */
            op = TOK_EQ;
        }
        vtop--;
        vtop->r = VT_CMP;
        vtop->c.i = op;
    } else {
        /* no memory reference possible for long double operations */
        if ((vtop->t & VT_BTYPE) == VT_LDOUBLE) {
            load(REG_ST0, vtop);
            swapped = !swapped;
        }

        switch(op) {
        default:
        case '+':
            a = 0;
            break;
        case '-':
            a = 4;
            if (swapped)
                a++;
            break;
        case '*':
            a = 1;
            break;
        case '/':
            a = 6;
            if (swapped)
                a++;
            break;
        }
        ft = vtop->t;
        fc = vtop->c.ul;
        if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
        } else {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.t = VT_INT;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.ul = fc;
                load(r, &v1);
                fc = 0;
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE)
                o(0xdc);
            else
                o(0xd8);
            gen_modrm(a, r, vtop->sym, fc);
        }
        vtop--;
    }
}

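/* Note on the FP comparisons above: fucompp pops both operands and sets
   the FPU condition codes, which fnstsw %ax makes visible in %ah as
   C0 = 0x01, C2 = 0x04 and C3 = 0x40 (0x45 is the mask of all three).
   Equality is "exactly C3 set" (and $0x45, %ah; cmp $0x40, %ah); the
   remaining comparisons test a subset of those bits (0x05 or 0x45) and
   reduce to a zero-flag test, which is why 'op' is rewritten to TOK_EQ
   or TOK_NE before the value is turned into a VT_CMP. */
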
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    save_reg(REG_ST0);
    gv(RC_INT);
    if ((vtop->t & VT_BTYPE) == VT_LLONG) {
        /* signed long long to float/double/long double (unsigned case
           is handled generically) */
        o(0x50 + vtop->r2); /* push r2 */
        o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
        o(0x242cdf); /* fildll (%esp) */
        o(0x08c483); /* add $8, %esp */
    } else if ((vtop->t & (VT_BTYPE | VT_UNSIGNED)) ==
               (VT_INT | VT_UNSIGNED)) {
        /* unsigned int to float/double/long double */
        o(0x6a); /* push $0 */
        g(0x00);
        o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
        o(0x242cdf); /* fildll (%esp) */
        o(0x08c483); /* add $8, %esp */
    } else {
        /* int to float/double/long double */
        o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
        o(0x2404db); /* fildl (%esp) */
        o(0x04c483); /* add $4, %esp */
    }
    vtop->r = REG_ST0;
}

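/* Note: there is no x87 instruction to load an unsigned 32 bit
   integer, so the unsigned int case pushes a zero high word followed
   by the value and uses fildll to load it as a signed 64 bit integer,
   which always yields the correct non-negative result. */
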
/* convert fp to int 't' type */
/* XXX: handle long long case */
void gen_cvt_ftoi(int t)
{
    int r, r2, size;
    Sym *sym;

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    o(0x2dd9); /* fldcw xxx */
    sym = external_global_sym(TOK___tcc_int_fpu_control,
                              VT_SHORT | VT_UNSIGNED, VT_LVAL);
    greloc(cur_text_section, sym,
           ind, R_386_32);
    gen_le32(0);

    oad(0xec81, size); /* sub $xxx, %esp */
    if (size == 4)
        o(0x1cdb); /* fistpl */
    else
        o(0x3cdf); /* fistpll */
    o(0x24);
    o(0x2dd9); /* fldcw xxx */
    sym = external_global_sym(TOK___tcc_fpu_control,
                              VT_SHORT | VT_UNSIGNED, VT_LVAL);
    greloc(cur_text_section, sym,
           ind, R_386_32);
    gen_le32(0);

    r = get_reg(RC_INT);
    o(0x58 + r); /* pop r */
    if (size == 8) {
        if (t == VT_LLONG) {
            vtop->r = r; /* mark reg as used */
            r2 = get_reg(RC_INT);
            o(0x58 + r2); /* pop r2 */
            vtop->r2 = r2;
        } else {
            o(0x04c483); /* add $4, %esp */
        }
    }
    vtop->r = r;
}

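/* Note: fistp rounds according to the current FPU rounding mode, while
   C requires truncation toward zero, so the conversion temporarily
   loads __tcc_int_fpu_control (a control word with round-toward-zero
   set) with fldcw, stores the integer, then restores the original
   __tcc_fpu_control. */
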
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    /* all we have to do on i386 is to put the float in a register */
    gv(RC_FLOAT);
}

/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}

/* bound check support functions */
#ifdef CONFIG_TCC_BCHECK

/* generate a bounded pointer addition */
void gen_bounded_ptr_add(void)
{
    Sym *sym;

    /* prepare fast i386 function call (args in eax and edx) */
    gv2(RC_EAX, RC_EDX);
    /* save all temporary registers */
    vtop -= 2;
    save_regs(0);
    /* do a fast function call */
    sym = external_global_sym(TOK___bound_ptr_add, func_old_type, 0);
    greloc(cur_text_section, sym,
           ind + 1, R_386_PC32);
    oad(0xe8, -4);
    /* returned pointer is in eax */
    vtop++;
    vtop->r = REG_EAX | VT_BOUNDED;
    /* address of bounding function call point */
    vtop->c.ul = (cur_text_section->reloc->data_offset - sizeof(Elf32_Rel));
}

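/* Note: vtop->c.ul records the offset, inside the text section's
   relocation table, of the R_386_PC32 entry just emitted for the
   __bound_ptr_add call.  If the pointer is later dereferenced,
   gen_bounded_ptr_deref() rewrites that relocation so the same call
   site targets __bound_ptr_indirN (N = access size) instead, checking
   the dereference as well as the addition. */
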
/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
void gen_bounded_ptr_deref(void)
{
    int func;
    int size, align;
    Elf32_Rel *rel;
    Sym *sym;

    size = 0;
    /* XXX: put that code in generic part of tcc */
    if (!is_float(vtop->t)) {
        if (vtop->r & VT_LVAL_BYTE)
            size = 1;
        else if (vtop->r & VT_LVAL_SHORT)
            size = 2;
    }
    if (!size)
        size = type_size(vtop->t, &align);
    switch(size) {
    case  1: func = TOK___bound_ptr_indir1; break;
    case  2: func = TOK___bound_ptr_indir2; break;
    case  4: func = TOK___bound_ptr_indir4; break;
    case  8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
    default:
        error("unhandled size when dereferencing bounded pointer");
        func = 0;
        break;
    }

    /* patch relocation */
    /* XXX: find a better solution ? */
    rel = (Elf32_Rel *)(cur_text_section->reloc->data + vtop->c.ul);
    sym = external_global_sym(func, func_old_type, 0);
    if (!sym->c)
        put_extern_sym(sym, NULL, 0, 0);
    rel->r_info = ELF32_R_INFO(sym->c, ELF32_R_TYPE(rel->r_info));
}
#endif

/* end of X86 code generator */
/*************************************************************/