/*
 *  X86 code generator for TCC
 *
 *  Copyright (c) 2001, 2002 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* number of available registers */
#define NB_REGS 4

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about it). */
#define RC_INT    0x0001 /* generic integer register */
#define RC_FLOAT  0x0002 /* generic float register */
#define RC_EAX    0x0004
#define RC_ST0    0x0008
#define RC_ECX    0x0010
#define RC_EDX    0x0020
#define RC_IRET   RC_EAX /* function return: integer register */
#define RC_LRET   RC_EDX /* function return: second integer register */
#define RC_FRET   RC_ST0 /* function return: float register */

/* pretty names for the registers */
enum {
    REG_EAX = 0,
    REG_ECX,
    REG_EDX,
    REG_ST0,
};

int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_EAX,
    /* ecx */ RC_INT | RC_ECX,
    /* edx */ RC_INT | RC_EDX,
    /* st0 */ RC_FLOAT | RC_ST0,
};

/* return registers for function */
#define REG_IRET REG_EAX /* single word int return register */
#define REG_LRET REG_EDX /* second word return register (for long long) */
#define REG_FRET REG_ST0 /* float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* defined if structures are passed as pointers. Otherwise structures
   are directly pushed on stack. */
//#define FUNC_STRUCT_PARAM_AS_PTR

/* pointer size, in bytes */
#define PTR_SIZE 4

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  12
#define LDOUBLE_ALIGN 4

/* relocation type for 32 bit data relocation */
#define R_DATA_32 R_386_32

/* function call context */
typedef struct GFuncContext {
    int args_size;
    int func_call; /* func call type (FUNC_STDCALL or FUNC_CDECL) */
} GFuncContext;

/******************************************************/

static int *func_sub_sp_ptr;
static unsigned char *func_bound_ptr;
static int func_ret_sub;

/* output one code byte at the current output position 'ind' */
void g(int c)
{
    *(char *)ind++ = c;
}

/* output the bytes of 'c', least significant byte first, stopping when
   the remaining value is zero (multi-byte opcodes are stored this way) */
void o(int c)
{
    while (c) {
        g(c);
        c = c / 256;
    }
}

/* output a 32 bit little endian constant */
void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

/* output a symbol and patch all calls to it */
void gsym_addr(int t, int a)
{
    int n;
    while (t) {
        n = *(int *)t; /* next value */
        *(int *)t = a - t - 4;
        t = n;
    }
}

void gsym(int t)
{
    gsym_addr(t, ind);
}
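
/* Note: 't' is the head of a chain of forward references: each pending
   jump/call stores the address of the next patch site in its own 32 bit
   displacement field (0 terminates the chain).  gsym_addr() walks that
   chain and replaces each entry with the PC relative displacement
   'a - t - 4', i.e. relative to the end of the 4 byte field, which is
   what the i386 jmp/jcc/call encodings expect. */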

/* psym is used to put an instruction with a data field which is a
   reference to a symbol. It is in fact the same as oad ! */
#define psym oad

/* instruction + 4 bytes data. Return the address of the data */
int oad(int c, int s)
{
    o(c);
    *(int *)ind = s;
    s = ind;
    ind = ind + 4;
    return s;
}
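
/* Note: the value returned by oad() is the address of the 32 bit
   operand that was just emitted.  psym(opcode, t) therefore emits an
   instruction whose displacement field initially holds 't', the
   previous head of a patch chain, and returns the new head, to be
   resolved later by gsym()/gsym_addr(). */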

/* output constant with relocation if 'r & VT_SYM' is true */
void gen_addr32(int r, int c)
{
    if (!(r & VT_SYM)) {
        gen_le32(c);
    } else {
        greloc(cur_text_section,
               (Sym *)c, ind - (int)cur_text_section->data, R_386_32);
        gen_le32(0);
    }
}
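
/* Note: when VT_SYM is set, 'c' is really a Sym pointer: instead of a
   final address an R_386_32 relocation is recorded against the current
   offset in the text section and a zero placeholder word is emitted,
   leaving the actual address computation to the ELF generator. */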

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
void gen_modrm(int op_reg, int r, int c)
{
    op_reg = op_reg << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        o(0x05 | op_reg);
        gen_addr32(r, c);
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else {
        g(0x00 | op_reg | (r & VT_VALMASK));
    }
}
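
/* Note on the ModRM encodings used above: the reg field (op_reg << 3)
   holds either the extra opcode bits or the other register operand;
   0x05 is mod=00 rm=101 (32 bit absolute address), 0x45 is mod=01
   rm=101 (8 bit displacement from %ebp), 0x85 is mod=10 rm=101 (32 bit
   displacement from %ebp), and the last case is mod=00 with rm set to
   the register number, i.e. an indirect reference through that
   register. */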

/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

    fr = sv->r;
    ft = sv->t;
    fc = sv->c.ul;

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        if (v == VT_LLOCAL) {
            v1.t = VT_INT;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.ul = fc;
            load(r, &v1);
            fr = r;
        }
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            o(0xd9); /* flds */
            r = 0;
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            o(0xdd); /* fldl */
            r = 0;
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            o(0xdb); /* fldt */
            r = 5;
        } else if ((ft & VT_TYPE) == VT_BYTE) {
            o(0xbe0f);   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            o(0xb60f);   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            o(0xbf0f);   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            o(0xb70f);   /* movzwl */
        } else {
            o(0x8b);     /* movl */
        }
        gen_modrm(r, fr, fc);
    } else {
        if (v == VT_CONST) {
            o(0xb8 + r); /* mov $xx, r */
            gen_addr32(fr, fc);
        } else if (v == VT_LOCAL) {
            o(0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, fc);
        } else if (v == VT_CMP) {
            oad(0xb8 + r, 0); /* mov $0, r */
            o(0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + r);
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            oad(0xb8 + r, t); /* mov $1, r */
            oad(0xe9, 5); /* jmp after */
            gsym(fc);
            oad(0xb8 + r, t ^ 1); /* mov $0, r */
        } else if (v != r) {
            o(0x89);
            o(0xc0 + r + v * 8); /* mov v, r */
        }
    }
}
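
/* Note: the VT_CMP and VT_JMP cases above materialize a boolean value
   that currently exists only as CPU flags or as a pending jump chain:
   VT_CMP clears the register and then uses the comparison token (which
   equals the i386 'setcc' opcode byte) to set its low byte; VT_JMP
   loads one value on the fall-through path, skips the other load with
   a short jmp, and resolves the pending jump chain so that the taken
   path loads the opposite value. */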

/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;

    ft = v->t;
    fc = v->c.ul;
    fr = v->r & VT_VALMASK;
    bt = ft & VT_BTYPE;
    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0xd9); /* fsts */
        r = 2;
    } else if (bt == VT_DOUBLE) {
        o(0xdd); /* fstpl */
        r = 2;
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        if (bt == VT_BYTE)
            o(0x88);
        else
            o(0x89);
    }
    if (fr == VT_CONST ||
        fr == VT_LOCAL ||
        (v->r & VT_LVAL)) {
        gen_modrm(r, v->r, fc);
    } else if (fr != r) {
        o(0xc0 + fr + r * 8); /* mov r, fr */
    }
}

/* start function call and return function call context */
void gfunc_start(GFuncContext *c, int func_call)
{
    c->args_size = 0;
    c->func_call = func_call;
}

/* push function parameter which is in (vtop->t, vtop->c). Stack entry
   is then popped. */
void gfunc_param(GFuncContext *c)
{
    int size, align, r;

    if ((vtop->t & VT_BTYPE) == VT_STRUCT) {
        size = type_size(vtop->t, &align);
        /* align to stack align size */
        size = (size + 3) & ~3;
        /* allocate the necessary size on stack */
        oad(0xec81, size); /* sub $xxx, %esp */
        /* generate structure store */
        r = get_reg(RC_INT);
        o(0x89); /* mov %esp, r */
        o(0xe0 + r);
        vset(vtop->t, r | VT_LVAL, 0);
        vswap();
        vstore();
        c->args_size += size;
    } else if (is_float(vtop->t)) {
        gv(RC_FLOAT); /* only one float register */
        if ((vtop->t & VT_BTYPE) == VT_FLOAT)
            size = 4;
        else if ((vtop->t & VT_BTYPE) == VT_DOUBLE)
            size = 8;
        else
            size = 12;
        oad(0xec81, size); /* sub $xxx, %esp */
        if (size == 12)
            o(0x7cdb);
        else
            o(0x5cd9 + size - 4); /* fstp[s|l] 0(%esp) */
        g(0x24);
        g(0x00);
        c->args_size += size;
    } else {
        /* simple type (currently always same size) */
        /* XXX: implicit cast ? */
        r = gv(RC_INT);
        if ((vtop->t & VT_BTYPE) == VT_LLONG) {
            size = 8;
            o(0x50 + vtop->r2); /* push r */
        } else {
            size = 4;
        }
        o(0x50 + r); /* push r */
        c->args_size += size;
    }
    vtop--;
}

/* generate function call with address in (vtop->t, vtop->c) and free function
   context. Stack entry is popped */
void gfunc_call(GFuncContext *c)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        /* constant case */
        if (vtop->r & VT_SYM) {
            /* relocation case */
            greloc(cur_text_section, vtop->c.sym,
                   ind + 1 - (int)cur_text_section->data, R_386_PC32);
            oad(0xe8, -4);
        } else {
            oad(0xe8, vtop->c.ul - ind - 5);
        }
    } else {
        /* otherwise, indirect call */
        r = gv(RC_INT);
        o(0xff); /* call *r */
        o(0xd0 + r);
    }
    if (c->args_size && c->func_call == FUNC_CDECL)
        oad(0xc481, c->args_size); /* add $xxx, %esp */
    vtop--;
}
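
/* Note: in the relocation case the R_386_PC32 entry is placed at
   'ind + 1', i.e. on the 32 bit displacement field of the 0xe8 call
   instruction, and -4 is stored as the in-place addend: the ELF
   generator computes S + A - P with P the address of that field, so
   the -4 turns the result into a displacement relative to the end of
   the call instruction, as the encoding requires. */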

/* generate function prolog of type 't' */
void gfunc_prolog(int t)
{
    int addr, align, size, u, func_call;
    Sym *sym;

    sym = sym_find((unsigned)t >> VT_STRUCT_SHIFT);
    func_call = sym->r;
    addr = 8;
    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->t;
    if ((func_vt & VT_BTYPE) == VT_STRUCT) {
        func_vc = addr;
        addr += 4;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        u = sym->t;
        sym_push(sym->v & ~SYM_FIELD, u,
                 VT_LOCAL | VT_LVAL, addr);
        size = type_size(u, &align);
        size = (size + 3) & ~3;
#ifdef FUNC_STRUCT_PARAM_AS_PTR
        /* structs are passed as pointer */
        if ((u & VT_BTYPE) == VT_STRUCT) {
            size = 4;
        }
#endif
        addr += size;
    }
    func_ret_sub = 0;
    /* pascal type call ? */
    if (func_call == FUNC_STDCALL)
        func_ret_sub = addr - 8;
    o(0xe58955); /* push %ebp, mov %esp, %ebp */
    func_sub_sp_ptr = (int *)oad(0xec81, 0); /* sub $xxx, %esp */
    /* leave some room for bound checking code */
    if (do_bounds_check) {
        oad(0xb8, 0); /* lbound section pointer */
        oad(0xb8, 0); /* call to function */
        func_bound_ptr = lbounds_section->data_ptr;
    }
}
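
/* Note: parameters start at 8(%ebp) because the return address and the
   saved %ebp occupy the first 8 stack bytes, hence 'addr = 8'.  For
   stdcall functions 'func_ret_sub = addr - 8' is the total size of the
   stack parameters, later popped by the callee with 'ret n' in
   gfunc_epilog().  'func_sub_sp_ptr' remembers the address of the
   32 bit immediate of 'sub $xxx, %esp' so that the final, aligned
   local frame size can be patched in once 'loc' is known. */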

/* generate function epilog */
void gfunc_epilog(void)
{
#ifdef CONFIG_TCC_BCHECK
    if (do_bounds_check && func_bound_ptr != lbounds_section->data_ptr) {
        int saved_ind;
        int *bounds_ptr;
        /* add end of table info */
        bounds_ptr = (int *)lbounds_section->data_ptr;
        *bounds_ptr++ = 0;
        lbounds_section->data_ptr = (unsigned char *)bounds_ptr;
        /* generate bound local allocation */
        saved_ind = ind;
        ind = (int)func_sub_sp_ptr + 4;
        oad(0xb8, (int)func_bound_ptr); /* mov $xxx, %eax */
        oad(0xe8, (int)__bound_local_new - ind - 5);
        ind = saved_ind;
        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        oad(0xb8, (int)func_bound_ptr); /* mov $xxx, %eax */
        oad(0xe8, (int)__bound_local_delete - ind - 5);
        o(0x585a); /* restore returned value, if any */
    }
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    *func_sub_sp_ptr = (-loc + 3) & -4;
}

/* generate a jump to a label */
int gjmp(int t)
{
    return psym(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    oad(0xe9, a - ind - 5);
}

/* generate a test. set 'inv' to invert test. Stack entry is popped */
int gtst(int inv, int t)
{
    int v, *p;
    v = vtop->r & VT_VALMASK;
    if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        g(0x0f);
        t = psym((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            p = &vtop->c.i;
            while (*p != 0)
                p = (int *)*p;
            *p = t;
            t = vtop->c.i;
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    } else {
        if (is_float(vtop->t)) {
            vpushi(0);
            gen_op(TOK_NE);
        }
        if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            /* constant jmp optimization */
            if ((vtop->c.i != 0) != inv)
                t = gjmp(t);
        } else {
            v = gv(RC_INT);
            o(0x85);
            o(0xc0 + v * 9);
            g(0x0f);
            t = psym(0x85 ^ inv, t);
        }
    }
    vtop--;
    return t;
}
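
/* Note: the comparison tokens TOK_ULT..TOK_GT are chosen to match the
   i386 'setcc' opcode bytes (the VT_CMP case in load() relies on this),
   so 'vtop->c.i - 16' yields the corresponding long 'jcc' opcode byte
   and xor-ing with 'inv' (0 or 1) selects the inverted condition.  In
   the VT_JMP case both 't' and 'vtop->c.i' are patch chains as
   described for gsym_addr(), so merging them is a simple list append. */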

/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                o(0x83);
                o(0xc0 | (opc << 3) | r);
                g(c);
            } else {
                o(0x81);
                oad(0xc0 | (opc << 3) | r, c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            o((opc << 3) | 0x01);
            o(0xc0 + r + fr * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop--;
            vset(VT_INT, VT_CMP, op);
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        o(0xaf0f); /* imul fr, r */
        o(0xc0 + fr + r * 8);
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i & 0x1f;
            o(0xc1); /* shl/shr/sar $xxx, r */
            o(opc | r);
            g(c);
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_ECX);
            r = vtop[-1].r;
            o(0xd3); /* shl/shr/sar %cl, r */
            o(opc | r);
        }
        vtop--;
        break;
    case '/':
    case TOK_UDIV:
    case TOK_PDIV:
    case '%':
    case TOK_UMOD:
    case TOK_UMULL:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_EAX, RC_ECX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(REG_EDX);
        if (op == TOK_UMULL) {
            o(0xf7); /* mul fr */
            o(0xe0 + fr);
            vtop->r2 = REG_EDX;
            r = REG_EAX;
        } else {
            if (op == TOK_UDIV || op == TOK_UMOD) {
                o(0xf7d231); /* xor %edx, %edx, div fr, %eax */
                o(0xf0 + fr);
            } else {
                o(0xf799); /* cltd, idiv fr, %eax */
                o(0xf8 + fr);
            }
            if (op == '%' || op == TOK_UMOD)
                r = REG_EDX;
            else
                r = REG_EAX;
        }
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}
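
/* Note: 'opc' in gen_opi() is the /reg extension of the immediate ALU
   group (0x83/0x81 opcodes): add=0, or=1, adc=2, sbb=3, and=4, sub=5,
   xor=6, cmp=7; the register-register form reuses the same value since
   the corresponding two-operand opcodes are (opc << 3) | 0x01.  The
   'default' case therefore emits a cmp, whose result is kept as VT_CMP
   for the comparison operators.  For shifts, 'opc' is the /reg
   extension of the 0xc1/0xd3 shift group (shl=4, shr=5, sar=7). */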

/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(RC_FLOAT);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(RC_FLOAT);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(RC_FLOAT);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if (op >= TOK_ULT && op <= TOK_GT) {
        /* load on stack second operand */
        load(REG_ST0, vtop);
        save_reg(REG_EAX); /* eax is used by FP comparison code */
        if (op == TOK_GE || op == TOK_GT)
            swapped = !swapped;
        else if (op == TOK_EQ || op == TOK_NE)
            swapped = 0;
        if (swapped)
            o(0xc9d9); /* fxch %st(1) */
        o(0xe9da); /* fucompp */
        o(0xe0df); /* fnstsw %ax */
        if (op == TOK_EQ) {
            o(0x45e480); /* and $0x45, %ah */
            o(0x40fc80); /* cmp $0x40, %ah */
        } else if (op == TOK_NE) {
            o(0x45e480); /* and $0x45, %ah */
            o(0x40f480); /* xor $0x40, %ah */
            op = TOK_NE;
        } else if (op == TOK_GE || op == TOK_LE) {
            o(0x05c4f6); /* test $0x05, %ah */
            op = TOK_EQ;
        } else {
            o(0x45c4f6); /* test $0x45, %ah */
            op = TOK_EQ;
        }
        vtop--;
        vtop->r = VT_CMP;
        vtop->c.i = op;
    } else {
        /* no memory reference possible for long double operations */
        if ((vtop->t & VT_BTYPE) == VT_LDOUBLE) {
            load(REG_ST0, vtop);
            swapped = !swapped;
        }

        switch(op) {
        default:
        case '+':
            a = 0;
            break;
        case '-':
            a = 4;
            if (swapped)
                a++;
            break;
        case '*':
            a = 1;
            break;
        case '/':
            a = 6;
            if (swapped)
                a++;
            break;
        }
        ft = vtop->t;
        fc = vtop->c.ul;
        if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
        } else {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.t = VT_INT;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.ul = fc;
                load(r, &v1);
                fc = 0;
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE)
                o(0xdc);
            else
                o(0xd8);
            gen_modrm(a, r, fc);
        }
        vtop--;
    }
}
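
/* Note on the FP comparison sequence above: fucompp pops both operands
   and fnstsw copies the FPU status word into %ax, so the condition
   bits C0, C2 and C3 end up in %ah as bits 0, 2 and 6 (mask 0x45).
   Equality means C3 set with C0 and C2 clear (masked value 0x40),
   which is what the and/cmp and and/xor sequences test; the ordered
   comparisons only need a subset of those bits, and the outcome is
   then consumed as an ordinary VT_CMP value. */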

/* FPU control word for rounding to nearest mode */
/* XXX: should move that into tcc lib support code ! */
static unsigned short __tcc_fpu_control = 0x137f;
/* FPU control word for round to zero mode for int conversion */
static unsigned short __tcc_int_fpu_control = 0x137f | 0x0c00;
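
/* Note: bits 10-11 of the x87 control word form the rounding-control
   field; or-ing in 0x0c00 selects "round toward zero", so that fistp
   in gen_cvt_ftoi() truncates as C requires, after which the default
   round-to-nearest word is reloaded. */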

/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    save_reg(REG_ST0);
    gv(RC_INT);
    if ((vtop->t & VT_BTYPE) == VT_LLONG) {
        /* signed long long to float/double/long double (unsigned case
           is handled generically) */
        o(0x50 + vtop->r2); /* push r2 */
        o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
        o(0x242cdf); /* fildll (%esp) */
        o(0x08c483); /* add $8, %esp */
    } else if ((vtop->t & (VT_BTYPE | VT_UNSIGNED)) ==
               (VT_INT | VT_UNSIGNED)) {
        /* unsigned int to float/double/long double */
        o(0x6a); /* push $0 */
        g(0x00);
        o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
        o(0x242cdf); /* fildll (%esp) */
        o(0x08c483); /* add $8, %esp */
    } else {
        /* int to float/double/long double */
        o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
        o(0x2404db); /* fildl (%esp) */
        o(0x04c483); /* add $4, %esp */
    }
    vtop->r = REG_ST0;
}

/* convert fp to int 't' type */
/* XXX: handle long long case */
void gen_cvt_ftoi(int t)
{
    int r, r2, size;

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    oad(0x2dd9, (int)&__tcc_int_fpu_control); /* ldcw xxx */
    oad(0xec81, size); /* sub $xxx, %esp */
    if (size == 4)
        o(0x1cdb); /* fistpl */
    else
        o(0x3cdf); /* fistpll */
    o(0x24);
    oad(0x2dd9, (int)&__tcc_fpu_control); /* ldcw xxx */
    r = get_reg(RC_INT);
    o(0x58 + r); /* pop r */
    if (size == 8) {
        if (t == VT_LLONG) {
            vtop->r = r; /* mark reg as used */
            r2 = get_reg(RC_INT);
            o(0x58 + r2); /* pop r2 */
            vtop->r2 = r2;
        } else {
            o(0x04c483); /* add $4, %esp */
        }
    }
    vtop->r = r;
}

/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    /* all we have to do on i386 is to put the float in a register */
    gv(RC_FLOAT);
}

/* bound check support functions */
#ifdef CONFIG_TCC_BCHECK

/* generate a bounded pointer addition */
void gen_bounded_ptr_add(void)
{
    int addr;
    /* prepare fast i386 function call (args in eax and edx) */
    gv2(RC_EAX, RC_EDX);
    /* save all temporary registers */
    vtop -= 2;
    save_regs(0);
    /* do a fast function call */
    addr = ind;
    oad(0xe8, (int)__bound_ptr_add - ind - 5);
    /* returned pointer is in eax */
    vtop++;
    vtop->r = REG_EAX | VT_BOUNDED;
    vtop->c.ul = addr; /* address of bounding function call point */
}
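
/* Note: the bound checking helpers use a register calling convention
   (arguments in %eax and %edx) and return the checked pointer in %eax.
   The address of the call instruction is kept in vtop->c.ul so that
   gen_bounded_ptr_deref() can later rewrite its 32 bit displacement
   and redirect the call to the __bound_ptr_indirN helper matching the
   size of the access. */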

/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
void gen_bounded_ptr_deref(void)
{
    void *func;
    int size, align, addr;

    size = 0;
    /* XXX: put that code in generic part of tcc */
    if (!is_float(vtop->t)) {
        if (vtop->r & VT_LVAL_BYTE)
            size = 1;
        else if (vtop->r & VT_LVAL_SHORT)
            size = 2;
    }
    if (!size)
        size = type_size(vtop->t, &align);
    switch(size) {
    case  1: func = __bound_ptr_indir1; break;
    case  2: func = __bound_ptr_indir2; break;
    case  4: func = __bound_ptr_indir4; break;
    case  8: func = __bound_ptr_indir8; break;
    case 12: func = __bound_ptr_indir12; break;
    case 16: func = __bound_ptr_indir16; break;
    default:
        error("unhandled size when dereferencing bounded pointer");
        func = NULL;
        break;
    }

    addr = vtop->c.ul;
    *(int *)(addr + 1) = (int)func - addr - 5;
}
#endif

/* end of X86 code generator */
/*************************************************************/