#ifdef TARGET_DEFS_ONLY

// Number of registers available to allocator:
#define NB_REGS 19 // x10-x17 aka a0-a7, f10-f17 aka fa0-fa7, xxx, ra, sp

#define TREG_R(x) (x) // x = 0..7
#define TREG_F(x) (x + 8) // x = 0..7

// Register classes sorted from more general to more precise:
#define RC_INT (1 << 0)
#define RC_FLOAT (1 << 1)
#define RC_R(x) (1 << (2 + (x))) // x = 0..7
#define RC_F(x) (1 << (10 + (x))) // x = 0..7

#define RC_IRET (RC_R(0)) // int return register class
#define RC_IRE2 (RC_R(1)) // int 2nd return register class
#define RC_FRET (RC_F(0)) // float return register class

#define REG_IRET (TREG_R(0)) // int return register number
#define REG_IRE2 (TREG_R(1)) // int 2nd return register number
#define REG_FRET (TREG_F(0)) // float return register number

#define LDOUBLE_SIZE 16
#define LDOUBLE_ALIGN 16

#define CHAR_IS_UNSIGNED
#else
#define USING_GLOBALS
#include "tcc.h"
#include <assert.h>

ST_DATA const char * const target_machine_defs =
    "__riscv_float_abi_double\0"
    ;

#define XLEN 8

#define TREG_RA 17
#define TREG_SP 18
ST_DATA const int reg_classes[NB_REGS] = {
    RC_INT | RC_R(0),
    RC_INT | RC_R(1),
    RC_INT | RC_R(2),
    RC_INT | RC_R(3),
    RC_INT | RC_R(4),
    RC_INT | RC_R(5),
    RC_INT | RC_R(6),
    RC_INT | RC_R(7),
    RC_FLOAT | RC_F(0),
    RC_FLOAT | RC_F(1),
    RC_FLOAT | RC_F(2),
    RC_FLOAT | RC_F(3),
    RC_FLOAT | RC_F(4),
    RC_FLOAT | RC_F(5),
    RC_FLOAT | RC_F(6),
    RC_FLOAT | RC_F(7),
    0,
    1 << TREG_RA,
    1 << TREG_SP
};
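/* Illustration (added): each allocatable register carries its general
   class plus its own singleton class, so gv(RC_INT) accepts any of
   a0-a7 while gv(RC_IRET) forces the value precisely into a0. */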
#if defined(CONFIG_TCC_BCHECK)
static addr_t func_bound_offset;
static unsigned long func_bound_ind;
ST_DATA int func_bound_add_epilog;
#endif
static int ireg(int r)
{
    if (r == TREG_RA)
        return 1; // ra
    if (r == TREG_SP)
        return 2; // sp
    assert(r >= 0 && r < 8);
    return r + 10; // tccrX --> aX == x(10+X)
}

static int is_ireg(int r)
{
    return (unsigned)r < 8 || r == TREG_RA || r == TREG_SP;
}

static int freg(int r)
{
    assert(r >= 8 && r < 16);
    return r - 8 + 10; // tccfX --> faX == f(10+X)
}

static int is_freg(int r)
{
    return r >= 8 && r < 16;
}
ST_FUNC void o(unsigned int c)
{
    int ind1 = ind + 4;
    if (nocode_wanted)
        return;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    write32le(cur_text_section->data + ind, c);
    ind = ind1;
}
static void EIu(uint32_t opcode, uint32_t func3,
                uint32_t rd, uint32_t rs1, uint32_t imm)
{
    o(opcode | (func3 << 12) | (rd << 7) | (rs1 << 15) | (imm << 20));
}

static void ER(uint32_t opcode, uint32_t func3,
               uint32_t rd, uint32_t rs1, uint32_t rs2, uint32_t func7)
{
    o(opcode | func3 << 12 | rd << 7 | rs1 << 15 | rs2 << 20 | func7 << 25);
}

static void EI(uint32_t opcode, uint32_t func3,
               uint32_t rd, uint32_t rs1, uint32_t imm)
{
    assert(! ((imm + (1 << 11)) >> 12));
    EIu(opcode, func3, rd, rs1, imm);
}

static void ES(uint32_t opcode, uint32_t func3,
               uint32_t rs1, uint32_t rs2, uint32_t imm)
{
    assert(! ((imm + (1 << 11)) >> 12));
    o(opcode | (func3 << 12) | ((imm & 0x1f) << 7) | (rs1 << 15)
      | (rs2 << 20) | ((imm >> 5) << 25));
}
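/* Illustration (added): I-type and S-type immediates are both 12-bit
   sign-extended values, hence the shared range check
   assert(!((imm + (1 << 11)) >> 12)), which accepts -2048..2047.
   S-type splits the value: for imm = 0x7fc, imm & 0x1f = 0x1c goes
   into instruction bits [11:7] and imm >> 5 = 0x3f into bits [31:25]. */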
// Patch all branches in list pointed to by t to branch to a:
ST_FUNC void gsym_addr(int t_, int a_)
{
    uint32_t t = t_, a = a_;
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t next = read32le(ptr);
        uint32_t r = a - t, imm;
        if ((r + (1 << 21)) & ~((1U << 22) - 2))
            tcc_error("out-of-range branch chain");
        imm =   (((r >> 12) & 0xff) << 12)
              | (((r >> 11) & 1) << 20)
              | (((r >> 1) & 0x3ff) << 21)
              | (((r >> 20) & 1) << 31);
        write32le(ptr, r == 4 ? 0x33 : 0x6f | imm); // nop || j imm
        t = next;
    }
}
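/* Illustration (added): jal stores its offset scrambled as
   imm[20|10:1|11|19:12] in instruction bits [31:12]; e.g. for
   r = 0x1000 only bit 12 is set, so (r >> 12) & 0xff = 1 lands in
   bits [19:12] and the other fields stay zero.  r == 4 means the
   jump would just fall through, so a nop (0x33, add x0,x0,x0) is
   written instead. */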
static int load_symofs(int r, SValue *sv, int forstore)
{
    int rr, doload = 0;
    int fc = sv->c.i, v = sv->r & VT_VALMASK;
    if (sv->r & VT_SYM) {
        Sym label = {0};
        assert(v == VT_CONST);
        if (sv->sym->type.t & VT_STATIC) { // XXX do this per linker relax
            greloca(cur_text_section, sv->sym, ind,
                    R_RISCV_PCREL_HI20, sv->c.i);
            sv->c.i = 0;
        } else {
            if (((unsigned)fc + (1 << 11)) >> 12)
                tcc_error("unimp: large addend for global address (0x%lx)", (long)sv->c.i);
            greloca(cur_text_section, sv->sym, ind,
                    R_RISCV_GOT_HI20, 0);
            doload = 1;
        }
        label.type.t = VT_VOID | VT_STATIC;
        put_extern_sym(&label, cur_text_section, ind, 0);
        rr = is_ireg(r) ? ireg(r) : 5;
        o(0x17 | (rr << 7)); // auipc RR, 0 %pcrel_hi(sym)+addend
        greloca(cur_text_section, &label, ind,
                doload || !forstore
                  ? R_RISCV_PCREL_LO12_I : R_RISCV_PCREL_LO12_S, 0);
        if (doload)
            EI(0x03, 3, rr, rr, 0); // ld RR, 0(RR)
    } else if (v == VT_LOCAL || v == VT_LLOCAL) {
        rr = 8; // s0
        if (fc != sv->c.i)
            tcc_error("unimp: store(giant local off) (0x%lx)", (long)sv->c.i);
        if (((unsigned)fc + (1 << 11)) >> 12) {
            rr = is_ireg(r) ? ireg(r) : 5; // t0
            o(0x37 | (rr << 7) | ((0x800 + fc) & 0xfffff000)); //lui RR, upper(fc)
            ER(0x33, 0, rr, rr, 8, 0); // add RR, RR, s0
            sv->c.i = fc << 20 >> 20;
        }
    } else
        tcc_error("uhh");
    return rr;
}
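/* Illustration (added): the %pcrel_lo relocation must reference the
   local label placed on the auipc that carries the matching
   %pcrel_hi, not the target symbol itself; a GOT access thus becomes
   auipc rr, %got_pcrel_hi(sym) followed by ld rr, %pcrel_lo(label)(rr). */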
static void load_large_constant(int rr, int fc, uint32_t pi)
{
    if (fc < 0)
        pi++;
    o(0x37 | (rr << 7) | (((pi + 0x800) & 0xfffff000))); // lui RR, up(up(fc))
    EI(0x13, 0, rr, rr, (int)pi << 20 >> 20); // addi RR, RR, lo(up(fc))
    EI(0x13, 1, rr, rr, 12); // slli RR, RR, 12
    EI(0x13, 0, rr, rr, (fc + (1 << 19)) >> 20); // addi RR, RR, up(lo(fc))
    EI(0x13, 1, rr, rr, 12); // slli RR, RR, 12
    fc = fc << 12 >> 12;
    EI(0x13, 0, rr, rr, fc >> 8); // addi RR, RR, lo1(lo(fc))
    EI(0x13, 1, rr, rr, 8); // slli RR, RR, 8
}
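/* Worked example (added): building 0x123456789abc: lui+addi form
   0x1234, then slli 12 + addi fold in 0x568, slli 12 + addi fold in
   the remaining (sign-adjusted) middle bits, and the final slli 8
   leaves the register at 0x123456789a00; the caller supplies the low
   byte (0xbc) as the 12-bit offset of the following memory access or
   a final addi. */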
ST_FUNC void load(int r, SValue *sv)
{
    int fr = sv->r;
    int v = fr & VT_VALMASK;
    int rr = is_ireg(r) ? ireg(r) : freg(r);
    int fc = sv->c.i;
    int bt = sv->type.t & VT_BTYPE;
    int align, size;
    if (fr & VT_LVAL) {
        int func3, opcode = is_freg(r) ? 0x07 : 0x03, br;
        size = type_size(&sv->type, &align);
        assert (!is_freg(r) || bt == VT_FLOAT || bt == VT_DOUBLE);
        if (bt == VT_FUNC) /* XXX should be done in generic code */
            size = PTR_SIZE;
        func3 = size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3;
        if (size < 4 && !is_float(sv->type.t) && (sv->type.t & VT_UNSIGNED))
            func3 |= 4; // unsigned
        if (v == VT_LOCAL || (fr & VT_SYM)) {
            br = load_symofs(r, sv, 0);
            fc = sv->c.i;
        } else if (v < VT_CONST) {
            br = ireg(v);
            /*if (((unsigned)fc + (1 << 11)) >> 12)
                tcc_error("unimp: load(large addend) (0x%x)", fc);*/
            fc = 0; // XXX store ofs in LVAL(reg)
        } else if (v == VT_LLOCAL) {
            br = load_symofs(r, sv, 0);
            fc = sv->c.i;
            EI(0x03, 3, rr, br, fc); // ld RR, fc(BR)
            br = rr;
            fc = 0;
        } else if (v == VT_CONST) {
            int64_t si = sv->c.i;
            si >>= 32;
            if (si != 0) {
                load_large_constant(rr, fc, si);
                fc &= 0xff;
            } else {
                o(0x37 | (rr << 7) | ((0x800 + fc) & 0xfffff000)); //lui RR, upper(fc)
                fc = fc << 20 >> 20;
            }
            br = rr;
        } else
            tcc_error("unimp: load(non-local lval)");
        EI(opcode, func3, rr, br, fc); // l[bhwd][u] / fl[wd] RR, fc(BR)
    } else if (v == VT_CONST) {
        int rb = 0, do32bit = 8, zext = 0;
        assert((!is_float(sv->type.t) && is_ireg(r)) || bt == VT_LDOUBLE);
        if (fr & VT_SYM) {
            rb = load_symofs(r, sv, 0);
            fc = sv->c.i;
            do32bit = 0;
        }
        if (is_float(sv->type.t) && bt != VT_LDOUBLE)
            tcc_error("unimp: load(float)");
        if (fc != sv->c.i) {
            int64_t si = sv->c.i;
            si >>= 32;
            if (si != 0) {
                load_large_constant(rr, fc, si);
                fc &= 0xff;
                rb = rr;
                do32bit = 0;
            } else if (bt == VT_LLONG) {
                /* A 32bit unsigned constant for a 64bit type.
                   lui always sign extends, so we need to do an explicit zext.*/
                zext = 1;
            }
        }
        if (((unsigned)fc + (1 << 11)) >> 12)
            o(0x37 | (rr << 7) | ((0x800 + fc) & 0xfffff000)), rb = rr; //lui RR, upper(fc)
        if (fc || (rr != rb) || do32bit || (fr & VT_SYM))
            EI(0x13 | do32bit, 0, rr, rb, fc << 20 >> 20); // addi[w] R, x0|R, FC
        if (zext) {
            EI(0x13, 1, rr, rr, 32); // slli RR, RR, 32
            EI(0x13, 5, rr, rr, 32); // srli RR, RR, 32
        }
    } else if (v == VT_LOCAL) {
        int br = load_symofs(r, sv, 0);
        assert(is_ireg(r));
        fc = sv->c.i;
        EI(0x13, 0, rr, br, fc); // addi R, s0, FC
    } else if (v < VT_CONST) { /* reg-reg */
        //assert(!fc); XXX support offseted regs
        if (is_freg(r) && is_freg(v))
            ER(0x53, 0, rr, freg(v), freg(v), bt == VT_DOUBLE ? 0x11 : 0x10); //fsgnj.[sd] RR, V, V == fmv.[sd] RR, V
        else if (is_ireg(r) && is_ireg(v))
            EI(0x13, 0, rr, ireg(v), 0); // addi RR, V, 0 == mv RR, V
        else {
            int func7 = is_ireg(r) ? 0x70 : 0x78;
            size = type_size(&sv->type, &align);
            if (size == 8)
                func7 |= 1;
            assert(size == 4 || size == 8);
            o(0x53 | (rr << 7) | ((is_freg(v) ? freg(v) : ireg(v)) << 15)
              | (func7 << 25)); // fmv.{w.x, x.w, d.x, x.d} RR, VR
        }
    } else if (v == VT_CMP) {
        int op = vtop->cmp_op;
        int a = vtop->cmp_r & 0xff;
        int b = (vtop->cmp_r >> 8) & 0xff;
        int inv = 0;
        switch (op) {
            case TOK_ULT:
            case TOK_UGE:
            case TOK_ULE:
            case TOK_UGT:
            case TOK_LT:
            case TOK_GE:
            case TOK_LE:
            case TOK_GT:
                if (op & 1) { // remove [U]GE,GT
                    inv = 1;
                    op--;
                }
                if ((op & 7) == 6) { // [U]LE
                    int t = a; a = b; b = t;
                    inv ^= 1;
                }
                ER(0x33, (op > TOK_UGT) ? 2 : 3, rr, a, b, 0); // slt[u] d, a, b
                if (inv)
                    EI(0x13, 4, rr, rr, 1); // xori d, d, 1
                break;
            case TOK_NE:
            case TOK_EQ:
                if (rr != a || b)
                    ER(0x33, 0, rr, a, b, 0x20); // sub d, a, b
                if (op == TOK_NE)
                    ER(0x33, 3, rr, 0, rr, 0); // sltu d, x0, d == snez d,d
                else
                    EI(0x13, 3, rr, rr, 1); // sltiu d, d, 1 == seqz d,d
                break;
        }
    } else if ((v & ~1) == VT_JMP) {
        int t = v & 1;
        assert(is_ireg(r));
        EI(0x13, 0, rr, 0, t); // addi RR, x0, t
        gjmp_addr(ind + 8);
        EI(0x13, 0, rr, 0, t ^ 1); // addi RR, x0, !t
    } else
        tcc_error("unimp: load(non-const)");
}
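/* Illustration (added): materializing a comparison flips the odd
   opcodes onto their even partners, e.g. a >= b becomes !(a < b):
       slt  rd, a, b
       xori rd, rd, 1
   while [U]LE swaps the operands first. */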
ST_FUNC void store(int r, SValue *sv)
{
    int fr = sv->r & VT_VALMASK;
    int rr = is_ireg(r) ? ireg(r) : freg(r), ptrreg;
    int fc = sv->c.i;
    int bt = sv->type.t & VT_BTYPE;
    int align, size = type_size(&sv->type, &align);
    assert(!is_float(bt) || is_freg(r) || bt == VT_LDOUBLE);
    /* long doubles are in two integer registers, but the load/store
       primitives only deal with one, so do as if it's one reg. */
    if (bt == VT_LDOUBLE)
        size = align = 8;
    if (bt == VT_STRUCT)
        tcc_error("unimp: store(struct)");
    if (size > 8)
        tcc_error("unimp: large sized store");
    assert(sv->r & VT_LVAL);
    if (fr == VT_LOCAL || (sv->r & VT_SYM)) {
        ptrreg = load_symofs(-1, sv, 1);
        fc = sv->c.i;
    } else if (fr < VT_CONST) {
        ptrreg = ireg(fr);
        /*if (((unsigned)fc + (1 << 11)) >> 12)
            tcc_error("unimp: store(large addend) (0x%x)", fc);*/
        fc = 0; // XXX support offsets regs
    } else if (fr == VT_CONST) {
        int64_t si = sv->c.i;
        ptrreg = 5; // t0
        si >>= 32;
        if (si != 0) {
            load_large_constant(ptrreg, fc, si);
            fc &= 0xff;
        } else {
            o(0x37 | (ptrreg << 7) | ((0x800 + fc) & 0xfffff000)); //lui RR, upper(fc)
            fc = fc << 20 >> 20;
        }
    } else
        tcc_error("implement me: %s(!local)", __FUNCTION__);
    ES(is_freg(r) ? 0x27 : 0x23,                          // fs... | s...
       size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3, // ... [wd] | [bhwd]
       ptrreg, rr, fc); // RR, fc(base)
}
static void gcall_or_jmp(int docall)
{
    int tr = docall ? 1 : 5; // ra or t0
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) && vtop->c.i == (int)vtop->c.i)) {
        /* constant symbolic case -> simple relocation */
        greloca(cur_text_section, vtop->sym, ind,
                R_RISCV_CALL_PLT, (int)vtop->c.i);
        o(0x17 | (tr << 7)); // auipc TR, 0 %call(func)
        EI(0x67, 0, tr, tr, 0);// jalr TR, r(TR)
    } else if (vtop->r < VT_CONST) {
        int r = ireg(vtop->r);
        EI(0x67, 0, tr, r, 0); // jalr TR, 0(R)
    } else {
        int r = TREG_RA;
        load(r, vtop);
        r = ireg(r);
        EI(0x67, 0, tr, r, 0); // jalr TR, 0(R)
    }
}
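/* Illustration (added): the constant-symbol path emits
       auipc ra, 0
       jalr  ra, 0(ra)
   and lets the linker patch both instructions via R_RISCV_CALL_PLT;
   with linker relaxation that pair can later shrink to a single jal. */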
#if defined(CONFIG_TCC_BCHECK)

static void gen_bounds_call(int v)
{
    Sym *sym = external_helper_sym(v);

    greloca(cur_text_section, sym, ind, R_RISCV_CALL_PLT, 0);
    o(0x17 | (1 << 7));   // auipc TR, 0 %call(func)
    EI(0x67, 0, 1, 1, 0); // jalr TR, r(TR)
}

static void gen_bounds_prolog(void)
{
    /* leave some room for bound checking code */
    func_bound_offset = lbounds_section->data_offset;
    func_bound_ind = ind;
    func_bound_add_epilog = 0;
    o(0x00000013); /* ld a0,#lbound section pointer */
    o(0x00000013);
    o(0x00000013); /* nop -> call __bound_local_new */
    o(0x00000013);
}

static void gen_bounds_epilog(void)
{
    Sym label = {0};
    addr_t saved_ind;
    addr_t *bounds_ptr;
    Sym *sym_data;
    int offset_modified = func_bound_offset != lbounds_section->data_offset;

    if (!offset_modified && !func_bound_add_epilog)
        return;

    /* add end of table info */
    bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
    *bounds_ptr = 0;

    sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                           func_bound_offset, PTR_SIZE);

    label.type.t = VT_VOID | VT_STATIC;
    /* generate bound local allocation */
    if (offset_modified) {
        saved_ind = ind;
        ind = func_bound_ind;
        put_extern_sym(&label, cur_text_section, ind, 0);
        greloca(cur_text_section, sym_data, ind, R_RISCV_GOT_HI20, 0);
        o(0x17 | (10 << 7)); // auipc a0, 0 %pcrel_hi(sym)+addend
        greloca(cur_text_section, &label, ind, R_RISCV_PCREL_LO12_I, 0);
        EI(0x03, 3, 10, 10, 0); // ld a0, 0(a0)
        gen_bounds_call(TOK___bound_local_new);
        ind = saved_ind;
        label.c = 0; /* force new local ELF symbol */
    }

    /* generate bound check local freeing */
    o(0xe02a1101); /* addi sp,sp,-32  sd a0,0(sp)    */
    o(0xa82ae42e); /* sd a1,8(sp)     fsd fa0,16(sp) */
    put_extern_sym(&label, cur_text_section, ind, 0);
    greloca(cur_text_section, sym_data, ind, R_RISCV_GOT_HI20, 0);
    o(0x17 | (10 << 7)); // auipc a0, 0 %pcrel_hi(sym)+addend
    greloca(cur_text_section, &label, ind, R_RISCV_PCREL_LO12_I, 0);
    EI(0x03, 3, 10, 10, 0); // ld a0, 0(a0)
    gen_bounds_call(TOK___bound_local_delete);
    o(0x65a26502); /* ld a0,0(sp)     ld a1,8(sp)   */
    o(0x61052542); /* fld fa0,16(sp)  addi sp,sp,32 */
}
#endif
static void reg_pass_rec(CType *type, int *rc, int *fieldofs, int ofs)
{
    if ((type->t & VT_BTYPE) == VT_STRUCT) {
        Sym *f;
        if (type->ref->type.t == VT_UNION)
            rc[0] = -1;
        else for (f = type->ref->next; f; f = f->next)
            reg_pass_rec(&f->type, rc, fieldofs, ofs + f->c);
    } else if (type->t & VT_ARRAY) {
        if (type->ref->c < 0 || type->ref->c > 2)
            rc[0] = -1;
        else {
            int a, sz = type_size(&type->ref->type, &a);
            reg_pass_rec(&type->ref->type, rc, fieldofs, ofs);
            if (rc[0] > 2 || (rc[0] == 2 && type->ref->c > 1))
                rc[0] = -1;
            else if (type->ref->c == 2 && rc[0] && rc[1] == RC_FLOAT) {
                rc[++rc[0]] = RC_FLOAT;
                fieldofs[rc[0]] = ((ofs + sz) << 4)
                                  | (type->ref->type.t & VT_BTYPE);
            } else if (type->ref->c == 2)
                rc[0] = -1;
        }
    } else if (rc[0] == 2 || rc[0] < 0 || (type->t & VT_BTYPE) == VT_LDOUBLE)
        rc[0] = -1;
    else if (!rc[0] || rc[1] == RC_FLOAT || is_float(type->t)) {
        rc[++rc[0]] = is_float(type->t) ? RC_FLOAT : RC_INT;
        fieldofs[rc[0]] = (ofs << 4)
                          | ((type->t & VT_BTYPE) == VT_PTR ? VT_LLONG : type->t & VT_BTYPE);
    } else
        rc[0] = -1;
}

static void reg_pass(CType *type, int *prc, int *fieldofs, int named)
{
    prc[0] = 0;
    reg_pass_rec(type, prc, fieldofs, 0);
    if (prc[0] <= 0 || !named) {
        int align, size = type_size(type, &align);
        prc[0] = (size + 7) >> 3;
        prc[1] = prc[2] = RC_INT;
        fieldofs[1] = (0 << 4) | (size <= 1 ? VT_BYTE : size <= 2 ? VT_SHORT : size <= 4 ? VT_INT : VT_LLONG);
        fieldofs[2] = (8 << 4) | (size <= 9 ? VT_BYTE : size <= 10 ? VT_SHORT : size <= 12 ? VT_INT : VT_LLONG);
    }
}
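/* Worked example (added): for struct { float x; int y; } the pass
   yields prc = { 2, RC_FLOAT, RC_INT }, so x travels in a float
   argument register and y in an integer one; unions, long double
   members, or more than two eligible fields set rc[0] = -1 and the
   value falls back to the plain integer/memory convention. */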
ST_FUNC void gfunc_call(int nb_args)
{
    int i, align, size, areg[2];
    int *info = tcc_malloc((nb_args + 1) * sizeof (int));
    int stack_adj = 0, tempspace = 0, stack_add, ofs, splitofs = 0;
    SValue *sv;
    Sym *sa;

#ifdef CONFIG_TCC_BCHECK
    int bc_save = tcc_state->do_bounds_check;
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);
#endif

    areg[0] = 0; /* int arg regs */
    areg[1] = 8; /* float arg regs */
    sa = vtop[-nb_args].type.ref->next;
    for (i = 0; i < nb_args; i++) {
        int nregs, byref = 0, tempofs;
        int prc[3], fieldofs[3];
        sv = &vtop[1 + i - nb_args];
        sv->type.t &= ~VT_ARRAY; // XXX this should be done in tccgen.c
        size = type_size(&sv->type, &align);
        if (size > 16) {
            if (align < XLEN)
                align = XLEN;
            tempspace = (tempspace + align - 1) & -align;
            tempofs = tempspace;
            tempspace += size;
            size = align = 8;
            byref = 64 | (tempofs << 7);
        }
        reg_pass(&sv->type, prc, fieldofs, sa != 0);
        if (!sa && align == 2*XLEN && size <= 2*XLEN)
            areg[0] = (areg[0] + 1) & ~1;
        nregs = prc[0];
        if (size == 0)
            info[i] = 0;
        else if ((prc[1] == RC_INT && areg[0] >= 8)
                 || (prc[1] == RC_FLOAT && areg[1] >= 16)
                 || (nregs == 2 && prc[1] == RC_FLOAT && prc[2] == RC_FLOAT
                     && areg[1] >= 15)
                 || (nregs == 2 && prc[1] != prc[2]
                     && (areg[1] >= 16 || areg[0] >= 8))) {
            info[i] = 32;
            if (align < XLEN)
                align = XLEN;
            stack_adj += (size + align - 1) & -align;
            if (!sa) /* one vararg on stack forces the rest on stack */
                areg[0] = 8, areg[1] = 16;
        } else {
            info[i] = areg[prc[1] - 1]++;
            if (!byref)
                info[i] |= (fieldofs[1] & VT_BTYPE) << 12;
            assert(!(fieldofs[1] >> 4));
            if (nregs == 2) {
                if (prc[2] == RC_FLOAT || areg[0] < 8)
                    info[i] |= (1 + areg[prc[2] - 1]++) << 7;
                else {
                    info[i] |= 16;
                    stack_adj += 8;
                }
                if (!byref) {
                    assert((fieldofs[2] >> 4) < 2048);
                    info[i] |= fieldofs[2] << (12 + 4); // includes offset
                }
            }
        }
        info[i] |= byref;
        if (sa)
            sa = sa->next;
    }
    stack_adj = (stack_adj + 15) & -16;
    tempspace = (tempspace + 15) & -16;
    stack_add = stack_adj + tempspace;

    /* fetch cpu flag before generating any code */
    if ((vtop->r & VT_VALMASK) == VT_CMP)
        gv(RC_INT);

    if (stack_add) {
        if (stack_add >= 0x1000) {
            o(0x37 | (5 << 7) | (-stack_add & 0xfffff000)); //lui t0, upper(v)
            EI(0x13, 0, 5, 5, -stack_add << 20 >> 20); // addi t0, t0, lo(v)
            ER(0x33, 0, 2, 2, 5, 0); // add sp, sp, t0
        } else
            EI(0x13, 0, 2, 2, -stack_add); // addi sp, sp, -adj
        for (i = ofs = 0; i < nb_args; i++) {
            if (info[i] & (64 | 32)) {
                vrotb(nb_args - i);
                size = type_size(&vtop->type, &align);
                if (info[i] & 64) {
                    vset(&char_pointer_type, TREG_SP, 0);
                    vpushi(stack_adj + (info[i] >> 7));
                    gen_op('+');
                    vpushv(vtop); // this replaces the old argument
                    vrott(3);
                    indir();
                    vtop->type = vtop[-1].type;
                    vswap();
                    vstore();
                    vpop();
                    size = align = 8;
                }
                if (info[i] & 32) {
                    if (align < XLEN)
                        align = XLEN;
                    /* Once we support offseted regs we can do this:
                       vset(&vtop->type, TREG_SP | VT_LVAL, ofs);
                       to construct the lvalue for the outgoing stack slot,
                       until then we have to jump through hoops. */
                    vset(&char_pointer_type, TREG_SP, 0);
                    ofs = (ofs + align - 1) & -align;
                    vpushi(ofs);
                    gen_op('+');
                    indir();
                    vtop->type = vtop[-1].type;
                    vswap();
                    vstore();
                    vtop->r = vtop->r2 = VT_CONST; // this arg is done
                    ofs += size;
                }
                vrott(nb_args - i);
            } else if (info[i] & 16) {
                assert(!splitofs);
                splitofs = ofs;
                ofs += 8;
            }
        }
    }
    for (i = 0; i < nb_args; i++) {
        int ii = info[nb_args - 1 - i], r = ii, r2 = r;
        if (!(r & 32)) {
            CType origtype;
            int loadt;
            r &= 15;
            r2 = r2 & 64 ? 0 : (r2 >> 7) & 31;
            assert(r2 <= 16);
            vrotb(i+1);
            origtype = vtop->type;
            size = type_size(&vtop->type, &align);
            if (size == 0)
                goto done;
            loadt = vtop->type.t & VT_BTYPE;
            if (loadt == VT_STRUCT) {
                loadt = (ii >> 12) & VT_BTYPE;
            }
            if (info[nb_args - 1 - i] & 16) {
                assert(!r2);
                r2 = 1 + TREG_RA;
            }
            if (loadt == VT_LDOUBLE) {
                assert(r2);
                r2--;
            } else if (r2) {
                test_lvalue();
                vpushv(vtop);
            }
            vtop->type.t = loadt | (vtop->type.t & VT_UNSIGNED);
            gv(r < 8 ? RC_R(r) : RC_F(r - 8));
            vtop->type = origtype;

            if (r2 && loadt != VT_LDOUBLE) {
                r2--;
                assert(r2 < 16 || r2 == TREG_RA);
                vswap();
                gaddrof();
                vtop->type = char_pointer_type;
                vpushi(XLEN);
#ifdef CONFIG_TCC_BCHECK
                if ((origtype.t & VT_BTYPE) == VT_STRUCT)
                    tcc_state->do_bounds_check = 0;
#endif
                gen_op('+');
#ifdef CONFIG_TCC_BCHECK
                tcc_state->do_bounds_check = bc_save;
#endif
                indir();
                vtop->type = origtype;
                loadt = vtop->type.t & VT_BTYPE;
                if (loadt == VT_STRUCT) {
                    loadt = (ii >> 16) & VT_BTYPE;
                }
                save_reg_upstack(r2, 1);
                vtop->type.t = loadt | (vtop->type.t & VT_UNSIGNED);
                load(r2, vtop);
                assert(r2 < VT_CONST);
                vtop--;
                vtop->r2 = r2;
            }
            if (info[nb_args - 1 - i] & 16) {
                ES(0x23, 3, 2, ireg(vtop->r2), splitofs); // sd t0, ofs(sp)
                vtop->r2 = VT_CONST;
            } else if (loadt == VT_LDOUBLE && vtop->r2 != r2) {
                assert(vtop->r2 <= 7 && r2 <= 7);
                /* XXX we'd like to have 'gv' move directly into
                   the right class instead of us fixing it up. */
                EI(0x13, 0, ireg(r2), ireg(vtop->r2), 0); // mv Ra+1, RR2
                vtop->r2 = r2;
            }
done:
            vrott(i+1);
        }
    }
    save_regs(nb_args + 1);
    gcall_or_jmp(1);
    vtop -= nb_args + 1;
    if (stack_add) {
        if (stack_add >= 0x1000) {
            o(0x37 | (5 << 7) | (stack_add & 0xfffff000)); //lui t0, upper(v)
            EI(0x13, 0, 5, 5, stack_add << 20 >> 20); // addi t0, t0, lo(v)
            ER(0x33, 0, 2, 2, 5, 0); // add sp, sp, t0
        } else
            EI(0x13, 0, 2, 2, stack_add); // addi sp, sp, adj
    }
    tcc_free(info);
}
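/* Illustration (added): info[i] packs the per-argument plan: the low
   bits hold the first register (0-7 int, 8-15 float), flag 16 means
   the second half spills to the stack, flag 32 the whole value lives
   on the stack, flag 64 pass-by-reference with the temp offset from
   bit 7 up, and bits 12.. / 16.. carry the basic types with which to
   load the two halves. */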
static int func_sub_sp_offset, num_va_regs, func_va_list_ofs;

ST_FUNC void gfunc_prolog(Sym *func_sym)
{
    CType *func_type = &func_sym->type;
    int i, addr, align, size;
    int param_addr = 0;
    int areg[2];
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    loc = -16; // for ra and s0
    func_sub_sp_offset = ind;
    ind += 5 * 4;

    areg[0] = 0, areg[1] = 0;
    addr = 0;
    /* if the function returns by reference, then add an
       implicit pointer parameter */
    size = type_size(&func_vt, &align);
    if (size > 2 * XLEN) {
        loc -= 8;
        func_vc = loc;
        ES(0x23, 3, 8, 10 + areg[0]++, loc); // sd a0, loc(s0)
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        int byref = 0;
        int regcount;
        int prc[3], fieldofs[3];
        type = &sym->type;
        size = type_size(type, &align);
        if (size > 2 * XLEN) {
            type = &char_pointer_type;
            size = align = byref = 8;
        }
        reg_pass(type, prc, fieldofs, 1);
        regcount = prc[0];
        if (areg[prc[1] - 1] >= 8
            || (regcount == 2
                && ((prc[1] == RC_FLOAT && prc[2] == RC_FLOAT && areg[1] >= 7)
                    || (prc[1] != prc[2] && (areg[1] >= 8 || areg[0] >= 8))))) {
            if (align < XLEN)
                align = XLEN;
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
        } else {
            loc -= regcount * 8; // XXX could reserve only 'size' bytes
            param_addr = loc;
            for (i = 0; i < regcount; i++) {
                if (areg[prc[1+i] - 1] >= 8) {
                    assert(i == 1 && regcount == 2 && !(addr & 7));
                    EI(0x03, 3, 5, 8, addr); // ld t0, addr(s0)
                    addr += 8;
                    ES(0x23, 3, 8, 5, loc + i*8); // sd t0, loc(s0)
                } else if (prc[1+i] == RC_FLOAT) {
                    ES(0x27, (size / regcount) == 4 ? 2 : 3, 8, 10 + areg[1]++, loc + (fieldofs[i+1] >> 4)); // fs[wd] FAi, loc(s0)
                } else {
                    ES(0x23, 3, 8, 10 + areg[0]++, loc + i*8); // sd aX, loc(s0) // XXX
                }
            }
        }
        sym_push(sym->v & ~SYM_FIELD, &sym->type,
                 (byref ? VT_LLOCAL : VT_LOCAL) | VT_LVAL,
                 param_addr);
    }
    func_va_list_ofs = addr;
    num_va_regs = 0;
    if (func_var) {
        for (; areg[0] < 8; areg[0]++) {
            num_va_regs++;
            ES(0x23, 3, 8, 10 + areg[0], -8 + num_va_regs * 8); // sd aX, loc(s0)
        }
    }
#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_prolog();
#endif
}
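/* Illustration (added): for a variadic function the unused argument
   registers are spilled just above s0 (offsets 0, 8, ...), so the
   register varargs lie contiguous with any stack-passed varargs and
   va_arg can walk one flat area starting at func_va_list_ofs. */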
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret,
                       int *ret_align, int *regsize)
{
    int align, size = type_size(vt, &align), nregs;
    int prc[3], fieldofs[3];
    *ret_align = 1;
    *regsize = 8;
    if (size > 16)
        return 0;
    reg_pass(vt, prc, fieldofs, 1);
    nregs = prc[0];
    if (nregs == 2 && prc[1] != prc[2])
        return -1; /* generic code can't deal with this case */
    if (prc[1] == RC_FLOAT) {
        *regsize = size / nregs;
    }
    ret->t = fieldofs[1] & VT_BTYPE;
    ret->ref = NULL;
    return nregs;
}
ST_FUNC void arch_transfer_ret_regs(int aftercall)
{
    int prc[3], fieldofs[3];
    reg_pass(&vtop->type, prc, fieldofs, 1);
    assert(prc[0] == 2 && prc[1] != prc[2] && !(fieldofs[1] >> 4));
    assert(vtop->r == (VT_LOCAL | VT_LVAL));
    vpushv(vtop);
    vtop->type.t = fieldofs[1] & VT_BTYPE;
    (aftercall ? store : load)(prc[1] == RC_INT ? REG_IRET : REG_FRET, vtop);
    vtop->c.i += fieldofs[2] >> 4;
    vtop->type.t = fieldofs[2] & VT_BTYPE;
    (aftercall ? store : load)(prc[2] == RC_INT ? REG_IRET : REG_FRET, vtop);
    vtop--;
}
ST_FUNC void gfunc_epilog(void)
{
    int v, saved_ind, d, large_ofs_ind;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_epilog();
#endif

    loc = (loc - num_va_regs * 8);
    d = v = (-loc + 15) & -16;

    if (v >= (1 << 11)) {
        d = 16;
        o(0x37 | (5 << 7) | ((0x800 + (v-16)) & 0xfffff000)); //lui t0, upper(v)
        EI(0x13, 0, 5, 5, (v-16) << 20 >> 20); // addi t0, t0, lo(v)
        ER(0x33, 0, 2, 2, 5, 0); // add sp, sp, t0
    }
    EI(0x03, 3, 1, 2, d - 8 - num_va_regs * 8); // ld ra, v-8(sp)
    EI(0x03, 3, 8, 2, d - 16 - num_va_regs * 8); // ld s0, v-16(sp)
    EI(0x13, 0, 2, 2, d); // addi sp, sp, v
    EI(0x67, 0, 0, 1, 0); // jalr x0, 0(x1), aka ret
    large_ofs_ind = ind;
    if (v >= (1 << 11)) {
        EI(0x13, 0, 8, 2, d - num_va_regs * 8); // addi s0, sp, d
        o(0x37 | (5 << 7) | ((0x800 + (v-16)) & 0xfffff000)); //lui t0, upper(v)
        EI(0x13, 0, 5, 5, (v-16) << 20 >> 20); // addi t0, t0, lo(v)
        ER(0x33, 0, 2, 2, 5, 0x20); // sub sp, sp, t0
        gjmp_addr(func_sub_sp_offset + 5*4);
    }
    saved_ind = ind;

    ind = func_sub_sp_offset;
    EI(0x13, 0, 2, 2, -d); // addi sp, sp, -d
    ES(0x23, 3, 2, 1, d - 8 - num_va_regs * 8); // sd ra, d-8(sp)
    ES(0x23, 3, 2, 8, d - 16 - num_va_regs * 8); // sd s0, d-16(sp)
    if (v < (1 << 11))
        EI(0x13, 0, 8, 2, d - num_va_regs * 8); // addi s0, sp, d
    else
        gjmp_addr(large_ofs_ind);
    if ((ind - func_sub_sp_offset) != 5*4)
        EI(0x13, 0, 0, 0, 0); // addi x0, x0, 0 == nop
    ind = saved_ind;
}
ST_FUNC void gen_va_start(void)
{
    vtop--;
    vset(&char_pointer_type, VT_LOCAL, func_va_list_ofs);
}
ST_FUNC void gen_fill_nops(int bytes)
{
    if ((bytes & 3))
        tcc_error("alignment of code section not multiple of 4");
    while (bytes > 0) {
        EI(0x13, 0, 0, 0, 0); // addi x0, x0, 0 == nop
        bytes -= 4;
    }
}
// Generate forward branch to label:
ST_FUNC int gjmp(int t)
{
    if (nocode_wanted)
        return t;
    o(t);
    return ind - 4;
}
// Generate branch to known address:
ST_FUNC void gjmp_addr(int a)
{
    uint32_t r = a - ind, imm;
    if ((r + (1 << 21)) & ~((1U << 22) - 2)) {
        o(0x17 | (5 << 7) | (((r + 0x800) & 0xfffff000))); // lui RR, up(r)
        r = (int)r << 20 >> 20;
        EI(0x67, 0, 0, 5, r); // jalr x0, r(t0)
    } else {
        imm =   (((r >> 12) & 0xff) << 12)
              | (((r >> 11) & 1) << 20)
              | (((r >> 1) & 0x3ff) << 21)
              | (((r >> 20) & 1) << 31);
        o(0x6f | imm); // jal x0, imm == j imm
    }
}
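/* Illustration (added): a direct jal covers +/-1 MiB; beyond that the
   code above switches to an auipc t0 + jalr pair, which reaches the
   whole 32-bit pc-relative range at the cost of clobbering t0. */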
ST_FUNC int gjmp_cond(int op, int t)
{
    int tmp;
    int a = vtop->cmp_r & 0xff;
    int b = (vtop->cmp_r >> 8) & 0xff;
    switch (op) {
        case TOK_ULT: op = 6; break;
        case TOK_UGE: op = 7; break;
        case TOK_ULE: op = 7; tmp = a; a = b; b = tmp; break;
        case TOK_UGT: op = 6; tmp = a; a = b; b = tmp; break;
        case TOK_LT: op = 4; break;
        case TOK_GE: op = 5; break;
        case TOK_LE: op = 5; tmp = a; a = b; b = tmp; break;
        case TOK_GT: op = 4; tmp = a; a = b; b = tmp; break;
        case TOK_NE: op = 1; break;
        case TOK_EQ: op = 0; break;
    }
    o(0x63 | (op ^ 1) << 12 | a << 15 | b << 20 | 8 << 7); // bOP a,b,+4
    return gjmp(t);
}
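/* Illustration (added): the condition is inverted (op ^ 1 turns e.g.
   blt into bge) and the branch skips the next instruction, so
   "if (a < b) goto L" becomes  bge a,b,+8  followed by the jal that
   gjmp() emits and gsym_addr() later patches to L. */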
ST_FUNC int gjmp_append(int n, int t)
{
    void *p;
    /* insert jump list n into t */
    if (n) {
        uint32_t n1 = n, n2;
        while ((n2 = read32le(p = cur_text_section->data + n1)))
            n1 = n2;
        write32le(p, t);
        t = n;
    }
    return t;
}
static void gen_opil(int op, int ll)
{
    int a, b, d;
    int func3 = 0;
    ll = ll ? 0 : 8;
    if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
        int fc = vtop->c.i;
        if (fc == vtop->c.i && !(((unsigned)fc + (1 << 11)) >> 12)) {
            int cll = 0;
            int m = ll ? 31 : 63;
            vswap();
            gv(RC_INT);
            a = ireg(vtop[0].r);
            --vtop;
            d = get_reg(RC_INT);
            ++vtop;
            vswap();
            switch (op) {
                case '-':
                    if (fc <= -(1 << 11))
                        break;
                    fc = -fc;
                case '+':
                    func3 = 0; // addi d, a, fc
                    cll = ll;
                do_cop:
                    EI(0x13 | cll, func3, ireg(d), a, fc);
                    --vtop;
                    if (op >= TOK_ULT && op <= TOK_GT) {
                        vset_VT_CMP(TOK_NE);
                        vtop->cmp_r = ireg(d) | 0 << 8;
                    } else
                        vtop[0].r = d;
                    return;
                case TOK_LE:
                    if (fc >= (1 << 11) - 1)
                        break;
                    ++fc;
                case TOK_LT: func3 = 2; goto do_cop; // slti d, a, fc
                case TOK_ULE:
                    if (fc >= (1 << 11) - 1 || fc == -1)
                        break;
                    ++fc;
                case TOK_ULT: func3 = 3; goto do_cop; // sltiu d, a, fc
                case '^': func3 = 4; goto do_cop; // xori d, a, fc
                case '|': func3 = 6; goto do_cop; // ori d, a, fc
                case '&': func3 = 7; goto do_cop; // andi d, a, fc
                case TOK_SHL: func3 = 1; cll = ll; fc &= m; goto do_cop; // slli d, a, fc
                case TOK_SHR: func3 = 5; cll = ll; fc &= m; goto do_cop; // srli d, a, fc
                case TOK_SAR: func3 = 5; cll = ll; fc = 1024 | (fc & m); goto do_cop;

                case TOK_UGE: /* -> TOK_ULT */
                case TOK_UGT: /* -> TOK_ULE */
                case TOK_GE: /* -> TOK_LT */
                case TOK_GT: /* -> TOK_LE */
                    gen_opil(op - 1, !ll);
                    vtop->cmp_op ^= 1;
                    return;

                case TOK_NE:
                case TOK_EQ:
                    if (fc)
                        gen_opil('-', !ll), a = ireg(vtop++->r);
                    --vtop;
                    vset_VT_CMP(op);
                    vtop->cmp_r = a | 0 << 8;
                    return;
            }
        }
    }
    gv2(RC_INT, RC_INT);
    a = ireg(vtop[-1].r);
    b = ireg(vtop[0].r);
    vtop -= 2;
    d = get_reg(RC_INT);
    ++vtop;
    vtop[0].r = d;
    d = ireg(d);
    switch (op) {
    default:
        if (op >= TOK_ULT && op <= TOK_GT) {
            vset_VT_CMP(op);
            vtop->cmp_r = a | b << 8;
            break;
        }
        tcc_error("implement me: %s(%s)", __FUNCTION__, get_tok_str(op, NULL));
        break;

    case '+':
        ER(0x33 | ll, 0, d, a, b, 0); // add d, a, b
        break;
    case '-':
        ER(0x33 | ll, 0, d, a, b, 0x20); // sub d, a, b
        break;
    case TOK_SAR:
        ER(0x33 | ll | ll, 5, d, a, b, 0x20); // sra d, a, b
        break;
    case TOK_SHR:
        ER(0x33 | ll | ll, 5, d, a, b, 0); // srl d, a, b
        break;
    case TOK_SHL:
        ER(0x33 | ll, 1, d, a, b, 0); // sll d, a, b
        break;
    case '*':
        ER(0x33 | ll, 0, d, a, b, 1); // mul d, a, b
        break;
    case '/':
        ER(0x33 | ll, 4, d, a, b, 1); // div d, a, b
        break;
    case '&':
        ER(0x33, 7, d, a, b, 0); // and d, a, b
        break;
    case '^':
        ER(0x33, 4, d, a, b, 0); // xor d, a, b
        break;
    case '|':
        ER(0x33, 6, d, a, b, 0); // or d, a, b
        break;
    case '%':
        ER(ll ? 0x3b: 0x33, 6, d, a, b, 1); // rem d, a, b
        break;
    case TOK_UMOD:
        ER(0x33 | ll, 7, d, a, b, 1); // remu d, a, b
        break;
    case TOK_PDIV:
    case TOK_UDIV:
        ER(0x33 | ll, 5, d, a, b, 1); // divu d, a, b
        break;
    }
}
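/* Illustration (added): inside gen_opil ll is 8 for 32-bit operations
   and 0 for 64-bit ones, so or-ing it into the opcode turns 0x33 (OP)
   into 0x3b (OP-32) and 0x13 (OP-IMM) into 0x1b (OP-IMM-32), e.g.
   add becomes addw and addi becomes addiw. */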
ST_FUNC void gen_opi(int op)
{
    gen_opil(op, 0);
}

ST_FUNC void gen_opl(int op)
{
    gen_opil(op, 1);
}
ST_FUNC void gen_opf(int op)
{
    int rs1, rs2, rd, dbl, invert;
    if (vtop[0].type.t == VT_LDOUBLE) {
        CType type = vtop[0].type;
        int func = 0;
        int cond = -1;
        switch (op) {
        case '*': func = TOK___multf3; break;
        case '+': func = TOK___addtf3; break;
        case '-': func = TOK___subtf3; break;
        case '/': func = TOK___divtf3; break;
        case TOK_EQ: func = TOK___eqtf2; cond = 1; break;
        case TOK_NE: func = TOK___netf2; cond = 0; break;
        case TOK_LT: func = TOK___lttf2; cond = 10; break;
        case TOK_GE: func = TOK___getf2; cond = 11; break;
        case TOK_LE: func = TOK___letf2; cond = 12; break;
        case TOK_GT: func = TOK___gttf2; cond = 13; break;
        default: assert(0); break;
        }
        vpush_helper_func(func);
        vrott(3);
        gfunc_call(2);
        vpushi(0);
        vtop->r = REG_IRET;
        vtop->r2 = cond < 0 ? TREG_R(1) : VT_CONST;
        if (cond < 0)
            vtop->type = type;
        else {
            vpushi(0);
            gen_opil(op, 1);
        }
        return;
    }

    gv2(RC_FLOAT, RC_FLOAT);
    assert(vtop->type.t == VT_DOUBLE || vtop->type.t == VT_FLOAT);
    dbl = vtop->type.t == VT_DOUBLE;
    rs1 = freg(vtop[-1].r);
    rs2 = freg(vtop->r);
    vtop--;
    invert = 0;
    switch(op) {
    default:
        assert(0);
    case '+':
        op = 0; // fadd
    arithop:
        rd = get_reg(RC_FLOAT);
        vtop->r = rd;
        rd = freg(rd);
        ER(0x53, 7, rd, rs1, rs2, dbl | (op << 2)); // fop.[sd] RD, RS1, RS2 (dyn rm)
        break;
    case '-':
        op = 1; // fsub
        goto arithop;
    case '*':
        op = 2; // fmul
        goto arithop;
    case '/':
        op = 3; // fdiv
        goto arithop;
    case TOK_EQ:
        op = 2; // EQ
    cmpop:
        rd = get_reg(RC_INT);
        vtop->r = rd;
        rd = ireg(rd);
        ER(0x53, op, rd, rs1, rs2, dbl | 0x50); // fcmp.[sd] RD, RS1, RS2 (op == eq/lt/le)
        if (invert)
            EI(0x13, 4, rd, rd, 1); // xori RD, 1
        break;
    case TOK_NE:
        invert = 1;
        op = 2; // EQ
        goto cmpop;
    case TOK_LT:
        op = 1; // LT
        goto cmpop;
    case TOK_LE:
        op = 0; // LE
        goto cmpop;
    case TOK_GT:
        op = 1; // LT
        rd = rs1, rs1 = rs2, rs2 = rd;
        goto cmpop;
    case TOK_GE:
        op = 0; // LE
        rd = rs1, rs1 = rs2, rs2 = rd;
        goto cmpop;
    }
}
ST_FUNC void gen_cvt_sxtw(void)
{
    /* XXX on risc-v the registers are usually sign-extended already.
       Let's try to not do anything here. */
}
ST_FUNC void gen_cvt_itof(int t)
{
    int rr = ireg(gv(RC_INT)), dr;
    int u = vtop->type.t & VT_UNSIGNED;
    int l = (vtop->type.t & VT_BTYPE) == VT_LLONG;
    if (t == VT_LDOUBLE) {
        int func = l ?
            (u ? TOK___floatunditf : TOK___floatditf) :
            (u ? TOK___floatunsitf : TOK___floatsitf);
        vpush_helper_func(func);
        vrott(2);
        gfunc_call(1);
        vpushi(0);
        vtop->type.t = t;
        vtop->r = REG_IRET;
        vtop->r2 = TREG_R(1);
    } else {
        vtop--;
        dr = get_reg(RC_FLOAT);
        vtop++;
        vtop->r = dr;
        dr = freg(dr);
        EIu(0x53, 7, dr, rr, ((0x68 | (t == VT_DOUBLE ? 1 : 0)) << 5) | (u ? 1 : 0) | (l ? 2 : 0)); // fcvt.[sd].[wl][u]
    }
}
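/* Illustration (added): in the fcvt encoding above the rs2 field (the
   low bits of the EIu immediate) selects the integer width -- bit 0 =
   unsigned, bit 1 = 64-bit -- while funct7 0x68|dbl selects the float
   precision, so (double)(unsigned long)x becomes fcvt.d.lu with
   rs2 = 3. */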
ST_FUNC void gen_cvt_ftoi(int t)
{
    int ft = vtop->type.t & VT_BTYPE;
    int l = (t & VT_BTYPE) == VT_LLONG;
    int u = t & VT_UNSIGNED;
    if (ft == VT_LDOUBLE) {
        int func = l ?
            (u ? TOK___fixunstfdi : TOK___fixtfdi) :
            (u ? TOK___fixunstfsi : TOK___fixtfsi);
        vpush_helper_func(func);
        vrott(2);
        gfunc_call(1);
        vpushi(0);
        vtop->type.t = t;
        vtop->r = REG_IRET;
    } else {
        int rr = freg(gv(RC_FLOAT)), dr;
        vtop--;
        dr = get_reg(RC_INT);
        vtop++;
        vtop->r = dr;
        dr = ireg(dr);
        EIu(0x53, 1, dr, rr, ((0x60 | (ft == VT_DOUBLE ? 1 : 0)) << 5) | (u ? 1 : 0) | (l ? 2 : 0)); // fcvt.[wl][u].[sd] rtz
    }
}
ST_FUNC void gen_cvt_ftof(int dt)
{
    int st = vtop->type.t & VT_BTYPE, rs, rd;
    dt &= VT_BTYPE;
    if (dt == st)
        return;
    if (dt == VT_LDOUBLE || st == VT_LDOUBLE) {
        int func = (dt == VT_LDOUBLE) ?
            (st == VT_FLOAT ? TOK___extendsftf2 : TOK___extenddftf2) :
            (dt == VT_FLOAT ? TOK___trunctfsf2 : TOK___trunctfdf2);
        /* We can't use gfunc_call, as func_old_type works like vararg
           functions, and on riscv unnamed float args are passed like
           integers. But we really need them in the float argument registers
           for extendsftf2/extenddftf2. So, do it explicitely. */
        save_regs(1);
        if (dt == VT_LDOUBLE)
            gv(RC_F(0));
        else {
            gv(RC_R(0));
            assert(vtop->r2 < 7);
            if (vtop->r2 != 1 + vtop->r) {
                EI(0x13, 0, ireg(vtop->r) + 1, ireg(vtop->r2), 0); // mv Ra+1, RR2
                vtop->r2 = 1 + vtop->r;
            }
        }
        vpush_helper_func(func);
        gcall_or_jmp(1);
        vtop -= 2;
        vpushi(0);
        vtop->type.t = dt;
        if (dt == VT_LDOUBLE)
            vtop->r = REG_IRET, vtop->r2 = REG_IRET+1;
        else
            vtop->r = REG_FRET;
    } else {
        assert (dt == VT_FLOAT || dt == VT_DOUBLE);
        assert (st == VT_FLOAT || st == VT_DOUBLE);
        rs = gv(RC_FLOAT);
        rd = get_reg(RC_FLOAT);
        if (dt == VT_DOUBLE)
            EI(0x53, 0, freg(rd), freg(rs), 0x21 << 5); // fcvt.d.s RD, RS (no rm)
        else
            EI(0x53, 7, freg(rd), freg(rs), (0x20 << 5) | 1); // fcvt.s.d RD, RS (dyn rm)
        vtop->r = rd;
    }
}
/* increment tcov counter */
ST_FUNC void gen_increment_tcov (SValue *sv)
{
    int r1, r2;
    Sym label = {0};
    label.type.t = VT_VOID | VT_STATIC;

    vpushv(sv);
    vtop->r = r1 = get_reg(RC_INT);
    r2 = get_reg(RC_INT);
    r1 = ireg(r1);
    r2 = ireg(r2);
    greloca(cur_text_section, sv->sym, ind, R_RISCV_PCREL_HI20, 0);
    put_extern_sym(&label, cur_text_section, ind, 0);
    o(0x17 | (r1 << 7)); // auipc RR, 0 %pcrel_hi(sym)
    greloca(cur_text_section, &label, ind, R_RISCV_PCREL_LO12_I, 0);
    EI(0x03, 3, r2, r1, 0); // ld r2, x[r1]
    EI(0x13, 0, r2, r2, 1); // addi r2, r2, #1
    greloca(cur_text_section, sv->sym, ind, R_RISCV_PCREL_HI20, 0);
    label.c = 0; /* force new local ELF symbol */
    put_extern_sym(&label, cur_text_section, ind, 0);
    o(0x17 | (r1 << 7)); // auipc RR, 0 %pcrel_hi(sym)
    greloca(cur_text_section, &label, ind, R_RISCV_PCREL_LO12_S, 0);
    ES(0x23, 3, r1, r2, 0); // sd r2, [r1]
    vpop();
}
ST_FUNC void ggoto(void)
{
    gcall_or_jmp(0);
    vtop--;
}
ST_FUNC void gen_vla_sp_save(int addr)
{
    ES(0x23, 3, 8, 2, addr); // sd sp, fc(s0)
}

ST_FUNC void gen_vla_sp_restore(int addr)
{
    EI(0x03, 3, 2, 8, addr); // ld sp, fc(s0)
}
ST_FUNC void gen_vla_alloc(CType *type, int align)
{
    int rr;
#if defined(CONFIG_TCC_BCHECK)
    if (tcc_state->do_bounds_check)
        vpushv(vtop);
#endif
    rr = ireg(gv(RC_INT));
#if defined(CONFIG_TCC_BCHECK)
    if (tcc_state->do_bounds_check)
        EI(0x13, 0, rr, rr, 15+1); // addi RR, RR, 15+1
    else
#endif
    EI(0x13, 0, rr, rr, 15); // addi RR, RR, 15
    EI(0x13, 7, rr, rr, -16); // andi, RR, RR, -16
    ER(0x33, 0, 2, 2, rr, 0x20); // sub sp, sp, rr
    vpop();
#if defined(CONFIG_TCC_BCHECK)
    if (tcc_state->do_bounds_check) {
        vpushi(0);
        vtop->r = TREG_R(0);
        o(0x00010513); /* mv a0,sp */
        vswap();
        vpush_helper_func(TOK___bound_new_region);
        vrott(3);
        gfunc_call(2);
        func_bound_add_epilog = 1;
    }
#endif
}

#endif // TARGET_DEFS_ONLY