riscv: some long double support
riscv64-gen.c
#ifdef TARGET_DEFS_ONLY

// Number of registers available to allocator:
#define NB_REGS 16 // x10-x17 aka a0-a7, f10-f17 aka fa0-fa7

#define TREG_R(x) (x) // x = 0..7
#define TREG_F(x) (x + 8) // x = 0..7

// Register classes sorted from more general to more precise:
#define RC_INT (1 << 0)
#define RC_FLOAT (1 << 1)
#define RC_R(x) (1 << (2 + (x))) // x = 0..7
#define RC_F(x) (1 << (10 + (x))) // x = 0..7

#define RC_IRET (RC_R(0)) // int return register class
#define RC_FRET (RC_F(0)) // float return register class

#define REG_IRET (TREG_R(0)) // int return register number
#define REG_FRET (TREG_F(0)) // float return register number

#define PTR_SIZE 8

#define LDOUBLE_SIZE 16
#define LDOUBLE_ALIGN 16

#define MAX_ALIGN 16

#define CHAR_IS_UNSIGNED

#else
#include "tcc.h"
#include <assert.h>

#define XLEN 8

#define TREG_RA 17
#define TREG_SP 18
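
/* TREG_RA and TREG_SP are not allocatable (they lie outside NB_REGS and
   reg_classes[]); they exist only so ireg() can name the machine registers
   ra (x1) and sp (x2). */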
ST_DATA const int reg_classes[NB_REGS] = {
    RC_INT | RC_R(0),
    RC_INT | RC_R(1),
    RC_INT | RC_R(2),
    RC_INT | RC_R(3),
    RC_INT | RC_R(4),
    RC_INT | RC_R(5),
    RC_INT | RC_R(6),
    RC_INT | RC_R(7),
    RC_FLOAT | RC_F(0),
    RC_FLOAT | RC_F(1),
    RC_FLOAT | RC_F(2),
    RC_FLOAT | RC_F(3),
    RC_FLOAT | RC_F(4),
    RC_FLOAT | RC_F(5),
    RC_FLOAT | RC_F(6),
    RC_FLOAT | RC_F(7)
};
static int ireg(int r)
{
    if (r == TREG_RA)
        return 1; // ra
    if (r == TREG_SP)
        return 2; // sp
    assert(r >= 0 && r < 8);
    return r + 10; // tccrX --> aX == x(10+X)
}

static int is_ireg(int r)
{
    return r < 8 || r == TREG_RA || r == TREG_SP;
}

static int freg(int r)
{
    assert(r >= 8 && r < 16);
    return r - 8 + 10; // tccfX --> faX == f(10+X)
}

static int is_freg(int r)
{
    return r >= 8 && r < 16;
}
ST_FUNC void o(unsigned int c)
{
    int ind1 = ind + 4;
    if (nocode_wanted)
        return;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    write32le(cur_text_section->data + ind, c);
    ind = ind1;
}
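
/* EI() emits an I-type instruction (imm[11:0] in bits 31:20, rs1 in 19:15,
   funct3 in 14:12, rd in 11:7), e.g. addi/ld/jalr; ES() emits an S-type
   instruction (imm[11:5] in bits 31:25, rs2 in 24:20, imm[4:0] in 11:7),
   e.g. sd/fsd.  Both assert that the immediate fits in 12 signed bits. */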
static void EI(uint32_t opcode, uint32_t func3,
               uint32_t rd, uint32_t rs1, uint32_t imm)
{
    assert(! ((imm + (1 << 11)) >> 12));
    o(opcode | (func3 << 12) | (rd << 7) | (rs1 << 15) | (imm << 20));
}

static void ES(uint32_t opcode, uint32_t func3,
               uint32_t rs1, uint32_t rs2, uint32_t imm)
{
    assert(! ((imm + (1 << 11)) >> 12));
    o(opcode | (func3 << 12) | ((imm & 0x1f) << 7) | (rs1 << 15)
      | (rs2 << 20) | ((imm >> 5) << 25));
}

// Patch all branches in list pointed to by t to branch to a:
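// The list is a chain of 32-bit links stored in the instruction slots
// themselves: each slot holds the offset of the next entry and is patched
// into "jal x0, a-t" (J-type immediate, bits imm[20|10:1|11|19:12] packed
// into instruction bits 31:12), or into a no-op when the target is the
// immediately following instruction (r == 4).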
ST_FUNC void gsym_addr(int t_, int a_)
{
    uint32_t t = t_;
    uint32_t a = a_;
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t next = read32le(ptr);
        uint32_t r = a - t, imm;
        if ((r + (1 << 21)) & ~((1U << 22) - 2))
            tcc_error("out-of-range branch chain");
        imm = (((r >> 12) & 0xff) << 12)
            | (((r >> 11) & 1) << 20)
            | (((r >> 1) & 0x3ff) << 21)
            | (((r >> 20) & 1) << 31);
        write32le(ptr, r == 4 ? 0x33 : 0x6f | imm); // nop || j imm
        t = next;
    }
}
ST_FUNC void load(int r, SValue *sv)
{
    int fr = sv->r;
    int v = fr & VT_VALMASK;
    int rr = is_ireg(r) ? ireg(r) : freg(r);
    int fc = sv->c.i;
    int bt = sv->type.t & VT_BTYPE;
    int align, size = type_size(&sv->type, &align);
    if (fr & VT_LVAL) {
        int func3, opcode = 0x03;
        if (is_freg(r)) {
            assert(bt == VT_DOUBLE || bt == VT_FLOAT);
            opcode = 0x07;
            func3 = bt == VT_DOUBLE ? 3 : 2;
        } else {
            assert(is_ireg(r));
            if (bt == VT_FUNC)
                size = PTR_SIZE;
            func3 = size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3;
            if (size < 8 && !is_float(sv->type.t) && (sv->type.t & VT_UNSIGNED))
                func3 |= 4;
        }
        if (v == VT_LOCAL) {
            if (((unsigned)fc + (1 << 11)) >> 12)
                tcc_error("unimp: load(large local ofs) (0x%x)", fc);
            EI(opcode, func3, rr, 8, fc); // l[bhwd][u]/fl[wd] RR, fc(s0)
        } else if (v < VT_CONST) {
            /*if (((unsigned)fc + (1 << 11)) >> 12)
              tcc_error("unimp: load(large addend) (0x%x)", fc);*/
            fc = 0; // XXX store ofs in LVAL(reg)
            EI(opcode, func3, rr, ireg(v), fc); // l[bhwd][u] RR, 0(V)
        } else if (v == VT_CONST && (fr & VT_SYM)) {
            static Sym label;
            int addend = 0, tempr;
            if (1 || ((unsigned)fc + (1 << 11)) >> 12)
                addend = fc, fc = 0;
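
            /* pc-relative access: the auipc gets a R_RISCV_PCREL_HI20
               relocation against the symbol, and the following load gets a
               R_RISCV_PCREL_LO12_I relocation that refers back to the auipc
               through a local label placed on it (the usual %pcrel_hi /
               %pcrel_lo pairing). */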
            greloca(cur_text_section, sv->sym, ind,
                    R_RISCV_PCREL_HI20, addend);
            if (!label.v) {
                label.v = tok_alloc(".L0 ", 4)->tok;
                label.type.t = VT_VOID | VT_STATIC;
            }
            label.c = 0; /* force new local ELF symbol */
            put_extern_sym(&label, cur_text_section, ind, 0);
            tempr = is_ireg(r) ? rr : ireg(get_reg(RC_INT));
            o(0x17 | (tempr << 7)); // auipc TR, 0 %pcrel_hi(sym)+addend
            greloca(cur_text_section, &label, ind,
                    R_RISCV_PCREL_LO12_I, 0);
            EI(opcode, func3, rr, tempr, fc); // l[bhwd][u] RR, fc(TR)
        } else if (v == VT_LLOCAL) {
            int tempr = rr;
            if (((unsigned)fc + (1 << 11)) >> 12)
                tcc_error("unimp: load(large local ofs) (0x%x)", fc);
            if (!is_ireg(r))
                tempr = ireg(get_reg(RC_INT));
            EI(0x03, 3, tempr, 8, fc); // ld TEMPR, fc(s0)
            EI(opcode, func3, rr, tempr, 0); // l[bhwd][u] RR, 0(TEMPR)
        } else {
            tcc_error("unimp: load(non-local lval)");
        }
    } else if (v == VT_CONST) {
        int rb = 0;
        assert(!is_float(sv->type.t) && is_ireg(r));
        if (fr & VT_SYM) {
            static Sym label;
            greloca(cur_text_section, sv->sym, ind,
                    R_RISCV_PCREL_HI20, sv->c.i);
            if (!label.v) {
                label.v = tok_alloc(".L0 ", 4)->tok;
                label.type.t = VT_VOID | VT_STATIC;
            }
            label.c = 0; /* force new local ELF symbol */
            put_extern_sym(&label, cur_text_section, ind, 0);
            o(0x17 | (rr << 7)); // auipc RR, 0 %call(func)
            greloca(cur_text_section, &label, ind,
                    R_RISCV_PCREL_LO12_I, 0);
            rb = rr;
            fc = 0;
            sv->c.i = 0;
        }
        if (is_float(sv->type.t))
            tcc_error("unimp: load(float)");
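        /* Constants whose low 32 bits do not sign-extend to the full value
           (i.e. 32-bit values with bit 31 set) are built in pieces:
           lui+addi materialize bits 31:10 of the (rounded) value shifted
           right by 8, slli moves them into place, and the final addi below
           adds the signed low 10 bits. */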
        if (fc != sv->c.i) {
            int64_t si = sv->c.i;
            uint32_t pi;
            si >>= 32;
            if (si != 0)
                tcc_error("unimp: load(very large const)");
            /* A 32bit unsigned constant. lui always sign extends, so we need
               tricks. */
            pi = (uint32_t)sv->c.i;
            o(0x37 | (rr << 7) | (((pi + 0x80000) & 0xfff00000) >> 8)); // lui RR, up(fc)>>8
            EI(0x13, 0, rr, rr, (((pi + 0x200) & 0x000ffc00) >> 8) | (-((int)(pi + 0x200) & 0x80000) >> 8)); // addi RR, RR, mid(fc)
            EI(0x13, 1, rr, rr, 8); // slli RR, RR, 8
            fc = (pi & 0x3ff) | (-((int)(pi & 0x200)));
            rb = rr;
        }
        if (((unsigned)fc + (1 << 11)) >> 12)
            o(0x37 | (rr << 7) | ((0x800 + fc) & 0xfffff000)), rb = rr; //lui RR, upper(fc)
        EI(0x13, 0, rr, rb, fc << 20 >> 20); // addi R, x0|R, FC
    } else if (v == VT_LOCAL) {
        assert(is_ireg(r));
        if (((unsigned)fc + (1 << 11)) >> 12)
            tcc_error("unimp: load(addr large local ofs) (0x%x)", fc);
        EI(0x13, 0, rr, 8, fc); // addi R, s0, FC
    } else if (v < VT_CONST) {
        /* reg-reg */
        //assert(!fc); XXX support offseted regs
        if (is_freg(r) && is_freg(v))
            o(0x53 | (rr << 7) | (freg(v) << 15) | (freg(v) << 20) | ((bt == VT_DOUBLE ? 0x11 : 0x10) << 25)); //fsgnj.[sd] RR, V, V == fmv.[sd] RR, V
        else if (is_ireg(r) && is_ireg(v))
            EI(0x13, 0, rr, ireg(v), 0); // addi RR, V, 0 == mv RR, V
        else {
            int func7 = is_ireg(r) ? 0x70 : 0x78;
            if (size == 8)
                func7 |= 1;
            assert(size == 4 || size == 8);
            o(0x53 | (rr << 7) | ((is_freg(v) ? freg(v) : ireg(v)) << 15)
              | (func7 << 25)); // fmv.{w.x, x.w, d.x, x.d} RR, VR
        }
    } else if (v == VT_CMP) { // we rely on cmp_r to be the correct result
        EI(0x13, 0, rr, vtop->cmp_r, 0); // mv RR, CMP_R
    } else if ((v & ~1) == VT_JMP) {
        int t = v & 1;
        assert(is_ireg(r));
        EI(0x13, 0, rr, 0, t); // addi RR, x0, t
        gjmp_addr(ind + 8);
        gsym(fc);
        EI(0x13, 0, rr, 0, t ^ 1); // addi RR, x0, !t
    } else
        tcc_error("unimp: load(non-const)");
}
ST_FUNC void store(int r, SValue *sv)
{
    int fr = sv->r & VT_VALMASK;
    int rr = is_ireg(r) ? ireg(r) : freg(r);
    int fc = sv->c.i;
    int ft = sv->type.t;
    int bt = ft & VT_BTYPE;
    int align, size = type_size(&sv->type, &align);
    assert(!is_float(bt) || is_freg(r));
    if (bt == VT_STRUCT)
        tcc_error("unimp: store(struct)");
    if (size > 8)
        tcc_error("unimp: large sized store");
    assert(sv->r & VT_LVAL);
    if (fr == VT_LOCAL) {
        if (((unsigned)fc + (1 << 11)) >> 12)
            tcc_error("unimp: store(large local off) (0x%x)", fc);
        if (is_freg(r))
            ES(0x27, size == 4 ? 2 : 3, 8, rr, fc); // fs[wd] RR, fc(s0)
        else
            ES(0x23, size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3,
               8, rr, fc); // s[bhwd] RR, fc(s0)
    } else if (fr < VT_CONST) {
        int ptrreg = ireg(fr);
        /*if (((unsigned)fc + (1 << 11)) >> 12)
          tcc_error("unimp: store(large addend) (0x%x)", fc);*/
        fc = 0; // XXX support offsets regs
        if (is_freg(r))
            ES(0x27, size == 4 ? 2 : 3, ptrreg, rr, fc); // fs[wd] RR, fc(PTRREG)
        else
            ES(0x23, size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3,
               ptrreg, rr, fc); // s[bhwd] RR, fc(PTRREG)
    } else if (sv->r == (VT_CONST | VT_SYM | VT_LVAL)) {
        static Sym label;
        int tempr, addend = 0;
        if (1 || ((unsigned)fc + (1 << 11)) >> 12)
            addend = fc, fc = 0;

        tempr = ireg(get_reg(RC_INT));
        greloca(cur_text_section, sv->sym, ind,
                R_RISCV_PCREL_HI20, addend);
        if (!label.v) {
            label.v = tok_alloc(".L0 ", 4)->tok;
            label.type.t = VT_VOID | VT_STATIC;
        }
        label.c = 0; /* force new local ELF symbol */
        put_extern_sym(&label, cur_text_section, ind, 0);
        o(0x17 | (tempr << 7)); // auipc TEMPR, 0 %pcrel_hi(sym)+addend
        greloca(cur_text_section, &label, ind,
                R_RISCV_PCREL_LO12_S, 0);
        if (is_freg(r))
            ES(0x27, size == 4 ? 2 : 3, tempr, rr, fc); // fs[wd] RR, fc(TEMPR)
        else
            ES(0x23, size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3,
               tempr, rr, fc); // s[bhwd] RR, fc(TEMPR)
    } else
        tcc_error("implement me: %s(!local)", __FUNCTION__);
}
static void gcall(void)
{
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) && vtop->c.i == (int)vtop->c.i)) {
        /* constant symbolic case -> simple relocation */
        greloca(cur_text_section, vtop->sym, ind,
                R_RISCV_CALL_PLT, (int)vtop->c.i);
        o(0x17 | (1 << 7)); // auipc ra, 0 %call(func)
        o(0x80e7);          // jalr ra, 0 %call(func)
    } else if ((vtop->r & VT_VALMASK) < VT_CONST) {
        int r = ireg(vtop->r & VT_VALMASK);
        EI(0x67, 0, 1, r, 0); // jalr ra, 0(R)
    } else {
        int r = TREG_RA;
        load(r, vtop);
        r = ireg(r);
        EI(0x67, 0, 1, r, 0); // jalr ra, 0(R)
    }
}
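
/* Classify and pass call arguments: up to eight integer args go in a0-a7,
   up to eight float args in fa0-fa7, long double takes two integer
   registers (an even-aligned pair once past the named parameters), and
   anything else is pushed on the stack.  info[] records the decision per
   argument: 0-7 = aX, 8-15 = faX, bit 5 = passed on the stack. */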
ST_FUNC void gfunc_call(int nb_args)
{
    int i, align, size, aireg, afreg;
    int info[nb_args ? nb_args : 1];
    int stack_adj = 0, ofs;
    int force_stack = 0;
    SValue *sv;
    Sym *sa;
    aireg = afreg = 0;
    sa = vtop[-nb_args].type.ref->next;
    for (i = 0; i < nb_args; i++) {
        int *pareg, nregs, infreg = 0;
        sv = &vtop[1 + i - nb_args];
        sv->type.t &= ~VT_ARRAY; // XXX this should be done in tccgen.c
        size = type_size(&sv->type, &align);
        if ((size > 8 && ((sv->type.t & VT_BTYPE) != VT_LDOUBLE))
            || ((sv->type.t & VT_BTYPE) == VT_STRUCT))
            tcc_error("unimp: call arg %d wrong type", nb_args - i);
        nregs = 1;
        if ((sv->type.t & VT_BTYPE) == VT_LDOUBLE) {
            infreg = 0, nregs = 2;
            if (!sa) {
                aireg = (aireg + 1) & ~1;
            }
        } else
            infreg = sa && is_float(sv->type.t);
        pareg = infreg ? &afreg : &aireg;
        if ((*pareg < 8) && !force_stack) {
            info[i] = *pareg + (infreg ? 8 : 0);
            (*pareg)++;
            if (nregs == 1)
                ;
            else if (*pareg < 8)
                (*pareg)++;
            else {
                info[i] |= 16;
                stack_adj += 8;
                tcc_error("unimp: param passing half in reg, half on stack");
            }
        } else {
            info[i] = 32;
            stack_adj += (size + align - 1) & -align;
            if (!sa)
                force_stack = 1;
        }
        if (sa)
            sa = sa->next;
    }
    stack_adj = (stack_adj + 15) & -16;
    if (stack_adj) {
        EI(0x13, 0, 2, 2, -stack_adj); // addi sp, sp, -adj
        for (i = ofs = 0; i < nb_args; i++) {
            if (1 && info[i] >= 32) {
                vrotb(nb_args - i);
                size = type_size(&vtop->type, &align);
                /* Once we support offseted regs we can do this:
                     vset(&vtop->type, TREG_SP | VT_LVAL, ofs);
                   to construct the lvalue for the outgoing stack slot,
                   until then we have to jump through hoops. */
                vset(&char_pointer_type, TREG_SP, 0);
                vpushi(ofs);
                gen_op('+');
                indir();
                vtop->type = vtop[-1].type;
                vswap();
                vstore();
                vrott(nb_args - i);
                ofs += (size + align - 1) & -align;
                ofs = (ofs + 7) & -8;
            }
        }
    }
    for (i = 0; i < nb_args; i++) {
        int r = info[nb_args - 1 - i];
        if (r < 32) {
            r &= 15;
            vrotb(i+1);
            gv(r < 8 ? RC_R(r) : RC_F(r - 8));
            if (vtop->r2 < VT_CONST) {
                assert((vtop->type.t & VT_BTYPE) == VT_LDOUBLE);
                assert(vtop->r < 7);
                if (vtop->r2 != 1 + vtop->r) {
                    /* XXX we'd like to have 'gv' move directly into
                       the right class instead of us fixing it up. */
                    EI(0x13, 0, ireg(vtop->r) + 1, ireg(vtop->r2), 0); // mv Ra+1, RR2
                    vtop->r2 = 1 + vtop->r;
                }
            }
            vrott(i+1);
        }
    }
    vrotb(nb_args + 1);
    gcall();
    vtop -= nb_args + 1;
    if (stack_adj)
        EI(0x13, 0, 2, 2, stack_adj); // addi sp, sp, adj
}

static int func_sub_sp_offset;
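
/* gfunc_prolog() only reserves space for four instructions at
   func_sub_sp_offset (ind += 4 * 4); gfunc_epilog() later back-patches
   them with the real stack adjustment and the ra/s0 saves once the frame
   size (-loc) is known. */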
ST_FUNC void gfunc_prolog(CType *func_type)
{
    int i, addr, align, size;
    int param_addr = 0;
    int aireg, afreg;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    func_vt = sym->type;
    loc = -16; // for ra and s0
    func_sub_sp_offset = ind;
    ind += 4 * 4;
    if (sym->f.func_type == FUNC_ELLIPSIS) {
        tcc_error("unimp: vararg prologue");
    }

    aireg = afreg = 0;
    addr = 0; // XXX not correct
    /* if the function returns a structure, then add an
       implicit pointer parameter */
    size = type_size(&func_vt, &align);
    if (size > 2 * XLEN) {
        tcc_error("unimp: struct return");
        func_vc = loc;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        size = type_size(type, &align);
        if (size > 2 * XLEN) {
          from_stack:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
        } else {
            int regcount = 1;
            if (size > XLEN)
                regcount++, tcc_error("unimp: scalars > 64bit");
            if (regcount + (is_float(type->t) ? afreg : aireg) >= 8)
                goto from_stack;
            loc -= regcount * 8; // XXX could reserve only 'size' bytes
            param_addr = loc;
            for (i = 0; i < regcount; i++) {
                if (is_float(type->t)) {
                    assert(type->t == VT_FLOAT || type->t == VT_DOUBLE);
                    ES(0x27, size == 4 ? 2 : 3, 8, 10 + afreg, loc + i*8); // fs[wd] FAi, loc(s0)
                    afreg++;
                } else {
                    ES(0x23, 3, 8, 10 + aireg, loc + i*8); // sd aX, loc(s0) // XXX
                    aireg++;
                }
            }
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | lvalue_type(type->t), param_addr);
    }
}
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret,
                       int *ret_align, int *regsize)
{
    /* generic code can only deal with structs of pow(2) sizes
       (it always deals with whole registers), so go through our own
       code. */
    return 0;
}

ST_FUNC void gfunc_return(CType *func_type)
{
    int align, size = type_size(func_type, &align);
    if ((func_type->t & VT_BTYPE) == VT_STRUCT
        || size > 2 * XLEN) {
        tcc_error("unimp: struct or large return");
    }
    if (is_float(func_type->t))
        gv(RC_FRET);
    else
        gv(RC_IRET);
    vtop--;
}
ST_FUNC void gfunc_epilog(void)
{
    int v, saved_ind;

    v = (-loc + 15) & -16;

    EI(0x03, 3, 1, 2, v - 8);  // ld ra, v-8(sp)
    EI(0x03, 3, 8, 2, v - 16); // ld s0, v-16(sp)
    EI(0x13, 0, 2, 2, v);      // addi sp, sp, v
    EI(0x67, 0, 0, 1, 0);      // jalr x0, 0(x1), aka ret
    saved_ind = ind;
    ind = func_sub_sp_offset;
    EI(0x13, 0, 2, 2, -v);     // addi sp, sp, -v
    ES(0x23, 3, 2, 1, v - 8);  // sd ra, v-8(sp)
    ES(0x23, 3, 2, 8, v - 16); // sd s0, v-16(sp)
    EI(0x13, 0, 8, 2, v);      // addi s0, sp, v
    ind = saved_ind;
}
ST_FUNC void gen_va_start(void)
{
    tcc_error("implement me: %s", __FUNCTION__);
}

ST_FUNC void gen_va_arg(CType *t)
{
    tcc_error("implement me: %s", __FUNCTION__);
}

ST_FUNC void gen_fill_nops(int bytes)
{
    tcc_error("implement me: %s", __FUNCTION__);
    if ((bytes & 3))
        tcc_error("alignment of code section not multiple of 4");
}
// Generate forward branch to label:
ST_FUNC int gjmp(int t)
{
    if (nocode_wanted)
        return t;
    o(t);
    return ind - 4;
}

// Generate branch to known address:
ST_FUNC void gjmp_addr(int a)
{
    uint32_t r = a - ind, imm;
    if ((r + (1 << 21)) & ~((1U << 22) - 2))
        tcc_error("out-of-range jump");
    imm = (((r >> 12) & 0xff) << 12)
        | (((r >> 11) & 1) << 20)
        | (((r >> 1) & 0x3ff) << 21)
        | (((r >> 20) & 1) << 31);
    o(0x6f | imm); // jal x0, imm == j imm
}
ST_FUNC int gjmp_cond(int op, int t)
{
    int inv = op & 1;
    assert(op == TOK_EQ || op == TOK_NE);
    assert(vtop->cmp_r >= 10 && vtop->cmp_r < 18);
    o(0x63 | (!inv << 12) | (vtop->cmp_r << 15) | (8 << 7)); // bne/beq CMP_R, x0, +8 (skip the following jal)
    return gjmp(t);
}

ST_FUNC int gjmp_append(int n, int t)
{
    void *p;
    /* insert jump list n into t */
    if (n) {
        uint32_t n1 = n, n2;
        while ((n2 = read32le(p = cur_text_section->data + n1)))
            n1 = n2;
        write32le(p, t);
        t = n;
    }
    return t;
}
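
/* Integer binary ops: everything is emitted as an R-type OP instruction
   (opcode 0x33), with funct3/funct7 selecting the operation (funct7 0x01
   for the M-extension mul/div/rem forms, 0x20 for sub).  Comparisons are
   lowered to slt[u] or sub + seqz/snez, with an xori to invert where
   needed, and the result register is published via vset_VT_CMP/cmp_r.
   The 'll' argument is currently unused, so 32-bit operations are simply
   done at 64-bit width. */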
static void gen_opil(int op, int ll)
{
    int a, b, d;
    int inv = 0;
    int func3 = 0, func7 = 0;
    /* XXX We could special-case some constant args. */
    gv2(RC_INT, RC_INT);
    a = ireg(vtop[-1].r);
    b = ireg(vtop[0].r);
    vtop -= 2;
    d = get_reg(RC_INT);
    vtop++;
    vtop[0].r = d;
    d = ireg(d);
    switch (op) {
    case '%':
    case TOK_SAR:
    case TOK_SHR:
    case TOK_PDIV:
    default:
        tcc_error("implement me: %s(%s)", __FUNCTION__, get_tok_str(op, NULL));

    case '+':
        o(0x33 | (d << 7) | (a << 15) | (b << 20)); // add d, a, b
        break;
    case '-':
        o(0x33 | (d << 7) | (a << 15) | (b << 20) | (0x20 << 25)); //sub d, a, b
        break;
    case TOK_SHL:
        o(0x33 | (d << 7) | (a << 15) | (b << 20) | (1 << 12)); //sll d, a, b
        break;
    case '*':
        o(0x33 | (d << 7) | (a << 15) | (b << 20) | (0x01 << 25)); //mul d, a, b
        break;
    case '/':
        o(0x33 | (d << 7) | (a << 15) | (b << 20) | (0x01 << 25) | (4 << 12)); //div d, a, b
        break;
    case '&':
        o(0x33 | (d << 7) | (a << 15) | (b << 20) | (7 << 12)); // and d, a, b
        break;
    case '^':
        o(0x33 | (d << 7) | (a << 15) | (b << 20) | (4 << 12)); // xor d, a, b
        break;
    case '|':
        o(0x33 | (d << 7) | (a << 15) | (b << 20) | (6 << 12)); // or d, a, b
        break;
    case TOK_UMOD:
        o(0x33 | (d << 7) | (a << 15) | (b << 20) | (0x01 << 25) | (7 << 12)); //remu d, a, b
        break;
    case TOK_UDIV:
        o(0x33 | (d << 7) | (a << 15) | (b << 20) | (0x01 << 25) | (5 << 12)); //divu d, a, b
        break;

    case TOK_ULT:
    case TOK_UGE:
    case TOK_ULE:
    case TOK_UGT:
    case TOK_LT:
    case TOK_GE:
    case TOK_LE:
    case TOK_GT:
        if (op & 1) { // remove [U]GE,GT
            inv = 1;
            op--;
        }
        if ((op & 7) == 6) { // [U]LE
            int t = a; a = b; b = t;
            inv ^= 1;
        }
        o(0x33 | (d << 7) | (a << 15) | (b << 20) | (((op > TOK_UGT) ? 2 : 3) << 12)); // slt[u] d, a, b
        if (inv)
            EI(0x13, 4, d, d, 1); // xori d, d, 1
        vset_VT_CMP(TOK_NE);
        vtop->cmp_r = d;
        break;
    case TOK_NE:
    case TOK_EQ:
        o(0x33 | (d << 7) | (a << 15) | (b << 20) | (0x20 << 25)); // sub d, a, b
        if (op == TOK_NE)
            o(0x33 | (3 << 12) | (d << 7) | (0 << 15) | (d << 20)); // sltu d, x0, d == snez d,d
        else
            EI(0x13, 3, d, d, 1); // sltiu d, d, 1 == seqz d,d
        vset_VT_CMP(TOK_NE);
        vtop->cmp_r = d;
        break;
    }
}
ST_FUNC void gen_opi(int op)
{
    gen_opil(op, 0);
}

ST_FUNC void gen_opl(int op)
{
    gen_opil(op, 1);
}
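
/* Float binary ops use the OP-FP opcode 0x53: funct7 is (op << 2) | dbl,
   so op 0..3 selects fadd/fsub/fmul/fdiv in single or double precision
   (funct3 7 = dynamic rounding mode), while 0x14 << 2 selects the
   FLE/FLT/FEQ compare group with funct3 picking the predicate.  NE, GT
   and GE are derived by inverting the result or swapping the operands. */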
ST_FUNC void gen_opf(int op)
{
    int rs1, rs2, rd, dbl, invert;
    gv2(RC_FLOAT, RC_FLOAT);
    assert(vtop->type.t == VT_DOUBLE || vtop->type.t == VT_FLOAT);
    dbl = vtop->type.t == VT_DOUBLE;
    rs1 = freg(vtop[-1].r);
    rs2 = freg(vtop->r);
    vtop--;
    invert = 0;
    switch(op) {
    default:
        assert(0);
    case '+':
        op = 0; // fadd
      arithop:
        rd = get_reg(RC_FLOAT);
        vtop->r = rd;
        rd = freg(rd);
        o(0x53 | (rd << 7) | (rs1 << 15) | (rs2 << 20) | (7 << 12) | (dbl << 25) | (op << 27)); // fop.[sd] RD, RS1, RS2 (dyn rm)
        break;
    case '-':
        op = 1; // fsub
        goto arithop;
    case '*':
        op = 2; // fmul
        goto arithop;
    case '/':
        op = 3; // fdiv
        goto arithop;
    case TOK_EQ:
        op = 2; // EQ
      cmpop:
        rd = get_reg(RC_INT);
        vtop->r = rd;
        rd = ireg(rd);
        o(0x53 | (rd << 7) | (rs1 << 15) | (rs2 << 20) | (op << 12) | (dbl << 25) | (0x14 << 27)); // fcmp.[sd] RD, RS1, RS2 (op == eq/lt/le)
        if (invert)
            EI(0x13, 4, rd, rd, 1); // xori RD, 1
        break;
    case TOK_NE:
        invert = 1;
        op = 2; // EQ
        goto cmpop;
    case TOK_LT:
        op = 1; // LT
        goto cmpop;
    case TOK_LE:
        op = 0; // LE
        goto cmpop;
    case TOK_GT:
        op = 1; // LT
        rd = rs1, rs1 = rs2, rs2 = rd;
        goto cmpop;
    case TOK_GE:
        op = 0; // LE
        rd = rs1, rs1 = rs2, rs2 = rd;
        goto cmpop;
    }
}
ST_FUNC void gen_cvt_sxtw(void)
{
    /* XXX on risc-v the registers are usually sign-extended already.
       Let's try to not do anything here. */
}

ST_FUNC void gen_cvt_itof(int t)
{
    tcc_error("implement me: %s", __FUNCTION__);
}

ST_FUNC void gen_cvt_ftoi(int t)
{
    tcc_error("implement me: %s", __FUNCTION__);
}
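
/* fcvt.d.s / fcvt.s.d are emitted through EI() because their rs2 field
   (source format: 0 = single, 1 = double) and funct7 (0x21 resp. 0x20)
   together occupy the same bits 31:20 as an I-type immediate. */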
ST_FUNC void gen_cvt_ftof(int dt)
{
    int st = vtop->type.t & VT_BTYPE, rs, rd;
    dt &= VT_BTYPE;
    assert (dt == VT_FLOAT || dt == VT_DOUBLE);
    assert (st == VT_FLOAT || st == VT_DOUBLE);
    if (st == dt)
        return;
    rs = gv(RC_FLOAT);
    rd = get_reg(RC_FLOAT);
    if (dt == VT_DOUBLE)
        EI(0x53, 7, freg(rd), freg(rs), 0x21 << 5); // fcvt.d.s RD, RS (dyn rm)
    else
        EI(0x53, 7, freg(rd), freg(rs), (0x20 << 5) | 1); // fcvt.s.d RD, RS
    vtop->r = rd;
}

ST_FUNC void ggoto(void)
{
    tcc_error("implement me: %s", __FUNCTION__);
}

ST_FUNC void gen_vla_sp_save(int addr)
{
    tcc_error("implement me: %s", __FUNCTION__);
}

ST_FUNC void gen_vla_sp_restore(int addr)
{
    tcc_error("implement me: %s", __FUNCTION__);
}

ST_FUNC void gen_vla_alloc(CType *type, int align)
{
    tcc_error("implement me: %s", __FUNCTION__);
}
#endif