/*
 * OpenRISC translation
 *
 * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
 *                         Feng Gao <gf91597@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu-common.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
#define LOG_DIS(str, ...) \
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%08x: " str, dc->pc, ## __VA_ARGS__)
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
typedef struct DisasContext {
    TranslationBlock *tb;
    target_ulong pc;
    uint32_t is_jmp;
    uint32_t mem_idx;
    uint32_t tb_flags;
    uint32_t delayed_branch;
    bool singlestep_enabled;
} DisasContext;

static TCGv cpu_sr;
static TCGv cpu_R[32];
static TCGv cpu_R0;
static TCGv cpu_pc;
static TCGv jmp_pc;             /* l.jr/l.jalr temp pc */
static TCGv cpu_ppc;
static TCGv cpu_sr_f;           /* bf/bnf, F flag taken */
static TCGv cpu_sr_cy;          /* carry (unsigned overflow) */
static TCGv cpu_sr_ov;          /* signed overflow */
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;
static TCGv_i32 fpcsr;
static TCGv_i64 cpu_mac;        /* MACHI:MACLO */
static TCGv_i32 cpu_dflag;
#include "exec/gen-icount.h"
void openrisc_translate_init(void)
{
    static const char * const regnames[] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    };
    int i;

    cpu_sr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, sr), "sr");
    cpu_dflag = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUOpenRISCState, dflag),
                                       "dflag");
    cpu_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, pc), "pc");
    cpu_ppc = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUOpenRISCState, ppc), "ppc");
    jmp_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
    cpu_sr_f = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUOpenRISCState, sr_f), "sr_f");
    cpu_sr_cy = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_cy), "sr_cy");
    cpu_sr_ov = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_ov), "sr_ov");
    cpu_lock_addr = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUOpenRISCState, lock_addr),
                                       "lock_addr");
    cpu_lock_value = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUOpenRISCState, lock_value),
                                        "lock_value");
    fpcsr = tcg_global_mem_new_i32(cpu_env,
                                   offsetof(CPUOpenRISCState, fpcsr),
                                   "fpcsr");
    cpu_mac = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUOpenRISCState, mac),
                                     "mac");
    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUOpenRISCState,
                                               shadow_gpr[0][i]),
                                      regnames[i]);
    }
    cpu_R0 = cpu_R[0];
}
static void gen_exception(DisasContext *dc, unsigned int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_illegal_exception(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    gen_exception(dc, EXCP_ILLEGAL);
    dc->is_jmp = DISAS_UPDATE;
}
/* not used yet; enable this when we add or64 support.  */
/*#ifdef TARGET_OPENRISC64
static void check_ob64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OB64S)) {
        gen_illegal_exception(dc);
    }
}

static void check_of64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OF64S)) {
        gen_illegal_exception(dc);
    }
}

static void check_ov64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OV64S)) {
        gen_illegal_exception(dc);
    }
}
#endif*/
/* We're about to write to REG.  On the off-chance that the user is
   writing to R0, re-instate the architectural register.  */
#define check_r0_write(reg)             \
    do {                                \
        if (unlikely(reg == 0)) {       \
            cpu_R[0] = cpu_R0;          \
        }                               \
    } while (0)
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
    if (unlikely(dc->singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_goto_tb(n);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (dc->singlestep_enabled) {
            gen_exception(dc, EXCP_DEBUG);
        }
        tcg_gen_exit_tb(0);
    }
}
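
/*
 * Two exit strategies: when the destination is on the same guest page
 * as the current TB (and we are not single-stepping), goto_tb plus
 * exit_tb((uintptr_t)tb + n) lets the runtime patch a direct jump from
 * this TB to the next one (TB chaining).  The fallback path ends with
 * exit_tb(0), which returns control to the main loop for a hash-table
 * lookup of the next TB.
 */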
static void gen_jump(DisasContext *dc, int32_t n26, uint32_t reg, uint32_t op0)
{
    target_ulong tmp_pc = dc->pc + n26 * 4;

    switch (op0) {
    case 0x00: /* l.j */
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        break;
    case 0x01: /* l.jal */
        tcg_gen_movi_tl(cpu_R[9], dc->pc + 8);
        /* Optimize jal being used to load the PC for PIC.  */
        if (tmp_pc == dc->pc + 8) {
            return;
        }
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        break;
    case 0x03: /* l.bnf */
    case 0x04: /* l.bf */
        {
            TCGv t_next = tcg_const_tl(dc->pc + 8);
            TCGv t_true = tcg_const_tl(tmp_pc);
            TCGv t_zero = tcg_const_tl(0);

            tcg_gen_movcond_tl(op0 == 0x03 ? TCG_COND_EQ : TCG_COND_NE,
                               jmp_pc, cpu_sr_f, t_zero, t_true, t_next);

            tcg_temp_free(t_next);
            tcg_temp_free(t_true);
            tcg_temp_free(t_zero);
        }
        break;
    case 0x11: /* l.jr */
        tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
        break;
    case 0x12: /* l.jalr */
        tcg_gen_movi_tl(cpu_R[9], (dc->pc + 8));
        tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
        break;
    default:
        gen_illegal_exception(dc);
        break;
    }

    dc->delayed_branch = 2;
}
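
/*
 * Note that none of the cases above branches directly: they only
 * compute jmp_pc.  Setting delayed_branch = 2 arranges for the main
 * translation loop to decrement once for the jump insn itself and once
 * for the delay-slot insn, and to copy jmp_pc into cpu_pc when the
 * count reaches zero (see gen_intermediate_code).
 */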
static void gen_ove_cy(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cy(cpu_env);
    }
}

static void gen_ove_ov(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_ov(cpu_env);
    }
}

static void gen_ove_cyov(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cyov(cpu_env);
    }
}
static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, srcb, t0);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}
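
/*
 * A worked example of the flag computation above: add2 computes
 * CY:res = srca + srcb with zero high inputs, so cpu_sr_cy receives
 * exactly the unsigned carry-out.  The signed overflow term is
 *     OV = (res ^ srcb) & ~(srca ^ srcb)
 * i.e. the operands agreed in sign but the result does not; for
 * 0x7fffffff + 1 the MSB of srca^srcb is 0 and that of res^srcb is 1,
 * so the OV sign bit is set.  Only the sign bit of cpu_sr_ov is
 * meaningful here (the low bits are don't-care); gen_ove_cyov raises
 * the range exception from it when SR_OVE is enabled.
 */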
static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, cpu_sr_cy, t0);
    tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, t0);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}
static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv res = tcg_temp_new();

    tcg_gen_sub_tl(res, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_cy, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_ov, res, srca);
    tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}
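
/*
 * The subtraction analogue: signed overflow is
 *     OV = (res ^ srca) & (srca ^ srcb)
 * (the operands differed in sign and the result lost srca's sign),
 * while the unsigned borrow is simply srca < srcb, which setcond
 * computes directly into cpu_sr_cy.
 */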
static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
    tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
    tcg_temp_free(t0);
    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);

    gen_ove_ov(dc);
}
static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    tcg_gen_muls2_tl(dest, cpu_sr_cy, srca, srcb);
    tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);

    gen_ove_cy(dc);
}
static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_ov);
    tcg_gen_div_tl(dest, srca, t0);
    tcg_temp_free(t0);

    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
    gen_ove_ov(dc);
}
static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_cy);
    tcg_gen_divu_tl(dest, srca, t0);
    tcg_temp_free(t0);

    gen_ove_cy(dc);
}
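
/*
 * The divide-by-one trick used by gen_div and gen_divu above: the
 * setcond leaves 1 in the flag register exactly when srcb == 0, so
 * "srcb | flag" rewrites a zero divisor as 1.  The host-side division
 * can then never trap, while the guest-visible result of division by
 * zero remains architecturally undefined anyway.
 */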
static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_ov, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
        tcg_gen_sari_i64(t1, cpu_mac, 63);
        tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high);
        tcg_temp_free_i64(high);
        tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
        tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);

        gen_ove_ov(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_cy, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_mulu2_i64(cpu_mac, high, t1, t2);
        tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0);
        tcg_gen_trunc_i64_tl(cpu_sr_cy, high);
        tcg_temp_free_i64(high);

        gen_ove_cy(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_andc_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}
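
/*
 * The xor/andc sequence above is the usual branch-free signed-overflow
 * test for the accumulate step: with old = MAC before the addition and
 * new = MAC after it, overflow = ~(old ^ t1) & (t1 ^ new), i.e. the
 * addend agreed in sign with the accumulator but the sum does not.
 * Only the addition can overflow; the widening 32x32->64 multiply
 * itself cannot.
 */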
static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t1);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}
static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_and_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}
static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t2);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}
static void gen_lwa(DisasContext *dc, TCGv rd, TCGv ra, int32_t ofs)
{
    TCGv ea = tcg_temp_new();

    tcg_gen_addi_tl(ea, ra, ofs);
    tcg_gen_qemu_ld_tl(rd, ea, dc->mem_idx, MO_TEUL);
    tcg_gen_mov_tl(cpu_lock_addr, ea);
    tcg_gen_mov_tl(cpu_lock_value, rd);
    tcg_temp_free(ea);
}
static void gen_swa(DisasContext *dc, int b, TCGv ra, int32_t ofs)
{
    TCGv ea, val;
    TCGLabel *lab_fail, *lab_done;

    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, ra, ofs);

    /* For TB_FLAGS_R0_0, the branch below invalidates the temporary assigned
       to cpu_R[0].  Since l.swa is quite often immediately followed by a
       branch, don't bother reallocating; finish the TB using the "real" R0.
       This also takes care of RB input across the branch.  */
    cpu_R[0] = cpu_R0;

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
    tcg_temp_free(ea);

    val = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
                              cpu_R[b], dc->mem_idx, MO_TEUL);
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
    tcg_temp_free(val);

    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    tcg_gen_movi_tl(cpu_sr_f, 0);

    gen_set_label(lab_done);
    tcg_gen_movi_tl(cpu_lock_addr, -1);
}
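
/*
 * l.lwa and l.swa form a load-linked/store-conditional pair.  gen_lwa
 * records the reservation in cpu_lock_addr/cpu_lock_value; gen_swa
 * performs the conditional store as a compare-and-swap against the
 * remembered value, the usual way TCG targets emulate LL/SC (an
 * ABA-style change that restores the original value is not detected).
 * Every path through gen_swa drops the reservation by resetting
 * cpu_lock_addr to -1.
 */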
static void dec_calc(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, op1, op2;
    uint32_t ra, rb, rd;
    op0 = extract32(insn, 0, 4);
    op1 = extract32(insn, 8, 2);
    op2 = extract32(insn, 6, 2);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);
    rd = extract32(insn, 21, 5);

    switch (op1) {
    case 0:
        switch (op0) {
        case 0x0: /* l.add */
            LOG_DIS("l.add r%d, r%d, r%d\n", rd, ra, rb);
            gen_add(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x1: /* l.addc */
            LOG_DIS("l.addc r%d, r%d, r%d\n", rd, ra, rb);
            gen_addc(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x2: /* l.sub */
            LOG_DIS("l.sub r%d, r%d, r%d\n", rd, ra, rb);
            gen_sub(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x3: /* l.and */
            LOG_DIS("l.and r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_and_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x4: /* l.or */
            LOG_DIS("l.or r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_or_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x5: /* l.xor */
            LOG_DIS("l.xor r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_xor_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x8:
            switch (op2) {
            case 0: /* l.sll */
                LOG_DIS("l.sll r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_shl_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            case 1: /* l.srl */
                LOG_DIS("l.srl r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_shr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            case 2: /* l.sra */
                LOG_DIS("l.sra r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_sar_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            case 3: /* l.ror */
                LOG_DIS("l.ror r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_rotr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            }
            break;
        case 0xc:
            switch (op2) {
            case 0: /* l.exths */
                LOG_DIS("l.exths r%d, r%d\n", rd, ra);
                tcg_gen_ext16s_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 1: /* l.extbs */
                LOG_DIS("l.extbs r%d, r%d\n", rd, ra);
                tcg_gen_ext8s_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 2: /* l.exthz */
                LOG_DIS("l.exthz r%d, r%d\n", rd, ra);
                tcg_gen_ext16u_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 3: /* l.extbz */
                LOG_DIS("l.extbz r%d, r%d\n", rd, ra);
                tcg_gen_ext8u_tl(cpu_R[rd], cpu_R[ra]);
                return;
            }
            break;

        case 0xd:
            switch (op2) {
            case 0: /* l.extws */
                LOG_DIS("l.extws r%d, r%d\n", rd, ra);
                tcg_gen_ext32s_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 1: /* l.extwz */
                LOG_DIS("l.extwz r%d, r%d\n", rd, ra);
                tcg_gen_ext32u_tl(cpu_R[rd], cpu_R[ra]);
                return;
            }
            break;
        case 0xe: /* l.cmov */
            LOG_DIS("l.cmov r%d, r%d, r%d\n", rd, ra, rb);
            {
                TCGv zero = tcg_const_tl(0);
                tcg_gen_movcond_tl(TCG_COND_NE, cpu_R[rd], cpu_sr_f, zero,
                                   cpu_R[ra], cpu_R[rb]);
                tcg_temp_free(zero);
            }
            return;

        case 0xf: /* l.ff1 */
            LOG_DIS("l.ff1 r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_ctzi_tl(cpu_R[rd], cpu_R[ra], -1);
            tcg_gen_addi_tl(cpu_R[rd], cpu_R[rd], 1);
            return;
        }
        break;

    case 1:
        switch (op0) {
        case 0xf: /* l.fl1 */
            LOG_DIS("l.fl1 r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_clzi_tl(cpu_R[rd], cpu_R[ra], TARGET_LONG_BITS);
            tcg_gen_subfi_tl(cpu_R[rd], TARGET_LONG_BITS, cpu_R[rd]);
            return;
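
            /*
             * l.ff1 is the 1-based index of the least significant set
             * bit: ctz(x) + 1, where ctzi's zero-input value of -1
             * folds the x == 0 case to the architected result 0.
             * l.fl1 is TARGET_LONG_BITS - clz(x), with clz(0) defined
             * as TARGET_LONG_BITS so that x == 0 again yields 0.
             */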
        }
        break;

    case 3:
        switch (op0) {
        case 0x6: /* l.mul */
            LOG_DIS("l.mul r%d, r%d, r%d\n", rd, ra, rb);
            gen_mul(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x7: /* l.muld */
            LOG_DIS("l.muld r%d, r%d\n", ra, rb);
            gen_muld(dc, cpu_R[ra], cpu_R[rb]);
            return;

        case 0x9: /* l.div */
            LOG_DIS("l.div r%d, r%d, r%d\n", rd, ra, rb);
            gen_div(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0xa: /* l.divu */
            LOG_DIS("l.divu r%d, r%d, r%d\n", rd, ra, rb);
            gen_divu(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0xb: /* l.mulu */
            LOG_DIS("l.mulu r%d, r%d, r%d\n", rd, ra, rb);
            gen_mulu(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0xc: /* l.muldu */
            LOG_DIS("l.muldu r%d, r%d\n", ra, rb);
            gen_muldu(dc, cpu_R[ra], cpu_R[rb]);
            return;
        }
        break;
    }

    gen_illegal_exception(dc);
}
static void dec_misc(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, op1;
    uint32_t ra, rb, rd;
    uint32_t L6, K5, K16, K5_11;
    int32_t I16, I5_11, N26;
    TCGMemOp mop;
    TCGv t0;

    op0 = extract32(insn, 26, 6);
    op1 = extract32(insn, 24, 2);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);
    rd = extract32(insn, 21, 5);
    L6 = extract32(insn, 5, 6);
    K5 = extract32(insn, 0, 5);
    K16 = extract32(insn, 0, 16);
    I16 = (int16_t)K16;
    N26 = sextract32(insn, 0, 26);
    K5_11 = (extract32(insn, 21, 5) << 11) | extract32(insn, 0, 11);
    I5_11 = (int16_t)K5_11;

    switch (op0) {
    case 0x00: /* l.j */
739 LOG_DIS("l.j %d\n", N26
);
740 gen_jump(dc
, N26
, 0, op0
);
743 case 0x01: /* l.jal */
744 LOG_DIS("l.jal %d\n", N26
);
745 gen_jump(dc
, N26
, 0, op0
);
748 case 0x03: /* l.bnf */
749 LOG_DIS("l.bnf %d\n", N26
);
750 gen_jump(dc
, N26
, 0, op0
);
753 case 0x04: /* l.bf */
754 LOG_DIS("l.bf %d\n", N26
);
755 gen_jump(dc
, N26
, 0, op0
);
        case 0x01: /* l.nop */
            LOG_DIS("l.nop %d\n", I16);
            break;

        default:
            gen_illegal_exception(dc);
            break;
        }
        break;
    case 0x11: /* l.jr */
        LOG_DIS("l.jr r%d\n", rb);
        gen_jump(dc, 0, rb, op0);
        break;

    case 0x12: /* l.jalr */
        LOG_DIS("l.jalr r%d\n", rb);
        gen_jump(dc, 0, rb, op0);
        break;

    case 0x13: /* l.maci */
        LOG_DIS("l.maci r%d, %d\n", ra, I16);
        t0 = tcg_const_tl(I16);
        gen_mac(dc, cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;
    case 0x09: /* l.rfe */
        LOG_DIS("l.rfe\n");
        {
#if defined(CONFIG_USER_ONLY)
            gen_illegal_exception(dc);
#else
            if (dc->mem_idx == MMU_USER_IDX) {
                gen_illegal_exception(dc);
                return;
            }
            gen_helper_rfe(cpu_env);
            dc->is_jmp = DISAS_UPDATE;
#endif
        }
        break;
    case 0x1b: /* l.lwa */
        LOG_DIS("l.lwa r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        gen_lwa(dc, cpu_R[rd], cpu_R[ra], I16);
        break;
    case 0x1c: /* l.cust1 */
        LOG_DIS("l.cust1\n");
        break;

    case 0x1d: /* l.cust2 */
        LOG_DIS("l.cust2\n");
        break;

    case 0x1e: /* l.cust3 */
        LOG_DIS("l.cust3\n");
        break;

    case 0x1f: /* l.cust4 */
        LOG_DIS("l.cust4\n");
        break;

    case 0x3c: /* l.cust5 */
        LOG_DIS("l.cust5 r%d, r%d, r%d, %d, %d\n", rd, ra, rb, L6, K5);
        break;

    case 0x3d: /* l.cust6 */
        LOG_DIS("l.cust6\n");
        break;

    case 0x3e: /* l.cust7 */
        LOG_DIS("l.cust7\n");
        break;

    case 0x3f: /* l.cust8 */
        LOG_DIS("l.cust8\n");
        break;
    /* not used yet; enable this when we add or64 support.  */
    /*#ifdef TARGET_OPENRISC64
    case 0x20:     l.ld
        LOG_DIS("l.ld r%d, r%d, %d\n", rd, ra, I16);
        check_ob64s(dc);
        mop = MO_TEQ;
        goto do_load;
#endif*/

    case 0x21: /* l.lwz */
        LOG_DIS("l.lwz r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TEUL;
        goto do_load;

    case 0x22: /* l.lws */
        LOG_DIS("l.lws r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TESL;
        goto do_load;

    case 0x23: /* l.lbz */
        LOG_DIS("l.lbz r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_UB;
        goto do_load;

    case 0x24: /* l.lbs */
        LOG_DIS("l.lbs r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_SB;
        goto do_load;

    case 0x25: /* l.lhz */
        LOG_DIS("l.lhz r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TEUW;
        goto do_load;

    case 0x26: /* l.lhs */
        LOG_DIS("l.lhs r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TESW;
        goto do_load;

    do_load:
        check_r0_write(rd);
        t0 = tcg_temp_new();
        tcg_gen_addi_tl(t0, cpu_R[ra], I16);
        tcg_gen_qemu_ld_tl(cpu_R[rd], t0, dc->mem_idx, mop);
        tcg_temp_free(t0);
        break;
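
    /*
     * Each load opcode above merely selects a memory operation and
     * jumps to the shared do_load tail; the store opcodes below mirror
     * this with do_store.  The MO_TE* constants encode access size,
     * signedness and target endianness (big-endian for OpenRISC).
     */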
    case 0x27: /* l.addi */
        LOG_DIS("l.addi r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        t0 = tcg_const_tl(I16);
        gen_add(dc, cpu_R[rd], cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x28: /* l.addic */
        LOG_DIS("l.addic r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        t0 = tcg_const_tl(I16);
        gen_addc(dc, cpu_R[rd], cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x29: /* l.andi */
        LOG_DIS("l.andi r%d, r%d, %d\n", rd, ra, K16);
        check_r0_write(rd);
        tcg_gen_andi_tl(cpu_R[rd], cpu_R[ra], K16);
        break;

    case 0x2a: /* l.ori */
        LOG_DIS("l.ori r%d, r%d, %d\n", rd, ra, K16);
        check_r0_write(rd);
        tcg_gen_ori_tl(cpu_R[rd], cpu_R[ra], K16);
        break;

    case 0x2b: /* l.xori */
        LOG_DIS("l.xori r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        tcg_gen_xori_tl(cpu_R[rd], cpu_R[ra], I16);
        break;

    case 0x2c: /* l.muli */
        LOG_DIS("l.muli r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        t0 = tcg_const_tl(I16);
        gen_mul(dc, cpu_R[rd], cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;
    case 0x2d: /* l.mfspr */
        LOG_DIS("l.mfspr r%d, r%d, %d\n", rd, ra, K16);
        check_r0_write(rd);
        {
#if defined(CONFIG_USER_ONLY)
            gen_illegal_exception(dc);
#else
            TCGv_i32 ti = tcg_const_i32(K16);
            if (dc->mem_idx == MMU_USER_IDX) {
                gen_illegal_exception(dc);
                return;
            }
            gen_helper_mfspr(cpu_R[rd], cpu_env, cpu_R[rd], cpu_R[ra], ti);
            tcg_temp_free_i32(ti);
#endif
        }
        break;
    case 0x30: /* l.mtspr */
        LOG_DIS("l.mtspr r%d, r%d, %d\n", ra, rb, K5_11);
        {
#if defined(CONFIG_USER_ONLY)
            gen_illegal_exception(dc);
#else
            TCGv_i32 im = tcg_const_i32(K5_11);
            if (dc->mem_idx == MMU_USER_IDX) {
                gen_illegal_exception(dc);
                return;
            }
            gen_helper_mtspr(cpu_env, cpu_R[ra], cpu_R[rb], im);
            tcg_temp_free_i32(im);
#endif
        }
        break;
    case 0x33: /* l.swa */
        LOG_DIS("l.swa r%d, r%d, %d\n", ra, rb, I5_11);
        gen_swa(dc, rb, cpu_R[ra], I5_11);
        break;

    /* not used yet; enable this when we add or64 support.  */
    /*#ifdef TARGET_OPENRISC64
    case 0x34:     l.sd
        LOG_DIS("l.sd r%d, r%d, %d\n", ra, rb, I5_11);
        check_ob64s(dc);
        mop = MO_TEQ;
        goto do_store;
#endif*/

    case 0x35: /* l.sw */
        LOG_DIS("l.sw r%d, r%d, %d\n", ra, rb, I5_11);
        mop = MO_TEUL;
        goto do_store;

    case 0x36: /* l.sb */
        LOG_DIS("l.sb r%d, r%d, %d\n", ra, rb, I5_11);
        mop = MO_UB;
        goto do_store;

    case 0x37: /* l.sh */
        LOG_DIS("l.sh r%d, r%d, %d\n", ra, rb, I5_11);
        mop = MO_TEUW;
        goto do_store;

    do_store:
        {
            TCGv t0 = tcg_temp_new();
            tcg_gen_addi_tl(t0, cpu_R[ra], I5_11);
            tcg_gen_qemu_st_tl(cpu_R[rb], t0, dc->mem_idx, mop);
            tcg_temp_free(t0);
        }
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}
static void dec_mac(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t ra, rb;
    op0 = extract32(insn, 0, 4);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);

    switch (op0) {
    case 0x0001: /* l.mac */
        LOG_DIS("l.mac r%d, r%d\n", ra, rb);
        gen_mac(dc, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0002: /* l.msb */
        LOG_DIS("l.msb r%d, r%d\n", ra, rb);
        gen_msb(dc, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0003: /* l.macu */
        LOG_DIS("l.macu r%d, r%d\n", ra, rb);
        gen_macu(dc, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0004: /* l.msbu */
        LOG_DIS("l.msbu r%d, r%d\n", ra, rb);
        gen_msbu(dc, cpu_R[ra], cpu_R[rb]);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}
static void dec_logic(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t rd, ra, L6, S6;
    op0 = extract32(insn, 6, 2);
    rd = extract32(insn, 21, 5);
    ra = extract32(insn, 16, 5);
    L6 = extract32(insn, 0, 6);
    S6 = L6 & (TARGET_LONG_BITS - 1);

    check_r0_write(rd);
    switch (op0) {
    case 0x00: /* l.slli */
        LOG_DIS("l.slli r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_shli_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    case 0x01: /* l.srli */
        LOG_DIS("l.srli r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_shri_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    case 0x02: /* l.srai */
        LOG_DIS("l.srai r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_sari_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    case 0x03: /* l.rori */
        LOG_DIS("l.rori r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_rotri_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}
static void dec_M(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t rd;
    uint32_t K16;
    op0 = extract32(insn, 16, 1);
    rd = extract32(insn, 21, 5);
    K16 = extract32(insn, 0, 16);

    check_r0_write(rd);
    switch (op0) {
    case 0x0: /* l.movhi */
        LOG_DIS("l.movhi r%d, %d\n", rd, K16);
        tcg_gen_movi_tl(cpu_R[rd], (K16 << 16));
        break;

    case 0x1: /* l.macrc */
        LOG_DIS("l.macrc r%d\n", rd);
        tcg_gen_trunc_i64_tl(cpu_R[rd], cpu_mac);
        tcg_gen_movi_i64(cpu_mac, 0);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}
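
/*
 * Note on l.macrc: the MACHI:MACLO pair is modelled as the single
 * 64-bit global cpu_mac, so reading the accumulator is a truncation
 * to the low bits, and the "read and clear" side effect is a single
 * movi of zero to the whole 64-bit value.
 */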
static void dec_comp(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t ra, rb;

    op0 = extract32(insn, 21, 5);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);

    /* unsigned integers */
    tcg_gen_ext32u_tl(cpu_R[ra], cpu_R[ra]);
    tcg_gen_ext32u_tl(cpu_R[rb], cpu_R[rb]);

    switch (op0) {
    case 0x0: /* l.sfeq */
        LOG_DIS("l.sfeq r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1: /* l.sfne */
        LOG_DIS("l.sfne r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x2: /* l.sfgtu */
        LOG_DIS("l.sfgtu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x3: /* l.sfgeu */
        LOG_DIS("l.sfgeu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x4: /* l.sfltu */
        LOG_DIS("l.sfltu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x5: /* l.sfleu */
        LOG_DIS("l.sfleu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xa: /* l.sfgts */
        LOG_DIS("l.sfgts r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xb: /* l.sfges */
        LOG_DIS("l.sfges r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xc: /* l.sflts */
        LOG_DIS("l.sflts r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xd: /* l.sfles */
        LOG_DIS("l.sfles r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}
static void dec_compi(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, ra;
    int32_t I16;

    op0 = extract32(insn, 21, 5);
    ra = extract32(insn, 16, 5);
    I16 = sextract32(insn, 0, 16);

    switch (op0) {
    case 0x0: /* l.sfeqi */
        LOG_DIS("l.sfeqi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x1: /* l.sfnei */
        LOG_DIS("l.sfnei r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x2: /* l.sfgtui */
        LOG_DIS("l.sfgtui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x3: /* l.sfgeui */
        LOG_DIS("l.sfgeui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x4: /* l.sfltui */
        LOG_DIS("l.sfltui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x5: /* l.sfleui */
        LOG_DIS("l.sfleui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xa: /* l.sfgtsi */
        LOG_DIS("l.sfgtsi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xb: /* l.sfgesi */
        LOG_DIS("l.sfgesi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xc: /* l.sfltsi */
        LOG_DIS("l.sfltsi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xd: /* l.sflesi */
        LOG_DIS("l.sflesi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R[ra], I16);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}
static void dec_sys(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t K16;

    op0 = extract32(insn, 16, 10);
    K16 = extract32(insn, 0, 16);

    switch (op0) {
    case 0x000: /* l.sys */
        LOG_DIS("l.sys %d\n", K16);
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        gen_exception(dc, EXCP_SYSCALL);
        dc->is_jmp = DISAS_UPDATE;
        break;

    case 0x100: /* l.trap */
        LOG_DIS("l.trap %d\n", K16);
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        gen_exception(dc, EXCP_TRAP);
        break;

    case 0x300: /* l.csync */
        LOG_DIS("l.csync\n");
        break;

    case 0x200: /* l.msync */
        LOG_DIS("l.msync\n");
        tcg_gen_mb(TCG_MO_ALL);
        break;

    case 0x270: /* l.psync */
        LOG_DIS("l.psync\n");
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}
static void dec_float(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t ra, rb, rd;
    op0 = extract32(insn, 0, 8);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);
    rd = extract32(insn, 21, 5);

    switch (op0) {
    case 0x00: /* lf.add.s */
        LOG_DIS("lf.add.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_add_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x01: /* lf.sub.s */
        LOG_DIS("lf.sub.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_sub_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x02: /* lf.mul.s */
        LOG_DIS("lf.mul.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_mul_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x03: /* lf.div.s */
        LOG_DIS("lf.div.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_div_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x04: /* lf.itof.s */
        LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
        check_r0_write(rd);
        gen_helper_itofs(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x05: /* lf.ftoi.s */
        LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
        check_r0_write(rd);
        gen_helper_ftois(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x06: /* lf.rem.s */
        LOG_DIS("lf.rem.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_rem_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x07: /* lf.madd.s */
        LOG_DIS("lf.madd.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_madd_s(cpu_R[rd], cpu_env, cpu_R[rd],
                                cpu_R[ra], cpu_R[rb]);
        break;

    case 0x08: /* lf.sfeq.s */
        LOG_DIS("lf.sfeq.s r%d, r%d\n", ra, rb);
        gen_helper_float_eq_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x09: /* lf.sfne.s */
        LOG_DIS("lf.sfne.s r%d, r%d\n", ra, rb);
        gen_helper_float_ne_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0a: /* lf.sfgt.s */
        LOG_DIS("lf.sfgt.s r%d, r%d\n", ra, rb);
        gen_helper_float_gt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0b: /* lf.sfge.s */
        LOG_DIS("lf.sfge.s r%d, r%d\n", ra, rb);
        gen_helper_float_ge_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0c: /* lf.sflt.s */
        LOG_DIS("lf.sflt.s r%d, r%d\n", ra, rb);
        gen_helper_float_lt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0d: /* lf.sfle.s */
        LOG_DIS("lf.sfle.s r%d, r%d\n", ra, rb);
        gen_helper_float_le_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    /* not used yet; enable this when we add or64 support.  */
    /*#ifdef TARGET_OPENRISC64
    case 0x10:     lf.add.d
        LOG_DIS("lf.add.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_add_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x11:     lf.sub.d
        LOG_DIS("lf.sub.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_sub_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x12:     lf.mul.d
        LOG_DIS("lf.mul.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_mul_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x13:     lf.div.d
        LOG_DIS("lf.div.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_div_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x14:     lf.itof.d
        LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_itofd(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x15:     lf.ftoi.d
        LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_ftoid(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x16:     lf.rem.d
        LOG_DIS("lf.rem.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_rem_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x17:     lf.madd.d
        LOG_DIS("lf.madd.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_madd_d(cpu_R[rd], cpu_env, cpu_R[rd],
                                cpu_R[ra], cpu_R[rb]);
        break;

    case 0x18:     lf.sfeq.d
        LOG_DIS("lf.sfeq.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_eq_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1a:     lf.sfgt.d
        LOG_DIS("lf.sfgt.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_gt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1b:     lf.sfge.d
        LOG_DIS("lf.sfge.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_ge_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x19:     lf.sfne.d
        LOG_DIS("lf.sfne.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_ne_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1c:     lf.sflt.d
        LOG_DIS("lf.sflt.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_lt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1d:     lf.sfle.d
        LOG_DIS("lf.sfle.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_le_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;
#endif*/

    default:
        gen_illegal_exception(dc);
        break;
    }
}
static void disas_openrisc_insn(DisasContext *dc, OpenRISCCPU *cpu)
{
    uint32_t op0;
    uint32_t insn;
    insn = cpu_ldl_code(&cpu->env, dc->pc);
    op0 = extract32(insn, 26, 6);

    switch (op0) {
    case 0x06:
        dec_M(dc, insn);
        break;

    case 0x08:
        dec_sys(dc, insn);
        break;

    case 0x2e:
        dec_logic(dc, insn);
        break;

    case 0x2f:
        dec_compi(dc, insn);
        break;

    case 0x31:
        dec_mac(dc, insn);
        break;

    case 0x32:
        dec_float(dc, insn);
        break;

    case 0x38:
        dec_calc(dc, insn);
        break;

    case 0x39:
        dec_comp(dc, insn);
        break;

    default:
        dec_misc(dc, insn);
        break;
    }
}
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUOpenRISCState *env = cs->env_ptr;
    OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
    struct DisasContext ctx, *dc = &ctx;
    uint32_t pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->mem_idx = cpu_mmu_index(&cpu->env, false);
    dc->tb_flags = tb->flags;
    dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
    dc->singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;

    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
    }

    gen_tb_start(tb);

    /* Allow the TCG optimizer to see that R0 == 0,
       when it's true, which is the common case.  */
    if (dc->tb_flags & TB_FLAGS_R0_0) {
        cpu_R[0] = tcg_const_tl(0);
    }

    do {
        tcg_gen_insn_start(dc->pc, (dc->delayed_branch ? 1 : 0)
                           | (num_insns ? 2 : 0));
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
            gen_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }
        disas_openrisc_insn(dc, cpu);
        dc->pc = dc->pc + 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                tcg_gen_mov_tl(cpu_pc, jmp_pc);
                tcg_gen_discard_tl(jmp_pc);
                dc->is_jmp = DISAS_UPDATE;
                break;
            }
        }
    } while (!dc->is_jmp
             && !tcg_op_buf_full()
             && !cs->singlestep_enabled
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    if (tb_cflags(tb) & CF_LAST_IO) {
        gen_io_end();
    }

    if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
        tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
    }

    tcg_gen_movi_tl(cpu_ppc, dc->pc - 4);
    if (dc->is_jmp == DISAS_NEXT) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_pc, dc->pc);
    }
    if (unlikely(cs->singlestep_enabled)) {
        gen_exception(dc, EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 0, dc->pc);
            break;
        default:
        case DISAS_JUMP:
            break;
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }

    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        log_target_disas(cs, pc_start, tb->size, 0);
        qemu_log("\n");
        qemu_log_unlock();
    }
}
void openrisc_cpu_dump_state(CPUState *cs, FILE *f,
                             fprintf_function cpu_fprintf,
                             int flags)
{
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    CPUOpenRISCState *env = &cpu->env;
    int i;

    cpu_fprintf(f, "PC=%08x\n", env->pc);
    for (i = 0; i < 32; ++i) {
        cpu_fprintf(f, "R%02d=%08x%c", i, cpu_get_gpr(env, i),
                    (i % 4) == 3 ? '\n' : ' ');
    }
}
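
/*
 * Counterpart to the tcg_gen_insn_start() calls in
 * gen_intermediate_code: when an exception unwinds in mid-TB, the
 * recorded operands come back here as data[].  data[0] is the pc;
 * bit 0 of data[1] is the delay-slot flag (dflag), and bit 1 says the
 * insn was not the first of its TB, in which case ppc can be
 * recomputed as pc - 4.
 */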
void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->dflag = data[1] & 1;
    if (data[1] & 2) {
        env->ppc = env->pc - 4;
    }
}