/*
 * OpenRISC translation
 *
 * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
 *                         Feng Gao <gf91597@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu-common.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define LOG_DIS(str, ...) \
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%08x: " str, dc->pc, ## __VA_ARGS__)

typedef struct DisasContext {
    TranslationBlock *tb;
    target_ulong pc;
    uint32_t is_jmp;
    uint32_t mem_idx;
    uint32_t tb_flags;
    uint32_t delayed_branch;
    bool singlestep_enabled;
} DisasContext;

static TCGv_env cpu_env;
static TCGv cpu_sr;
static TCGv cpu_R[32];
static TCGv cpu_R0;
static TCGv cpu_pc;
static TCGv jmp_pc;             /* l.jr/l.jalr temp pc */
static TCGv cpu_ppc;
static TCGv cpu_sr_f;           /* bf/bnf, F flag taken */
static TCGv cpu_sr_cy;          /* carry (unsigned overflow) */
static TCGv cpu_sr_ov;          /* signed overflow */
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;
static TCGv_i32 fpcsr;
static TCGv_i64 cpu_mac;        /* MACHI:MACLO */
static TCGv_i32 cpu_dflag;

#include "exec/gen-icount.h"

void openrisc_translate_init(void)
{
    static const char * const regnames[] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    };
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;
    cpu_sr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, sr), "sr");
    cpu_dflag = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUOpenRISCState, dflag),
                                       "dflag");
    cpu_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, pc), "pc");
    cpu_ppc = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUOpenRISCState, ppc), "ppc");
    jmp_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
    cpu_sr_f = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUOpenRISCState, sr_f), "sr_f");
    cpu_sr_cy = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_cy), "sr_cy");
    cpu_sr_ov = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_ov), "sr_ov");
    cpu_lock_addr = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUOpenRISCState, lock_addr),
                                       "lock_addr");
    cpu_lock_value = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUOpenRISCState, lock_value),
                                        "lock_value");
    fpcsr = tcg_global_mem_new_i32(cpu_env,
                                   offsetof(CPUOpenRISCState, fpcsr),
                                   "fpcsr");
    cpu_mac = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUOpenRISCState, mac),
                                     "mac");
    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUOpenRISCState, gpr[i]),
                                      regnames[i]);
    }
    cpu_R0 = cpu_R[0];
}

static void gen_exception(DisasContext *dc, unsigned int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_illegal_exception(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    gen_exception(dc, EXCP_ILLEGAL);
    dc->is_jmp = DISAS_UPDATE;
}

/* not used yet, open it when we need or64.  */
/*#ifdef TARGET_OPENRISC64
static void check_ob64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OB64S)) {
        gen_illegal_exception(dc);
    }
}

static void check_of64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OF64S)) {
        gen_illegal_exception(dc);
    }
}

static void check_ov64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OV64S)) {
        gen_illegal_exception(dc);
    }
}
#endif*/

/* We're about to write to REG.  On the off-chance that the user is
   writing to R0, re-instate the architectural register.  */
#define check_r0_write(reg)             \
    do {                                \
        if (unlikely(reg == 0)) {       \
            cpu_R[0] = cpu_R0;          \
        }                               \
    } while (0)

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
    if (unlikely(dc->singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_goto_tb(n);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (dc->singlestep_enabled) {
            gen_exception(dc, EXCP_DEBUG);
        }
        tcg_gen_exit_tb(0);
    }
}

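/*
 * OpenRISC branches have a single delay slot.  gen_jump only computes the
 * target into jmp_pc and sets delayed_branch = 2; the translation loop in
 * gen_intermediate_code decrements that counter per instruction and copies
 * jmp_pc into cpu_pc once the delay-slot instruction has been emitted.
 */
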
static void gen_jump(DisasContext *dc, int32_t n26, uint32_t reg, uint32_t op0)
{
    target_ulong tmp_pc = dc->pc + n26 * 4;

    switch (op0) {
    case 0x00:     /* l.j */
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        break;
    case 0x01:     /* l.jal */
        tcg_gen_movi_tl(cpu_R[9], dc->pc + 8);
        /* Optimize jal being used to load the PC for PIC.  */
        if (tmp_pc == dc->pc + 8) {
            return;
        }
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        break;
    case 0x03:     /* l.bnf */
    case 0x04:     /* l.bf  */
        {
            TCGv t_next = tcg_const_tl(dc->pc + 8);
            TCGv t_true = tcg_const_tl(tmp_pc);
            TCGv t_zero = tcg_const_tl(0);

            tcg_gen_movcond_tl(op0 == 0x03 ? TCG_COND_EQ : TCG_COND_NE,
                               jmp_pc, cpu_sr_f, t_zero, t_true, t_next);

            tcg_temp_free(t_next);
            tcg_temp_free(t_true);
            tcg_temp_free(t_zero);
        }
        break;
    case 0x11:     /* l.jr */
        tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
        break;
    case 0x12:     /* l.jalr */
        tcg_gen_movi_tl(cpu_R[9], (dc->pc + 8));
        tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
        break;
    default:
        gen_illegal_exception(dc);
        break;
    }

    dc->delayed_branch = 2;
}

static void gen_ove_cy(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cy(cpu_env);
    }
}

static void gen_ove_ov(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_ov(cpu_env);
    }
}

static void gen_ove_cyov(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cyov(cpu_env);
    }
}

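/*
 * The arithmetic helpers below compute SR[CY] and SR[OV] inline.  For
 * addition, the carry falls out of add2 against a zero pair, and signed
 * overflow uses the sign-bit identity (res ^ srcb) & ~(srca ^ srcb):
 * the operands agreed in sign but the result does not.
 */
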
static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, srcb, t0);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}

static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, cpu_sr_cy, t0);
    tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, t0);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}

static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv res = tcg_temp_new();

    tcg_gen_sub_tl(res, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_cy, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_ov, res, srcb);
    tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}

static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
    tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
    tcg_temp_free(t0);
    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);

    gen_ove_ov(dc);
}

static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    tcg_gen_muls2_tl(dest, cpu_sr_cy, srca, srcb);
    tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);

    gen_ove_cy(dc);
}

static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_ov);
    tcg_gen_div_tl(dest, srca, t0);
    tcg_temp_free(t0);

    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);

    gen_ove_ov(dc);
}

static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_cy);
    tcg_gen_divu_tl(dest, srca, t0);
    tcg_temp_free(t0);

    gen_ove_cy(dc);
}

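/*
 * The multiply-accumulate unit is modelled as one 64-bit value, cpu_mac
 * (MACHI:MACLO glued together), so l.muld/l.muldu can use 64-bit TCG
 * arithmetic directly; only the overflow/carry extraction differs between
 * the 32-bit and 64-bit target configurations.
 */
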
static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_ov, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
        tcg_gen_sari_i64(t1, cpu_mac, 63);
        tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high);
        tcg_temp_free_i64(high);
        tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
        tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);

        gen_ove_ov(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_cy, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_mulu2_i64(cpu_mac, high, t1, t2);
        tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0);
        tcg_gen_trunc_i64_tl(cpu_sr_cy, high);
        tcg_temp_free_i64(high);

        gen_ove_cy(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_andc_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}

static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t1);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}

static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_and_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}

static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t2);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}

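/*
 * l.lwa/l.swa form a load-linked/store-conditional pair.  The linked
 * address and value are tracked in cpu_lock_addr/cpu_lock_value, and the
 * store-conditional is implemented as a compare-and-swap against the
 * remembered value -- the usual TCG idiom for ll/sc guests.
 */
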
static void gen_lwa(DisasContext *dc, TCGv rd, TCGv ra, int32_t ofs)
{
    TCGv ea = tcg_temp_new();

    tcg_gen_addi_tl(ea, ra, ofs);
    tcg_gen_qemu_ld_tl(rd, ea, dc->mem_idx, MO_TEUL);
    tcg_gen_mov_tl(cpu_lock_addr, ea);
    tcg_gen_mov_tl(cpu_lock_value, rd);
    tcg_temp_free(ea);
}

static void gen_swa(DisasContext *dc, int b, TCGv ra, int32_t ofs)
{
    TCGv ea, val;
    TCGLabel *lab_fail, *lab_done;

    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, ra, ofs);

    /* For TB_FLAGS_R0_0, the branch below invalidates the temporary assigned
       to cpu_R[0].  Since l.swa is quite often immediately followed by a
       branch, don't bother reallocating; finish the TB using the "real" R0.
       This also takes care of RB input across the branch.  */
    cpu_R[0] = cpu_R0;

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
    tcg_temp_free(ea);

    val = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
                              cpu_R[b], dc->mem_idx, MO_TEUL);
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
    tcg_temp_free(val);

    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    tcg_gen_movi_tl(cpu_sr_f, 0);

    gen_set_label(lab_done);
    tcg_gen_movi_tl(cpu_lock_addr, -1);
}

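/*
 * Decode is a hand-written switch tree: disas_openrisc_insn() dispatches
 * on the major opcode (insn[31:26]) to one of the dec_* routines below,
 * each of which further dispatches on the minor opcode fields it extracts.
 */
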
static void dec_calc(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, op1, op2;
    uint32_t ra, rb, rd;
    op0 = extract32(insn, 0, 4);
    op1 = extract32(insn, 8, 2);
    op2 = extract32(insn, 6, 2);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);
    rd = extract32(insn, 21, 5);

    switch (op1) {
    case 0:
        switch (op0) {
        case 0x0: /* l.add */
            LOG_DIS("l.add r%d, r%d, r%d\n", rd, ra, rb);
            gen_add(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x1: /* l.addc */
            LOG_DIS("l.addc r%d, r%d, r%d\n", rd, ra, rb);
            gen_addc(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x2: /* l.sub */
            LOG_DIS("l.sub r%d, r%d, r%d\n", rd, ra, rb);
            gen_sub(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x3: /* l.and */
            LOG_DIS("l.and r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_and_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x4: /* l.or */
            LOG_DIS("l.or r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_or_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x5: /* l.xor */
            LOG_DIS("l.xor r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_xor_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x8:
            switch (op2) {
            case 0: /* l.sll */
                LOG_DIS("l.sll r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_shl_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            case 1: /* l.srl */
                LOG_DIS("l.srl r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_shr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            case 2: /* l.sra */
                LOG_DIS("l.sra r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_sar_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            case 3: /* l.ror */
                LOG_DIS("l.ror r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_rotr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            }
            break;

        case 0xc:
            switch (op2) {
            case 0: /* l.exths */
                LOG_DIS("l.exths r%d, r%d\n", rd, ra);
                tcg_gen_ext16s_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 1: /* l.extbs */
                LOG_DIS("l.extbs r%d, r%d\n", rd, ra);
                tcg_gen_ext8s_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 2: /* l.exthz */
                LOG_DIS("l.exthz r%d, r%d\n", rd, ra);
                tcg_gen_ext16u_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 3: /* l.extbz */
                LOG_DIS("l.extbz r%d, r%d\n", rd, ra);
                tcg_gen_ext8u_tl(cpu_R[rd], cpu_R[ra]);
                return;
            }
            break;

        case 0xd:
            switch (op2) {
            case 0: /* l.extws */
                LOG_DIS("l.extws r%d, r%d\n", rd, ra);
                tcg_gen_ext32s_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 1: /* l.extwz */
                LOG_DIS("l.extwz r%d, r%d\n", rd, ra);
                tcg_gen_ext32u_tl(cpu_R[rd], cpu_R[ra]);
                return;
            }
            break;

        case 0xe: /* l.cmov */
            LOG_DIS("l.cmov r%d, r%d, r%d\n", rd, ra, rb);
            {
                TCGv zero = tcg_const_tl(0);
                tcg_gen_movcond_tl(TCG_COND_NE, cpu_R[rd], cpu_sr_f, zero,
                                   cpu_R[ra], cpu_R[rb]);
                tcg_temp_free(zero);
            }
            return;

        case 0xf: /* l.ff1 */
            LOG_DIS("l.ff1 r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_ctzi_tl(cpu_R[rd], cpu_R[ra], -1);
            tcg_gen_addi_tl(cpu_R[rd], cpu_R[rd], 1);
            return;
        }
        break;

    case 1:
        switch (op0) {
        case 0xf: /* l.fl1 */
            LOG_DIS("l.fl1 r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_clzi_tl(cpu_R[rd], cpu_R[ra], TARGET_LONG_BITS);
            tcg_gen_subfi_tl(cpu_R[rd], TARGET_LONG_BITS, cpu_R[rd]);
            return;
        }
        break;

    case 2:
        break;

    case 3:
        switch (op0) {
        case 0x6: /* l.mul */
            LOG_DIS("l.mul r%d, r%d, r%d\n", rd, ra, rb);
            gen_mul(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x7: /* l.muld */
            LOG_DIS("l.muld r%d, r%d\n", ra, rb);
            gen_muld(dc, cpu_R[ra], cpu_R[rb]);
            return;

        case 0x9: /* l.div */
            LOG_DIS("l.div r%d, r%d, r%d\n", rd, ra, rb);
            gen_div(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0xa: /* l.divu */
            LOG_DIS("l.divu r%d, r%d, r%d\n", rd, ra, rb);
            gen_divu(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0xb: /* l.mulu */
            LOG_DIS("l.mulu r%d, r%d, r%d\n", rd, ra, rb);
            gen_mulu(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0xc: /* l.muldu */
            LOG_DIS("l.muldu r%d, r%d\n", ra, rb);
            gen_muldu(dc, cpu_R[ra], cpu_R[rb]);
            return;
        }
        break;
    }
    gen_illegal_exception(dc);
}

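/*
 * Immediate field naming follows the architecture manual: K* fields are
 * zero-extended, I* fields are sign-extended, and N26 is the signed 26-bit
 * jump displacement counted in instruction words (hence the "* 4" when the
 * branch target is formed).
 */
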
static void dec_misc(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, op1;
    uint32_t ra, rb, rd;
    uint32_t L6, K5, K16, K5_11;
    int32_t I16, I5_11, N26;
    TCGMemOp mop;
    TCGv t0;

    op0 = extract32(insn, 26, 6);
    op1 = extract32(insn, 24, 2);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);
    rd = extract32(insn, 21, 5);
    L6 = extract32(insn, 5, 6);
    K5 = extract32(insn, 0, 5);
    K16 = extract32(insn, 0, 16);
    I16 = (int16_t)K16;
    N26 = sextract32(insn, 0, 26);
    K5_11 = (extract32(insn, 21, 5) << 11) | extract32(insn, 0, 11);
    I5_11 = (int16_t)K5_11;

    switch (op0) {
    case 0x00:    /* l.j */
        LOG_DIS("l.j %d\n", N26);
        gen_jump(dc, N26, 0, op0);
        break;

    case 0x01:    /* l.jal */
        LOG_DIS("l.jal %d\n", N26);
        gen_jump(dc, N26, 0, op0);
        break;

    case 0x03:    /* l.bnf */
        LOG_DIS("l.bnf %d\n", N26);
        gen_jump(dc, N26, 0, op0);
        break;

    case 0x04:    /* l.bf */
        LOG_DIS("l.bf %d\n", N26);
        gen_jump(dc, N26, 0, op0);
        break;

    case 0x05:
        switch (op1) {
        case 0x01:    /* l.nop */
            LOG_DIS("l.nop %d\n", I16);
            break;

        default:
            gen_illegal_exception(dc);
            break;
        }
        break;

    case 0x11:    /* l.jr */
        LOG_DIS("l.jr r%d\n", rb);
        gen_jump(dc, 0, rb, op0);
        break;

    case 0x12:    /* l.jalr */
        LOG_DIS("l.jalr r%d\n", rb);
        gen_jump(dc, 0, rb, op0);
        break;

    case 0x13:    /* l.maci */
        LOG_DIS("l.maci r%d, %d\n", ra, I16);
        t0 = tcg_const_tl(I16);
        gen_mac(dc, cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x09:    /* l.rfe */
        LOG_DIS("l.rfe\n");
        {
#if defined(CONFIG_USER_ONLY)
            return;
#else
            if (dc->mem_idx == MMU_USER_IDX) {
                gen_illegal_exception(dc);
                return;
            }
            gen_helper_rfe(cpu_env);
            dc->is_jmp = DISAS_UPDATE;
#endif
        }
        break;

    case 0x1b:    /* l.lwa */
        LOG_DIS("l.lwa r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        gen_lwa(dc, cpu_R[rd], cpu_R[ra], I16);
        break;

    case 0x1c:    /* l.cust1 */
        LOG_DIS("l.cust1\n");
        break;

    case 0x1d:    /* l.cust2 */
        LOG_DIS("l.cust2\n");
        break;

    case 0x1e:    /* l.cust3 */
        LOG_DIS("l.cust3\n");
        break;

    case 0x1f:    /* l.cust4 */
        LOG_DIS("l.cust4\n");
        break;

    case 0x3c:    /* l.cust5 */
        LOG_DIS("l.cust5 r%d, r%d, r%d, %d, %d\n", rd, ra, rb, L6, K5);
        break;

    case 0x3d:    /* l.cust6 */
        LOG_DIS("l.cust6\n");
        break;

    case 0x3e:    /* l.cust7 */
        LOG_DIS("l.cust7\n");
        break;

    case 0x3f:    /* l.cust8 */
        LOG_DIS("l.cust8\n");
        break;

    /* not used yet, open it when we need or64.  */
    /*#ifdef TARGET_OPENRISC64
    case 0x20:     l.ld
        LOG_DIS("l.ld r%d, r%d, %d\n", rd, ra, I16);
        check_ob64s(dc);
        mop = MO_TEQ;
        goto do_load;
    #endif*/

    case 0x21:    /* l.lwz */
        LOG_DIS("l.lwz r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TEUL;
        goto do_load;

    case 0x22:    /* l.lws */
        LOG_DIS("l.lws r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TESL;
        goto do_load;

    case 0x23:    /* l.lbz */
        LOG_DIS("l.lbz r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_UB;
        goto do_load;

    case 0x24:    /* l.lbs */
        LOG_DIS("l.lbs r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_SB;
        goto do_load;

    case 0x25:    /* l.lhz */
        LOG_DIS("l.lhz r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TEUW;
        goto do_load;

    case 0x26:    /* l.lhs */
        LOG_DIS("l.lhs r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TESW;
        goto do_load;

    do_load:
        check_r0_write(rd);
        t0 = tcg_temp_new();
        tcg_gen_addi_tl(t0, cpu_R[ra], I16);
        tcg_gen_qemu_ld_tl(cpu_R[rd], t0, dc->mem_idx, mop);
        tcg_temp_free(t0);
        break;

    case 0x27:    /* l.addi */
        LOG_DIS("l.addi r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        t0 = tcg_const_tl(I16);
        gen_add(dc, cpu_R[rd], cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x28:    /* l.addic */
        LOG_DIS("l.addic r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        t0 = tcg_const_tl(I16);
        gen_addc(dc, cpu_R[rd], cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x29:    /* l.andi */
        LOG_DIS("l.andi r%d, r%d, %d\n", rd, ra, K16);
        check_r0_write(rd);
        tcg_gen_andi_tl(cpu_R[rd], cpu_R[ra], K16);
        break;

    case 0x2a:    /* l.ori */
        LOG_DIS("l.ori r%d, r%d, %d\n", rd, ra, K16);
        check_r0_write(rd);
        tcg_gen_ori_tl(cpu_R[rd], cpu_R[ra], K16);
        break;

    case 0x2b:    /* l.xori */
        LOG_DIS("l.xori r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        tcg_gen_xori_tl(cpu_R[rd], cpu_R[ra], I16);
        break;

    case 0x2c:    /* l.muli */
        LOG_DIS("l.muli r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        t0 = tcg_const_tl(I16);
        gen_mul(dc, cpu_R[rd], cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x2d:    /* l.mfspr */
        LOG_DIS("l.mfspr r%d, r%d, %d\n", rd, ra, K16);
        check_r0_write(rd);
        {
#if defined(CONFIG_USER_ONLY)
            gen_illegal_exception(dc);
#else
            TCGv_i32 ti = tcg_const_i32(K16);
            if (dc->mem_idx == MMU_USER_IDX) {
                gen_illegal_exception(dc);
                return;
            }
            gen_helper_mfspr(cpu_R[rd], cpu_env, cpu_R[rd], cpu_R[ra], ti);
            tcg_temp_free_i32(ti);
#endif
        }
        break;

    case 0x30:    /* l.mtspr */
        LOG_DIS("l.mtspr r%d, r%d, %d\n", ra, rb, K5_11);
        {
#if defined(CONFIG_USER_ONLY)
            gen_illegal_exception(dc);
#else
            TCGv_i32 im = tcg_const_i32(K5_11);
            if (dc->mem_idx == MMU_USER_IDX) {
                gen_illegal_exception(dc);
                return;
            }
            gen_helper_mtspr(cpu_env, cpu_R[ra], cpu_R[rb], im);
            tcg_temp_free_i32(im);
#endif
        }
        break;

    case 0x33:    /* l.swa */
        LOG_DIS("l.swa r%d, r%d, %d\n", ra, rb, I5_11);
        gen_swa(dc, rb, cpu_R[ra], I5_11);
        break;

    /* not used yet, open it when we need or64.  */
    /*#ifdef TARGET_OPENRISC64
    case 0x34:     l.sd
        LOG_DIS("l.sd r%d, r%d, %d\n", ra, rb, I5_11);
        check_ob64s(dc);
        mop = MO_TEQ;
        goto do_store;
    #endif*/

    case 0x35:    /* l.sw */
        LOG_DIS("l.sw r%d, r%d, %d\n", ra, rb, I5_11);
        mop = MO_TEUL;
        goto do_store;

    case 0x36:    /* l.sb */
        LOG_DIS("l.sb r%d, r%d, %d\n", ra, rb, I5_11);
        mop = MO_UB;
        goto do_store;

    case 0x37:    /* l.sh */
        LOG_DIS("l.sh r%d, r%d, %d\n", ra, rb, I5_11);
        mop = MO_TEUW;
        goto do_store;

    do_store:
        {
            TCGv t0 = tcg_temp_new();
            tcg_gen_addi_tl(t0, cpu_R[ra], I5_11);
            tcg_gen_qemu_st_tl(cpu_R[rb], t0, dc->mem_idx, mop);
            tcg_temp_free(t0);
        }
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_mac(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t ra, rb;
    op0 = extract32(insn, 0, 4);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);

    switch (op0) {
    case 0x0001:    /* l.mac */
        LOG_DIS("l.mac r%d, r%d\n", ra, rb);
        gen_mac(dc, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0002:    /* l.msb */
        LOG_DIS("l.msb r%d, r%d\n", ra, rb);
        gen_msb(dc, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0003:    /* l.macu */
        LOG_DIS("l.macu r%d, r%d\n", ra, rb);
        gen_macu(dc, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0004:    /* l.msbu */
        LOG_DIS("l.msbu r%d, r%d\n", ra, rb);
        gen_msbu(dc, cpu_R[ra], cpu_R[rb]);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

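/*
 * Immediate shift counts are masked to the register width below
 * (S6 = L6 & (TARGET_LONG_BITS - 1)) so that an out-of-range encoding
 * cannot produce an undefined TCG shift operation.
 */
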
static void dec_logic(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t rd, ra, L6, S6;
    op0 = extract32(insn, 6, 2);
    rd = extract32(insn, 21, 5);
    ra = extract32(insn, 16, 5);
    L6 = extract32(insn, 0, 6);
    S6 = L6 & (TARGET_LONG_BITS - 1);

    check_r0_write(rd);
    switch (op0) {
    case 0x00:    /* l.slli */
        LOG_DIS("l.slli r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_shli_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    case 0x01:    /* l.srli */
        LOG_DIS("l.srli r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_shri_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    case 0x02:    /* l.srai */
        LOG_DIS("l.srai r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_sari_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    case 0x03:    /* l.rori */
        LOG_DIS("l.rori r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_rotri_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_M(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t rd;
    uint32_t K16;
    op0 = extract32(insn, 16, 1);
    rd = extract32(insn, 21, 5);
    K16 = extract32(insn, 0, 16);

    check_r0_write(rd);
    switch (op0) {
    case 0x0:    /* l.movhi */
        LOG_DIS("l.movhi r%d, %d\n", rd, K16);
        tcg_gen_movi_tl(cpu_R[rd], (K16 << 16));
        break;

    case 0x1:    /* l.macrc */
        LOG_DIS("l.macrc r%d\n", rd);
        tcg_gen_trunc_i64_tl(cpu_R[rd], cpu_mac);
        tcg_gen_movi_i64(cpu_mac, 0);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_comp(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t ra, rb;

    op0 = extract32(insn, 21, 5);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);

    /* unsigned integers  */
    tcg_gen_ext32u_tl(cpu_R[ra], cpu_R[ra]);
    tcg_gen_ext32u_tl(cpu_R[rb], cpu_R[rb]);

    switch (op0) {
    case 0x0:    /* l.sfeq */
        LOG_DIS("l.sfeq r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1:    /* l.sfne */
        LOG_DIS("l.sfne r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x2:    /* l.sfgtu */
        LOG_DIS("l.sfgtu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x3:    /* l.sfgeu */
        LOG_DIS("l.sfgeu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x4:    /* l.sfltu */
        LOG_DIS("l.sfltu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x5:    /* l.sfleu */
        LOG_DIS("l.sfleu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xa:    /* l.sfgts */
        LOG_DIS("l.sfgts r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xb:    /* l.sfges */
        LOG_DIS("l.sfges r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xc:    /* l.sflts */
        LOG_DIS("l.sflts r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xd:    /* l.sfles */
        LOG_DIS("l.sfles r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_compi(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, ra;
    int32_t I16;

    op0 = extract32(insn, 21, 5);
    ra = extract32(insn, 16, 5);
    I16 = sextract32(insn, 0, 16);

    switch (op0) {
    case 0x0:    /* l.sfeqi */
        LOG_DIS("l.sfeqi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x1:    /* l.sfnei */
        LOG_DIS("l.sfnei r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x2:    /* l.sfgtui */
        LOG_DIS("l.sfgtui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x3:    /* l.sfgeui */
        LOG_DIS("l.sfgeui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x4:    /* l.sfltui */
        LOG_DIS("l.sfltui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x5:    /* l.sfleui */
        LOG_DIS("l.sfleui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xa:    /* l.sfgtsi */
        LOG_DIS("l.sfgtsi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xb:    /* l.sfgesi */
        LOG_DIS("l.sfgesi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xc:    /* l.sfltsi */
        LOG_DIS("l.sfltsi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xd:    /* l.sflesi */
        LOG_DIS("l.sflesi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R[ra], I16);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_sys(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t K16;

    op0 = extract32(insn, 16, 10);
    K16 = extract32(insn, 0, 16);

    switch (op0) {
    case 0x000:    /* l.sys */
        LOG_DIS("l.sys %d\n", K16);
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        gen_exception(dc, EXCP_SYSCALL);
        dc->is_jmp = DISAS_UPDATE;
        break;

    case 0x100:    /* l.trap */
        LOG_DIS("l.trap %d\n", K16);
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        gen_exception(dc, EXCP_TRAP);
        break;

    case 0x300:    /* l.csync */
        LOG_DIS("l.csync\n");
        break;

    case 0x200:    /* l.msync */
        LOG_DIS("l.msync\n");
        tcg_gen_mb(TCG_MO_ALL);
        break;

    case 0x270:    /* l.psync */
        LOG_DIS("l.psync\n");
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

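/*
 * Of the three synchronization instructions only l.msync emits anything:
 * a full TCG memory barrier.  l.csync and l.psync are accepted but
 * generate no code.
 */
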
static void dec_float(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t ra, rb, rd;
    op0 = extract32(insn, 0, 8);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);
    rd = extract32(insn, 21, 5);

    switch (op0) {
    case 0x00:    /* lf.add.s */
        LOG_DIS("lf.add.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_add_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x01:    /* lf.sub.s */
        LOG_DIS("lf.sub.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_sub_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x02:    /* lf.mul.s */
        LOG_DIS("lf.mul.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_mul_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x03:    /* lf.div.s */
        LOG_DIS("lf.div.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_div_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x04:    /* lf.itof.s */
        LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
        check_r0_write(rd);
        gen_helper_itofs(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x05:    /* lf.ftoi.s */
        LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
        check_r0_write(rd);
        gen_helper_ftois(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x06:    /* lf.rem.s */
        LOG_DIS("lf.rem.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_rem_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x07:    /* lf.madd.s */
        LOG_DIS("lf.madd.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_madd_s(cpu_R[rd], cpu_env, cpu_R[rd],
                                cpu_R[ra], cpu_R[rb]);
        break;

    case 0x08:    /* lf.sfeq.s */
        LOG_DIS("lf.sfeq.s r%d, r%d\n", ra, rb);
        gen_helper_float_eq_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x09:    /* lf.sfne.s */
        LOG_DIS("lf.sfne.s r%d, r%d\n", ra, rb);
        gen_helper_float_ne_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0a:    /* lf.sfgt.s */
        LOG_DIS("lf.sfgt.s r%d, r%d\n", ra, rb);
        gen_helper_float_gt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0b:    /* lf.sfge.s */
        LOG_DIS("lf.sfge.s r%d, r%d\n", ra, rb);
        gen_helper_float_ge_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0c:    /* lf.sflt.s */
        LOG_DIS("lf.sflt.s r%d, r%d\n", ra, rb);
        gen_helper_float_lt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0d:    /* lf.sfle.s */
        LOG_DIS("lf.sfle.s r%d, r%d\n", ra, rb);
        gen_helper_float_le_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    /* not used yet, open it when we need or64.  */
    /*#ifdef TARGET_OPENRISC64
    case 0x10:     lf.add.d
        LOG_DIS("lf.add.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_add_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x11:     lf.sub.d
        LOG_DIS("lf.sub.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_sub_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x12:     lf.mul.d
        LOG_DIS("lf.mul.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_mul_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x13:     lf.div.d
        LOG_DIS("lf.div.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_div_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x14:     lf.itof.d
        LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_itofd(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x15:     lf.ftoi.d
        LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_ftoid(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x16:     lf.rem.d
        LOG_DIS("lf.rem.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_rem_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x17:     lf.madd.d
        LOG_DIS("lf.madd.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_madd_d(cpu_R[rd], cpu_env, cpu_R[rd],
                                cpu_R[ra], cpu_R[rb]);
        break;

    case 0x18:     lf.sfeq.d
        LOG_DIS("lf.sfeq.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_eq_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1a:     lf.sfgt.d
        LOG_DIS("lf.sfgt.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_gt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1b:     lf.sfge.d
        LOG_DIS("lf.sfge.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_ge_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x19:     lf.sfne.d
        LOG_DIS("lf.sfne.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_ne_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1c:     lf.sflt.d
        LOG_DIS("lf.sflt.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_lt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1d:     lf.sfle.d
        LOG_DIS("lf.sfle.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_le_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;
    #endif*/

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void disas_openrisc_insn(DisasContext *dc, OpenRISCCPU *cpu)
{
    uint32_t op0;
    uint32_t insn;
    insn = cpu_ldl_code(&cpu->env, dc->pc);
    op0 = extract32(insn, 26, 6);

    switch (op0) {
    case 0x06:
        dec_M(dc, insn);
        break;

    case 0x08:
        dec_sys(dc, insn);
        break;

    case 0x2e:
        dec_logic(dc, insn);
        break;

    case 0x2f:
        dec_compi(dc, insn);
        break;

    case 0x31:
        dec_mac(dc, insn);
        break;

    case 0x32:
        dec_float(dc, insn);
        break;

    case 0x38:
        dec_calc(dc, insn);
        break;

    case 0x39:
        dec_comp(dc, insn);
        break;

    default:
        dec_misc(dc, insn);
        break;
    }
}

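/*
 * Each guest instruction records a (pc, flags) pair via tcg_gen_insn_start:
 * bit 0 of the second word marks a delay slot, bit 1 marks "not the first
 * instruction of the TB".  restore_state_to_opc() consumes the same layout
 * when state is reconstructed at an exception.
 */
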
void gen_intermediate_code(CPUOpenRISCState *env, struct TranslationBlock *tb)
{
    OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    struct DisasContext ctx, *dc = &ctx;
    uint32_t pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->mem_idx = cpu_mmu_index(&cpu->env, false);
    dc->tb_flags = tb->flags;
    dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
    dc->singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;

    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
    }

    gen_tb_start(tb);

    /* Allow the TCG optimizer to see that R0 == 0,
       when it's true, which is the common case.  */
    if (dc->tb_flags & TB_FLAGS_R0_0) {
        cpu_R[0] = tcg_const_tl(0);
    } else {
        cpu_R[0] = cpu_R0;
    }

    do {
        tcg_gen_insn_start(dc->pc, (dc->delayed_branch ? 1 : 0)
                           | (num_insns ? 2 : 0));
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
            gen_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        disas_openrisc_insn(dc, cpu);
        dc->pc = dc->pc + 4;

        /* delay slot */
        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                tcg_gen_mov_tl(cpu_pc, jmp_pc);
                tcg_gen_discard_tl(jmp_pc);
                dc->is_jmp = DISAS_UPDATE;
                break;
            }
        }
    } while (!dc->is_jmp
             && !tcg_op_buf_full()
             && !cs->singlestep_enabled
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
        tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
    }

    tcg_gen_movi_tl(cpu_ppc, dc->pc - 4);
    if (dc->is_jmp == DISAS_NEXT) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_pc, dc->pc);
    }
    if (unlikely(cs->singlestep_enabled)) {
        gen_exception(dc, EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 0, dc->pc);
            break;
        default:
        case DISAS_JUMP:
            break;
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }

    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        log_target_disas(cs, pc_start, tb->size, 0);
        qemu_log("\n");
        qemu_log_unlock();
    }
}

void openrisc_cpu_dump_state(CPUState *cs, FILE *f,
                             fprintf_function cpu_fprintf,
                             int flags)
{
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    CPUOpenRISCState *env = &cpu->env;
    int i;

    cpu_fprintf(f, "PC=%08x\n", env->pc);
    for (i = 0; i < 32; ++i) {
        cpu_fprintf(f, "R%02d=%08x%c", i, env->gpr[i],
                    (i % 4) == 3 ? '\n' : ' ');
    }
}

void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->dflag = data[1] & 1;
    if (data[1] & 2) {
        env->ppc = env->pc - 4;
    }
}