/*
 * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
 *                         Feng Gao <gf91597@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu-common.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define LOG_DIS(str, ...) \
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%08x: " str, dc->pc, ## __VA_ARGS__)

typedef struct DisasContext {
    TranslationBlock *tb;
    target_ulong pc;
    uint32_t is_jmp;
    uint32_t mem_idx;
    uint32_t tb_flags;
    uint32_t delayed_branch;
    bool singlestep_enabled;
} DisasContext;

static TCGv_env cpu_env;
static TCGv cpu_sr;
static TCGv cpu_R[32];
static TCGv cpu_R0;
static TCGv cpu_pc;
static TCGv jmp_pc;             /* l.jr/l.jalr temp pc */
static TCGv cpu_ppc;
static TCGv cpu_sr_f;           /* bf/bnf, F flag taken */
static TCGv cpu_sr_cy;          /* carry (unsigned overflow) */
static TCGv cpu_sr_ov;          /* signed overflow */
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;
static TCGv_i32 fpcsr;
static TCGv_i64 cpu_mac;        /* MACHI:MACLO */
static TCGv_i32 cpu_dflag;

#include "exec/gen-icount.h"

void openrisc_translate_init(void)
{
    static const char * const regnames[] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    };
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;
    cpu_sr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, sr), "sr");
    cpu_dflag = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUOpenRISCState, dflag),
                                       "dflag");
    cpu_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, pc), "pc");
    cpu_ppc = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUOpenRISCState, ppc), "ppc");
    jmp_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
    cpu_sr_f = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUOpenRISCState, sr_f), "sr_f");
    cpu_sr_cy = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_cy), "sr_cy");
    cpu_sr_ov = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_ov), "sr_ov");
    cpu_lock_addr = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUOpenRISCState, lock_addr),
                                       "lock_addr");
    cpu_lock_value = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUOpenRISCState, lock_value),
                                        "lock_value");
    fpcsr = tcg_global_mem_new_i32(cpu_env,
                                   offsetof(CPUOpenRISCState, fpcsr),
                                   "fpcsr");
    cpu_mac = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUOpenRISCState, mac),
                                     "mac");
    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUOpenRISCState, gpr[i]),
                                      regnames[i]);
    }
    cpu_R0 = cpu_R[0];
}

static void gen_exception(DisasContext *dc, unsigned int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_illegal_exception(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    gen_exception(dc, EXCP_ILLEGAL);
    dc->is_jmp = DISAS_UPDATE;
}

/* not used yet, open it when we need or64.  */
/*#ifdef TARGET_OPENRISC64
static void check_ob64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OB64S)) {
        gen_illegal_exception(dc);
    }
}

static void check_of64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OF64S)) {
        gen_illegal_exception(dc);
    }
}

static void check_ov64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OV64S)) {
        gen_illegal_exception(dc);
    }
}
#endif*/

/* We're about to write to REG.  On the off-chance that the user is
   writing to R0, re-instate the architectural register.  */
#define check_r0_write(reg)             \
    do {                                \
        if (unlikely(reg == 0)) {       \
            cpu_R[0] = cpu_R0;          \
        }                               \
    } while (0)

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
    if (unlikely(dc->singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_goto_tb(n);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (dc->singlestep_enabled) {
            gen_exception(dc, EXCP_DEBUG);
        }
        tcg_gen_exit_tb(0);
    }
}

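/*
 * Branches and jumps never update cpu_pc directly: the target is staged
 * in jmp_pc and dc->delayed_branch is set to 2, so the following (delay
 * slot) instruction is still translated before the main loop copies
 * jmp_pc into cpu_pc.
 */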
static void gen_jump(DisasContext *dc, int32_t n26, uint32_t reg, uint32_t op0)
{
    target_ulong tmp_pc = dc->pc + n26 * 4;

    switch (op0) {
    case 0x00:     /* l.j */
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        break;
    case 0x01:     /* l.jal */
        tcg_gen_movi_tl(cpu_R[9], dc->pc + 8);
        /* Optimize jal being used to load the PC for PIC.  */
        if (tmp_pc == dc->pc + 8) {
            return;
        }
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        break;
    case 0x03:     /* l.bnf */
    case 0x04:     /* l.bf  */
        {
            TCGv t_next = tcg_const_tl(dc->pc + 8);
            TCGv t_true = tcg_const_tl(tmp_pc);
            TCGv t_zero = tcg_const_tl(0);

            tcg_gen_movcond_tl(op0 == 0x03 ? TCG_COND_EQ : TCG_COND_NE,
                               jmp_pc, cpu_sr_f, t_zero, t_true, t_next);

            tcg_temp_free(t_next);
            tcg_temp_free(t_true);
            tcg_temp_free(t_zero);
        }
        break;
    case 0x11:     /* l.jr */
        tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
        break;
    case 0x12:     /* l.jalr */
        tcg_gen_movi_tl(cpu_R[9], (dc->pc + 8));
        tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
        break;
    default:
        gen_illegal_exception(dc);
        break;
    }

    dc->delayed_branch = 2;
}

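/*
 * The gen_ove_* helpers raise the overflow/carry range exception via a
 * helper call only when SR_OVE is set in the TB flags; otherwise the
 * arithmetic helpers below merely update cpu_sr_cy/cpu_sr_ov.
 */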
static void gen_ove_cy(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cy(cpu_env);
    }
}

static void gen_ove_ov(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_ov(cpu_env);
    }
}

static void gen_ove_cyov(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cyov(cpu_env);
    }
}

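/*
 * gen_add obtains the carry from a double-word add (add2 with zero high
 * parts) and derives signed overflow as (res ^ srcb) & ~(srca ^ srcb):
 * the sign bit of cpu_sr_ov is set exactly when both operands have the
 * same sign and the result's sign differs.
 */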
static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, srcb, t0);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}

static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, cpu_sr_cy, t0);
    tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, t0);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}

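/*
 * For l.sub the carry flag is the borrow, computed directly as
 * srca <u srcb, and signed overflow is taken from the sign bit of
 * (res ^ srca) & (srca ^ srcb): the operands had different signs and
 * the result's sign no longer matches srca.
 */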
static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv res = tcg_temp_new();

    tcg_gen_sub_tl(res, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_cy, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_ov, res, srca);
    tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}

static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
    tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
    tcg_temp_free(t0);
    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);

    gen_ove_ov(dc);
}

static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    tcg_gen_muls2_tl(dest, cpu_sr_cy, srca, srcb);
    tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);

    gen_ove_cy(dc);
}

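/*
 * l.div/l.divu must never trap on the host.  The divide-by-zero test is
 * captured first (as 0 or 1) in the flag register, and that flag is then
 * OR-ed into the divisor so a zero divisor becomes 1 before the TCG
 * division is emitted.
 */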
static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_ov);
    tcg_gen_div_tl(dest, srca, t0);
    tcg_temp_free(t0);

    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
    gen_ove_ov(dc);
}

static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_cy);
    tcg_gen_divu_tl(dest, srca, t0);
    tcg_temp_free(t0);

    gen_ove_cy(dc);
}

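/*
 * l.muld/l.muldu place the full 64-bit product in cpu_mac (MACHI:MACLO).
 * On a 32-bit target the widened multiply cannot overflow, so the flag
 * is simply cleared; on a 64-bit target the high half is checked against
 * the sign extension of the low half (signed) or against zero (unsigned).
 */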
static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_ov, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
        tcg_gen_sari_i64(t1, cpu_mac, 63);
        tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high);
        tcg_temp_free_i64(high);
        tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
        tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);

        gen_ove_ov(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_cy, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_mulu2_i64(cpu_mac, high, t1, t2);
        tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0);
        tcg_gen_trunc_i64_tl(cpu_sr_cy, high);
        tcg_temp_free_i64(high);

        gen_ove_cy(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

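/*
 * l.mac/l.msb multiply into a 64-bit temporary and add it to (or
 * subtract it from) cpu_mac.  Overflow of that accumulate step is
 * detected with the usual xor trick; only the high half of the result,
 * which carries the sign bit, is copied into cpu_sr_ov on a 32-bit
 * target.
 */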
static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_andc_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}

static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t1);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}

static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_and_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}

static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t2);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}

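/*
 * l.lwa/l.swa form a load-linked/store-conditional pair.  The load
 * records the address and the loaded value in cpu_lock_addr and
 * cpu_lock_value; the store succeeds only if an atomic compare-and-swap
 * against the remembered value still matches.  SR_F reports the outcome
 * and the reservation is cleared by setting cpu_lock_addr to -1.
 */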
static void gen_lwa(DisasContext *dc, TCGv rd, TCGv ra, int32_t ofs)
{
    TCGv ea = tcg_temp_new();

    tcg_gen_addi_tl(ea, ra, ofs);
    tcg_gen_qemu_ld_tl(rd, ea, dc->mem_idx, MO_TEUL);
    tcg_gen_mov_tl(cpu_lock_addr, ea);
    tcg_gen_mov_tl(cpu_lock_value, rd);
    tcg_temp_free(ea);
}

static void gen_swa(DisasContext *dc, int b, TCGv ra, int32_t ofs)
{
    TCGv ea, val;
    TCGLabel *lab_fail, *lab_done;

    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, ra, ofs);

    /* For TB_FLAGS_R0_0, the branch below invalidates the temporary assigned
       to cpu_R[0].  Since l.swa is quite often immediately followed by a
       branch, don't bother reallocating; finish the TB using the "real" R0.
       This also takes care of RB input across the branch.  */
    dc->tb_flags &= ~TB_FLAGS_R0_0;

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
    tcg_temp_free(ea);

    val = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
                              cpu_R[b], dc->mem_idx, MO_TEUL);
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
    tcg_temp_free(val);

    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    tcg_gen_movi_tl(cpu_sr_f, 0);

    gen_set_label(lab_done);
    tcg_gen_movi_tl(cpu_lock_addr, -1);
}

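/*
 * Register fields sit at fixed positions in every l.* ALU encoding:
 * rd in bits 25..21, ra in bits 20..16, rb in bits 15..11, with
 * op0/op1/op2 taken from the low bits.  As an illustration only (not a
 * verified encoding), an l.add with rd=3, ra=4, rb=5 would carry 3, 4
 * and 5 in those three fields and select gen_add below.
 */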
static void dec_calc(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, op1, op2;
    uint32_t ra, rb, rd;
    op0 = extract32(insn, 0, 4);
    op1 = extract32(insn, 8, 2);
    op2 = extract32(insn, 6, 2);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);
    rd = extract32(insn, 21, 5);

    switch (op1) {
    case 0:
        switch (op0) {
        case 0x0: /* l.add */
            LOG_DIS("l.add r%d, r%d, r%d\n", rd, ra, rb);
            gen_add(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x1: /* l.addc */
            LOG_DIS("l.addc r%d, r%d, r%d\n", rd, ra, rb);
            gen_addc(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x2: /* l.sub */
            LOG_DIS("l.sub r%d, r%d, r%d\n", rd, ra, rb);
            gen_sub(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x3: /* l.and */
            LOG_DIS("l.and r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_and_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x4: /* l.or */
            LOG_DIS("l.or r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_or_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x5: /* l.xor */
            LOG_DIS("l.xor r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_xor_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x8:
            switch (op2) {
            case 0: /* l.sll */
                LOG_DIS("l.sll r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_shl_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            case 1: /* l.srl */
                LOG_DIS("l.srl r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_shr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            case 2: /* l.sra */
                LOG_DIS("l.sra r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_sar_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            case 3: /* l.ror */
                LOG_DIS("l.ror r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_rotr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            }
            break;

        case 0xc:
            switch (op2) {
            case 0: /* l.exths */
                LOG_DIS("l.exths r%d, r%d\n", rd, ra);
                tcg_gen_ext16s_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 1: /* l.extbs */
                LOG_DIS("l.extbs r%d, r%d\n", rd, ra);
                tcg_gen_ext8s_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 2: /* l.exthz */
                LOG_DIS("l.exthz r%d, r%d\n", rd, ra);
                tcg_gen_ext16u_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 3: /* l.extbz */
                LOG_DIS("l.extbz r%d, r%d\n", rd, ra);
                tcg_gen_ext8u_tl(cpu_R[rd], cpu_R[ra]);
                return;
            }
            break;

        case 0xd:
            switch (op2) {
            case 0: /* l.extws */
                LOG_DIS("l.extws r%d, r%d\n", rd, ra);
                tcg_gen_ext32s_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 1: /* l.extwz */
                LOG_DIS("l.extwz r%d, r%d\n", rd, ra);
                tcg_gen_ext32u_tl(cpu_R[rd], cpu_R[ra]);
                return;
            }
            break;

        case 0xe: /* l.cmov */
            LOG_DIS("l.cmov r%d, r%d, r%d\n", rd, ra, rb);
            {
                TCGv zero = tcg_const_tl(0);
                tcg_gen_movcond_tl(TCG_COND_NE, cpu_R[rd], cpu_sr_f, zero,
                                   cpu_R[ra], cpu_R[rb]);
                tcg_temp_free(zero);
            }
            return;

        case 0xf: /* l.ff1 */
            LOG_DIS("l.ff1 r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_ctzi_tl(cpu_R[rd], cpu_R[ra], -1);
            tcg_gen_addi_tl(cpu_R[rd], cpu_R[rd], 1);
            return;
        }
        break;

    case 1:
        switch (op0) {
        case 0xf: /* l.fl1 */
            LOG_DIS("l.fl1 r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_clzi_tl(cpu_R[rd], cpu_R[ra], TARGET_LONG_BITS);
            tcg_gen_subfi_tl(cpu_R[rd], TARGET_LONG_BITS, cpu_R[rd]);
            return;
        }
        break;

    case 2:
        break;

    case 3:
        switch (op0) {
        case 0x6: /* l.mul */
            LOG_DIS("l.mul r%d, r%d, r%d\n", rd, ra, rb);
            gen_mul(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x7: /* l.muld */
            LOG_DIS("l.muld r%d, r%d\n", ra, rb);
            gen_muld(dc, cpu_R[ra], cpu_R[rb]);
            return;

        case 0x9: /* l.div */
            LOG_DIS("l.div r%d, r%d, r%d\n", rd, ra, rb);
            gen_div(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0xa: /* l.divu */
            LOG_DIS("l.divu r%d, r%d, r%d\n", rd, ra, rb);
            gen_divu(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0xb: /* l.mulu */
            LOG_DIS("l.mulu r%d, r%d, r%d\n", rd, ra, rb);
            gen_mulu(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0xc: /* l.muldu */
            LOG_DIS("l.muldu r%d, r%d\n", ra, rb);
            gen_muldu(dc, cpu_R[ra], cpu_R[rb]);
            return;
        }
        break;
    }
    gen_illegal_exception(dc);
}

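/*
 * dec_misc handles everything dispatched on the major opcode: jumps and
 * branches, l.nop, SPR access, the immediate ALU forms, and the
 * load/store group.  Loads and stores funnel through the shared
 * do_load/do_store tails with the access size and signedness in mop.
 */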
static void dec_misc(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, op1;
    uint32_t ra, rb, rd;
    uint32_t L6, K5, K16, K5_11;
    int32_t I16, I5_11, N26;
    TCGMemOp mop;
    TCGv t0;

    op0 = extract32(insn, 26, 6);
    op1 = extract32(insn, 24, 2);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);
    rd = extract32(insn, 21, 5);
    L6 = extract32(insn, 5, 6);
    K5 = extract32(insn, 0, 5);
    K16 = extract32(insn, 0, 16);
    I16 = (int16_t)K16;
    N26 = sextract32(insn, 0, 26);
    K5_11 = (extract32(insn, 21, 5) << 11) | extract32(insn, 0, 11);
    I5_11 = (int16_t)K5_11;

    switch (op0) {
    case 0x00:    /* l.j */
        LOG_DIS("l.j %d\n", N26);
        gen_jump(dc, N26, 0, op0);
        break;

    case 0x01:    /* l.jal */
        LOG_DIS("l.jal %d\n", N26);
        gen_jump(dc, N26, 0, op0);
        break;

    case 0x03:    /* l.bnf */
        LOG_DIS("l.bnf %d\n", N26);
        gen_jump(dc, N26, 0, op0);
        break;

    case 0x04:    /* l.bf */
        LOG_DIS("l.bf %d\n", N26);
        gen_jump(dc, N26, 0, op0);
        break;

    case 0x05:
        switch (op1) {
        case 0x01:    /* l.nop */
            LOG_DIS("l.nop %d\n", I16);
            break;

        default:
            gen_illegal_exception(dc);
            break;
        }
        break;

    case 0x11:    /* l.jr */
        LOG_DIS("l.jr r%d\n", rb);
        gen_jump(dc, 0, rb, op0);
        break;

    case 0x12:    /* l.jalr */
        LOG_DIS("l.jalr r%d\n", rb);
        gen_jump(dc, 0, rb, op0);
        break;

    case 0x13:    /* l.maci */
        LOG_DIS("l.maci r%d, %d\n", ra, I16);
        t0 = tcg_const_tl(I16);
        gen_mac(dc, cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x09:    /* l.rfe */
        LOG_DIS("l.rfe\n");
        {
#if defined(CONFIG_USER_ONLY)
            return;
#else
            if (dc->mem_idx == MMU_USER_IDX) {
                gen_illegal_exception(dc);
                return;
            }
            gen_helper_rfe(cpu_env);
            dc->is_jmp = DISAS_UPDATE;
#endif
        }
        break;

    case 0x1b: /* l.lwa */
        LOG_DIS("l.lwa r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        gen_lwa(dc, cpu_R[rd], cpu_R[ra], I16);
        break;

    case 0x1c:    /* l.cust1 */
        LOG_DIS("l.cust1\n");
        break;

    case 0x1d:    /* l.cust2 */
        LOG_DIS("l.cust2\n");
        break;

    case 0x1e:    /* l.cust3 */
        LOG_DIS("l.cust3\n");
        break;

    case 0x1f:    /* l.cust4 */
        LOG_DIS("l.cust4\n");
        break;

    case 0x3c:    /* l.cust5 */
        LOG_DIS("l.cust5 r%d, r%d, r%d, %d, %d\n", rd, ra, rb, L6, K5);
        break;

    case 0x3d:    /* l.cust6 */
        LOG_DIS("l.cust6\n");
        break;

    case 0x3e:    /* l.cust7 */
        LOG_DIS("l.cust7\n");
        break;

    case 0x3f:    /* l.cust8 */
        LOG_DIS("l.cust8\n");
        break;

    /* not used yet, open it when we need or64.  */
    /*#ifdef TARGET_OPENRISC64
    case 0x20:     l.ld
        LOG_DIS("l.ld r%d, r%d, %d\n", rd, ra, I16);
        check_ob64s(dc);
        mop = MO_TEQ;
        goto do_load;
    #endif*/

    case 0x21:    /* l.lwz */
        LOG_DIS("l.lwz r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TEUL;
        goto do_load;

    case 0x22:    /* l.lws */
        LOG_DIS("l.lws r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TESL;
        goto do_load;

    case 0x23:    /* l.lbz */
        LOG_DIS("l.lbz r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_UB;
        goto do_load;

    case 0x24:    /* l.lbs */
        LOG_DIS("l.lbs r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_SB;
        goto do_load;

    case 0x25:    /* l.lhz */
        LOG_DIS("l.lhz r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TEUW;
        goto do_load;

    case 0x26:    /* l.lhs */
        LOG_DIS("l.lhs r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TESW;
        goto do_load;

    do_load:
        check_r0_write(rd);
        t0 = tcg_temp_new();
        tcg_gen_addi_tl(t0, cpu_R[ra], I16);
        tcg_gen_qemu_ld_tl(cpu_R[rd], t0, dc->mem_idx, mop);
        tcg_temp_free(t0);
        break;

    case 0x27:    /* l.addi */
        LOG_DIS("l.addi r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        t0 = tcg_const_tl(I16);
        gen_add(dc, cpu_R[rd], cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x28:    /* l.addic */
        LOG_DIS("l.addic r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        t0 = tcg_const_tl(I16);
        gen_addc(dc, cpu_R[rd], cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x29:    /* l.andi */
        LOG_DIS("l.andi r%d, r%d, %d\n", rd, ra, K16);
        check_r0_write(rd);
        tcg_gen_andi_tl(cpu_R[rd], cpu_R[ra], K16);
        break;

    case 0x2a:    /* l.ori */
        LOG_DIS("l.ori r%d, r%d, %d\n", rd, ra, K16);
        check_r0_write(rd);
        tcg_gen_ori_tl(cpu_R[rd], cpu_R[ra], K16);
        break;

    case 0x2b:    /* l.xori */
        LOG_DIS("l.xori r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        tcg_gen_xori_tl(cpu_R[rd], cpu_R[ra], I16);
        break;

    case 0x2c:    /* l.muli */
        LOG_DIS("l.muli r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        t0 = tcg_const_tl(I16);
        gen_mul(dc, cpu_R[rd], cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x2d:    /* l.mfspr */
        LOG_DIS("l.mfspr r%d, r%d, %d\n", rd, ra, K16);
        check_r0_write(rd);
        {
#if defined(CONFIG_USER_ONLY)
            return;
#else
            TCGv_i32 ti = tcg_const_i32(K16);
            if (dc->mem_idx == MMU_USER_IDX) {
                gen_illegal_exception(dc);
                return;
            }
            gen_helper_mfspr(cpu_R[rd], cpu_env, cpu_R[rd], cpu_R[ra], ti);
            tcg_temp_free_i32(ti);
#endif
        }
        break;

    case 0x30:    /* l.mtspr */
        LOG_DIS("l.mtspr r%d, r%d, %d\n", ra, rb, K5_11);
        {
#if defined(CONFIG_USER_ONLY)
            return;
#else
            TCGv_i32 im = tcg_const_i32(K5_11);
            if (dc->mem_idx == MMU_USER_IDX) {
                gen_illegal_exception(dc);
                return;
            }
            gen_helper_mtspr(cpu_env, cpu_R[ra], cpu_R[rb], im);
            tcg_temp_free_i32(im);
#endif
        }
        break;

    case 0x33: /* l.swa */
        LOG_DIS("l.swa r%d, r%d, %d\n", ra, rb, I5_11);
        gen_swa(dc, rb, cpu_R[ra], I5_11);
        break;

    /* not used yet, open it when we need or64.  */
    /*#ifdef TARGET_OPENRISC64
    case 0x34:     l.sd
        LOG_DIS("l.sd r%d, r%d, %d\n", ra, rb, I5_11);
        check_ob64s(dc);
        mop = MO_TEQ;
        goto do_store;
    #endif*/

    case 0x35:    /* l.sw */
        LOG_DIS("l.sw r%d, r%d, %d\n", ra, rb, I5_11);
        mop = MO_TEUL;
        goto do_store;

    case 0x36:    /* l.sb */
        LOG_DIS("l.sb r%d, r%d, %d\n", ra, rb, I5_11);
        mop = MO_UB;
        goto do_store;

    case 0x37:    /* l.sh */
        LOG_DIS("l.sh r%d, r%d, %d\n", ra, rb, I5_11);
        mop = MO_TEUW;
        goto do_store;

    do_store:
        {
            TCGv t0 = tcg_temp_new();
            tcg_gen_addi_tl(t0, cpu_R[ra], I5_11);
            tcg_gen_qemu_st_tl(cpu_R[rb], t0, dc->mem_idx, mop);
            tcg_temp_free(t0);
        }
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_mac(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t ra, rb;
    op0 = extract32(insn, 0, 4);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);

    switch (op0) {
    case 0x0001: /* l.mac */
        LOG_DIS("l.mac r%d, r%d\n", ra, rb);
        gen_mac(dc, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0002: /* l.msb */
        LOG_DIS("l.msb r%d, r%d\n", ra, rb);
        gen_msb(dc, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0003: /* l.macu */
        LOG_DIS("l.macu r%d, r%d\n", ra, rb);
        gen_macu(dc, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0004: /* l.msbu */
        LOG_DIS("l.msbu r%d, r%d\n", ra, rb);
        gen_msbu(dc, cpu_R[ra], cpu_R[rb]);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_logic(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t rd, ra, L6, S6;
    op0 = extract32(insn, 6, 2);
    rd = extract32(insn, 21, 5);
    ra = extract32(insn, 16, 5);
    L6 = extract32(insn, 0, 6);
    S6 = L6 & (TARGET_LONG_BITS - 1);

    check_r0_write(rd);
    switch (op0) {
    case 0x00:    /* l.slli */
        LOG_DIS("l.slli r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_shli_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    case 0x01:    /* l.srli */
        LOG_DIS("l.srli r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_shri_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    case 0x02:    /* l.srai */
        LOG_DIS("l.srai r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_sari_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    case 0x03:    /* l.rori */
        LOG_DIS("l.rori r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_rotri_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_M(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t rd;
    uint32_t K16;
    op0 = extract32(insn, 16, 1);
    rd = extract32(insn, 21, 5);
    K16 = extract32(insn, 0, 16);

    check_r0_write(rd);
    switch (op0) {
    case 0x0: /* l.movhi */
        LOG_DIS("l.movhi r%d, %d\n", rd, K16);
        tcg_gen_movi_tl(cpu_R[rd], (K16 << 16));
        break;

    case 0x1: /* l.macrc */
        LOG_DIS("l.macrc r%d\n", rd);
        tcg_gen_trunc_i64_tl(cpu_R[rd], cpu_mac);
        tcg_gen_movi_i64(cpu_mac, 0);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_comp(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t ra, rb;

    op0 = extract32(insn, 21, 5);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);

    /* unsigned integers  */
    tcg_gen_ext32u_tl(cpu_R[ra], cpu_R[ra]);
    tcg_gen_ext32u_tl(cpu_R[rb], cpu_R[rb]);

    switch (op0) {
    case 0x0: /* l.sfeq */
        LOG_DIS("l.sfeq r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1: /* l.sfne */
        LOG_DIS("l.sfne r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x2: /* l.sfgtu */
        LOG_DIS("l.sfgtu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x3: /* l.sfgeu */
        LOG_DIS("l.sfgeu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x4: /* l.sfltu */
        LOG_DIS("l.sfltu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x5: /* l.sfleu */
        LOG_DIS("l.sfleu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xa: /* l.sfgts */
        LOG_DIS("l.sfgts r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xb: /* l.sfges */
        LOG_DIS("l.sfges r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xc: /* l.sflts */
        LOG_DIS("l.sflts r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xd: /* l.sfles */
        LOG_DIS("l.sfles r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_compi(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, ra;
    int32_t I16;

    op0 = extract32(insn, 21, 5);
    ra = extract32(insn, 16, 5);
    I16 = sextract32(insn, 0, 16);

    switch (op0) {
    case 0x0: /* l.sfeqi */
        LOG_DIS("l.sfeqi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x1: /* l.sfnei */
        LOG_DIS("l.sfnei r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x2: /* l.sfgtui */
        LOG_DIS("l.sfgtui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x3: /* l.sfgeui */
        LOG_DIS("l.sfgeui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x4: /* l.sfltui */
        LOG_DIS("l.sfltui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x5: /* l.sfleui */
        LOG_DIS("l.sfleui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xa: /* l.sfgtsi */
        LOG_DIS("l.sfgtsi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xb: /* l.sfgesi */
        LOG_DIS("l.sfgesi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xc: /* l.sfltsi */
        LOG_DIS("l.sfltsi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xd: /* l.sflesi */
        LOG_DIS("l.sflesi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R[ra], I16);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_sys(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t K16;

    op0 = extract32(insn, 16, 10);
    K16 = extract32(insn, 0, 16);

    switch (op0) {
    case 0x000:    /* l.sys */
        LOG_DIS("l.sys %d\n", K16);
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        gen_exception(dc, EXCP_SYSCALL);
        dc->is_jmp = DISAS_UPDATE;
        break;

    case 0x100:    /* l.trap */
        LOG_DIS("l.trap %d\n", K16);
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        gen_exception(dc, EXCP_TRAP);
        break;

    case 0x300:    /* l.csync */
        LOG_DIS("l.csync\n");
        break;

    case 0x200:    /* l.msync */
        LOG_DIS("l.msync\n");
        tcg_gen_mb(TCG_MO_ALL);
        break;

    case 0x270:    /* l.psync */
        LOG_DIS("l.psync\n");
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_float(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t ra, rb, rd;
    op0 = extract32(insn, 0, 8);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);
    rd = extract32(insn, 21, 5);

    switch (op0) {
    case 0x00: /* lf.add.s */
        LOG_DIS("lf.add.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_add_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x01: /* lf.sub.s */
        LOG_DIS("lf.sub.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_sub_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x02: /* lf.mul.s */
        LOG_DIS("lf.mul.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_mul_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x03: /* lf.div.s */
        LOG_DIS("lf.div.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_div_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x04: /* lf.itof.s */
        LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
        check_r0_write(rd);
        gen_helper_itofs(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x05: /* lf.ftoi.s */
        LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
        check_r0_write(rd);
        gen_helper_ftois(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x06: /* lf.rem.s */
        LOG_DIS("lf.rem.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_rem_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x07: /* lf.madd.s */
        LOG_DIS("lf.madd.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_madd_s(cpu_R[rd], cpu_env, cpu_R[rd],
                                cpu_R[ra], cpu_R[rb]);
        break;

    case 0x08: /* lf.sfeq.s */
        LOG_DIS("lf.sfeq.s r%d, r%d\n", ra, rb);
        gen_helper_float_eq_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x09: /* lf.sfne.s */
        LOG_DIS("lf.sfne.s r%d, r%d\n", ra, rb);
        gen_helper_float_ne_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0a: /* lf.sfgt.s */
        LOG_DIS("lf.sfgt.s r%d, r%d\n", ra, rb);
        gen_helper_float_gt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0b: /* lf.sfge.s */
        LOG_DIS("lf.sfge.s r%d, r%d\n", ra, rb);
        gen_helper_float_ge_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0c: /* lf.sflt.s */
        LOG_DIS("lf.sflt.s r%d, r%d\n", ra, rb);
        gen_helper_float_lt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0d: /* lf.sfle.s */
        LOG_DIS("lf.sfle.s r%d, r%d\n", ra, rb);
        gen_helper_float_le_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    /* not used yet, open it when we need or64.  */
    /*#ifdef TARGET_OPENRISC64
    case 0x10:     lf.add.d
        LOG_DIS("lf.add.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_add_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x11:     lf.sub.d
        LOG_DIS("lf.sub.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_sub_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x12:     lf.mul.d
        LOG_DIS("lf.mul.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_mul_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x13:     lf.div.d
        LOG_DIS("lf.div.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_div_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x14:     lf.itof.d
        LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_itofd(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x15:     lf.ftoi.d
        LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_ftoid(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x16:     lf.rem.d
        LOG_DIS("lf.rem.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_rem_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x17:     lf.madd.d
        LOG_DIS("lf.madd.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_madd_d(cpu_R[rd], cpu_env, cpu_R[rd],
                                cpu_R[ra], cpu_R[rb]);
        break;

    case 0x18:     lf.sfeq.d
        LOG_DIS("lf.sfeq.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_eq_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1a:     lf.sfgt.d
        LOG_DIS("lf.sfgt.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_gt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1b:     lf.sfge.d
        LOG_DIS("lf.sfge.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_ge_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x19:     lf.sfne.d
        LOG_DIS("lf.sfne.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_ne_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1c:     lf.sflt.d
        LOG_DIS("lf.sflt.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_lt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1d:     lf.sfle.d
        LOG_DIS("lf.sfle.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_le_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;
    #endif*/

    default:
        gen_illegal_exception(dc);
        break;
    }
}

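/*
 * disas_openrisc_insn fetches one 32-bit instruction at dc->pc and
 * dispatches on the major opcode in bits 31..26 to the dec_* helpers;
 * opcodes without a dedicated decoder are handled by dec_misc.
 */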
static void disas_openrisc_insn(DisasContext *dc, OpenRISCCPU *cpu)
{
    uint32_t op0;
    uint32_t insn;
    insn = cpu_ldl_code(&cpu->env, dc->pc);
    op0 = extract32(insn, 26, 6);

    switch (op0) {
    case 0x06:
        dec_M(dc, insn);
        break;

    case 0x08:
        dec_sys(dc, insn);
        break;

    case 0x2e:
        dec_logic(dc, insn);
        break;

    case 0x2f:
        dec_compi(dc, insn);
        break;

    case 0x31:
        dec_mac(dc, insn);
        break;

    case 0x32:
        dec_float(dc, insn);
        break;

    case 0x38:
        dec_calc(dc, insn);
        break;

    case 0x39:
        dec_comp(dc, insn);
        break;

    default:
        dec_misc(dc, insn);
        break;
    }
}

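/*
 * The translation loop below emits one guest instruction per iteration
 * and stops when the decoder forces an exit (is_jmp), the TCG op buffer
 * fills up, single-stepping is requested, the next page boundary is
 * reached, or max_insns is hit.  A pending delayed branch is resolved
 * one instruction after it was decoded by copying jmp_pc into cpu_pc.
 */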
void gen_intermediate_code(CPUOpenRISCState *env, struct TranslationBlock *tb)
{
    OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    struct DisasContext ctx, *dc = &ctx;
    uint32_t pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->mem_idx = cpu_mmu_index(&cpu->env, false);
    dc->tb_flags = tb->flags;
    dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
    dc->singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;

    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
    }

    gen_tb_start(tb);

    /* Allow the TCG optimizer to see that R0 == 0,
       when it's true, which is the common case.  */
    if (dc->tb_flags & TB_FLAGS_R0_0) {
        cpu_R[0] = tcg_const_tl(0);
    }

    do {
        tcg_gen_insn_start(dc->pc, (dc->delayed_branch ? 1 : 0)
                           | (num_insns ? 2 : 0));
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
            gen_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        disas_openrisc_insn(dc, cpu);
        dc->pc = dc->pc + 4;

        /* delay slot */
        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                tcg_gen_mov_tl(cpu_pc, jmp_pc);
                tcg_gen_discard_tl(jmp_pc);
                dc->is_jmp = DISAS_UPDATE;
                break;
            }
        }
    } while (!dc->is_jmp
             && !tcg_op_buf_full()
             && !cs->singlestep_enabled
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
        tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
    }

    tcg_gen_movi_tl(cpu_ppc, dc->pc - 4);
    if (dc->is_jmp == DISAS_NEXT) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_pc, dc->pc);
    }
    if (unlikely(cs->singlestep_enabled)) {
        gen_exception(dc, EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 0, dc->pc);
            break;
        default:
        case DISAS_JUMP:
            break;
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }

    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        log_target_disas(cs, pc_start, tb->size, 0);
        qemu_log("\n");
        qemu_log_unlock();
    }
}

void openrisc_cpu_dump_state(CPUState *cs, FILE *f,
                             fprintf_function cpu_fprintf,
                             int flags)
{
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    CPUOpenRISCState *env = &cpu->env;
    int i;

    cpu_fprintf(f, "PC=%08x\n", env->pc);
    for (i = 0; i < 32; ++i) {
        cpu_fprintf(f, "R%02d=%08x%c", i, cpu_get_gpr(env, i),
                    (i % 4) == 3 ? '\n' : ' ');
    }
}

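/*
 * Each tcg_gen_insn_start above records two words per instruction:
 * data[0] is the instruction's pc, and data[1] holds the delay-slot flag
 * in bit 0 plus a "not the first insn of the TB" marker in bit 1, which
 * restore_state_to_opc uses to rebuild pc, dflag and ppc.
 */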
void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->dflag = data[1] & 1;
    if (data[1] & 2) {
        env->ppc = env->pc - 4;
    }
}