/*
 * OpenRISC translation
 *
 * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
 *                         Feng Gao <gf91597@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "tcg/tcg-op.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/gen-icount.h"
/* is_jmp field values */
#define DISAS_EXIT    DISAS_TARGET_0  /* force exit to main loop */
#define DISAS_JUMP    DISAS_TARGET_1  /* exit via jmp_pc/jmp_pc_imm */
typedef struct DisasContext {
    DisasContextBase base;
    uint32_t mem_idx;
    uint32_t tb_flags;
    uint32_t delayed_branch;
    uint32_t cpucfgr;
    uint32_t avr;

    /* If not -1, jmp_pc contains this value and so is a direct jump.  */
    target_ulong jmp_pc_imm;

    /* The temporary corresponding to register 0 for this compilation.  */
    TCGv R0;
    /* The constant zero.  */
    TCGv zero;
} DisasContext;
static inline bool is_user(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return !(dc->tb_flags & TB_FLAGS_SM);
#endif
}

/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"
static TCGv cpu_sr;
static TCGv cpu_regs[32];
static TCGv cpu_pc;
static TCGv jmp_pc;             /* l.jr/l.jalr temp pc */
static TCGv cpu_ppc;
static TCGv cpu_sr_f;           /* bf/bnf, F flag taken */
static TCGv cpu_sr_cy;          /* carry (unsigned overflow) */
static TCGv cpu_sr_ov;          /* signed overflow */
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;
static TCGv_i32 fpcsr;
static TCGv_i64 cpu_mac;        /* MACHI:MACLO */
static TCGv_i32 cpu_dflag;
void openrisc_translate_init(void)
{
    static const char * const regnames[] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    };
    int i;

    cpu_sr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, sr), "sr");
    cpu_dflag = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUOpenRISCState, dflag),
                                       "dflag");
    cpu_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, pc), "pc");
    cpu_ppc = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUOpenRISCState, ppc), "ppc");
    jmp_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
    cpu_sr_f = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUOpenRISCState, sr_f), "sr_f");
    cpu_sr_cy = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_cy), "sr_cy");
    cpu_sr_ov = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_ov), "sr_ov");
    cpu_lock_addr = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUOpenRISCState, lock_addr),
                                       "lock_addr");
    cpu_lock_value = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUOpenRISCState, lock_value),
                                        "lock_value");
    fpcsr = tcg_global_mem_new_i32(cpu_env,
                                   offsetof(CPUOpenRISCState, fpcsr),
                                   "fpcsr");
    cpu_mac = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUOpenRISCState, mac),
                                     "mac");
    for (i = 0; i < 32; i++) {
        /* The GPRs live in the shadow register file; bank 0 is the
           active set. */
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUOpenRISCState,
                                                  shadow_gpr[0][i]),
                                         regnames[i]);
    }
}
static void gen_exception(DisasContext *dc, unsigned int excp)
{
    gen_helper_exception(cpu_env, tcg_constant_i32(excp));
}

static void gen_illegal_exception(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_ILLEGAL);
    dc->base.is_jmp = DISAS_NORETURN;
}

static bool check_v1_3(DisasContext *dc)
{
    return dc->avr >= 0x01030000;
}

static bool check_of32s(DisasContext *dc)
{
    return dc->cpucfgr & CPUCFGR_OF32S;
}

static bool check_of64a32s(DisasContext *dc)
{
    return dc->cpucfgr & CPUCFGR_OF64A32S;
}
static TCGv cpu_R(DisasContext *dc, int reg)
{
    if (reg == 0) {
        return dc->R0;
    } else {
        return cpu_regs[reg];
    }
}

/*
 * We're about to write to REG.  On the off-chance that the user is
 * writing to R0, re-instate the architectural register.
 */
static void check_r0_write(DisasContext *dc, int reg)
{
    if (unlikely(reg == 0)) {
        dc->R0 = cpu_regs[0];
    }
}
static void gen_ove_cy(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cy(cpu_env);
    }
}

static void gen_ove_ov(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_ov(cpu_env);
    }
}

static void gen_ove_cyov(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cyov(cpu_env);
    }
}
static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, dc->zero, srcb, dc->zero);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}

static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, dc->zero, cpu_sr_cy, dc->zero);
    tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, dc->zero);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}
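/*
 * Worked example for the flag computation in gen_add/gen_addc above
 * (32-bit): for 0x7fffffff + 1, add2 yields res = 0x80000000 with
 * SR[CY] = 0, and the signed-overflow test (res ^ srcb) & ~(srca ^ srcb)
 * gives 0x80000001 & ~0x7ffffffe = 0x80000001.  Its sign bit is set, so
 * SR[OV] reads as 1 and gen_ove_cyov() can raise the overflow exception
 * when SR[OVE] is enabled.
 */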
static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv res = tcg_temp_new();

    tcg_gen_sub_tl(res, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_cy, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_ov, res, srca);
    tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}
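/*
 * Example for gen_sub above (32-bit): 0x80000000 - 1 wraps to
 * 0x7fffffff; both (srca ^ srcb) and (res ^ srca) have their sign bits
 * set, so their AND marks signed overflow, while the unsigned compare
 * 0x80000000 < 1 is false, so SR[CY] (the borrow) stays 0.
 */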
static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
    tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
    tcg_temp_free(t0);
    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);

    gen_ove_ov(dc);
}

static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    tcg_gen_mulu2_tl(dest, cpu_sr_cy, srca, srcb);
    tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);

    gen_ove_cy(dc);
}
static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_ov);
    tcg_gen_div_tl(dest, srca, t0);
    tcg_temp_free(t0);

    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
    gen_ove_ov(dc);
}

static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_cy);
    tcg_gen_divu_tl(dest, srca, t0);
    tcg_temp_free(t0);

    gen_ove_cy(dc);
}
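/*
 * Divide-by-zero handling above, step by step: when srcb == 0 the flag
 * temporary becomes 1, so t0 = srcb | flag == 1 and the host division
 * cannot fault.  The quotient written back (srca / 1) is as good as any
 * other value, since the architecture leaves the result undefined; only
 * the flag (and the optional overflow exception) matters architecturally.
 */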
static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_ov, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
        tcg_gen_sari_i64(t1, cpu_mac, 63);
        tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high);
        tcg_temp_free_i64(high);
        tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
        tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);

    gen_ove_ov(dc);
}

static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_cy, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_mulu2_i64(cpu_mac, high, t1, t2);
        tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0);
        tcg_gen_trunc_i64_tl(cpu_sr_cy, high);
        tcg_temp_free_i64(high);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);

    gen_ove_cy(dc);
}
static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_andc_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}

static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t1);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}
static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_andc_i64(t1, t2, t1);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}

static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t2);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}
static bool trans_l_add(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    gen_add(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_addc(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    gen_addc(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sub(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    gen_sub(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_and(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_and_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_or(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_or_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_xor(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_xor_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sll(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_shl_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_srl(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_shr_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sra(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_sar_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_ror(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_rotr_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_exths(DisasContext *dc, arg_da *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_ext16s_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
    return true;
}

static bool trans_l_extbs(DisasContext *dc, arg_da *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_ext8s_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
    return true;
}

static bool trans_l_exthz(DisasContext *dc, arg_da *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_ext16u_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
    return true;
}

static bool trans_l_extbz(DisasContext *dc, arg_da *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_ext8u_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
    return true;
}
static bool trans_l_cmov(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_movcond_tl(TCG_COND_NE, cpu_R(dc, a->d), cpu_sr_f, dc->zero,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_ff1(DisasContext *dc, arg_da *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_ctzi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), -1);
    tcg_gen_addi_tl(cpu_R(dc, a->d), cpu_R(dc, a->d), 1);
    return true;
}

static bool trans_l_fl1(DisasContext *dc, arg_da *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_clzi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), TARGET_LONG_BITS);
    tcg_gen_subfi_tl(cpu_R(dc, a->d), TARGET_LONG_BITS, cpu_R(dc, a->d));
    return true;
}
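/*
 * l.ff1 and l.fl1 report 1-based bit positions, with 0 meaning "no bit
 * set": for rA = 0x00000080, l.ff1 yields 8 (a ctz of 7, plus 1) and
 * l.fl1 also yields 8 (32 minus a clz of 24); for rA = 0 the ctzi/clzi
 * defaults above make both results 0.
 */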
static bool trans_l_mul(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    gen_mul(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_mulu(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    gen_mulu(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_div(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    gen_div(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_divu(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    gen_divu(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_muld(DisasContext *dc, arg_ab *a)
{
    gen_muld(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_muldu(DisasContext *dc, arg_ab *a)
{
    gen_muldu(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}
static bool trans_l_j(DisasContext *dc, arg_l_j *a)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;

    tcg_gen_movi_tl(jmp_pc, tmp_pc);
    dc->jmp_pc_imm = tmp_pc;
    dc->delayed_branch = 2;
    return true;
}

static bool trans_l_jal(DisasContext *dc, arg_l_jal *a)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
    target_ulong ret_pc = dc->base.pc_next + 8;

    tcg_gen_movi_tl(cpu_regs[9], ret_pc);
    /* Optimize jal being used to load the PC for PIC.  */
    if (tmp_pc != ret_pc) {
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        dc->jmp_pc_imm = tmp_pc;
        dc->delayed_branch = 2;
    }
    return true;
}

static void do_bf(DisasContext *dc, arg_l_bf *a, TCGCond cond)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
    TCGv t_next = tcg_constant_tl(dc->base.pc_next + 8);
    TCGv t_true = tcg_constant_tl(tmp_pc);

    tcg_gen_movcond_tl(cond, jmp_pc, cpu_sr_f, dc->zero, t_true, t_next);
    dc->delayed_branch = 2;
}

static bool trans_l_bf(DisasContext *dc, arg_l_bf *a)
{
    do_bf(dc, a, TCG_COND_NE);
    return true;
}

static bool trans_l_bnf(DisasContext *dc, arg_l_bf *a)
{
    do_bf(dc, a, TCG_COND_EQ);
    return true;
}
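/*
 * All of the jump and branch translators above only schedule the
 * transfer: they leave the target in jmp_pc (and, for direct jumps, in
 * jmp_pc_imm) and set delayed_branch = 2.  The counter is decremented
 * once per translated instruction in openrisc_tr_translate_insn, so the
 * branch takes effect only after the delay-slot instruction.
 */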
static bool trans_l_jr(DisasContext *dc, arg_l_jr *a)
{
    tcg_gen_mov_tl(jmp_pc, cpu_R(dc, a->b));
    dc->delayed_branch = 2;
    return true;
}

static bool trans_l_jalr(DisasContext *dc, arg_l_jalr *a)
{
    tcg_gen_mov_tl(jmp_pc, cpu_R(dc, a->b));
    tcg_gen_movi_tl(cpu_regs[9], dc->base.pc_next + 8);
    dc->delayed_branch = 2;
    return true;
}
static bool trans_l_lwa(DisasContext *dc, arg_load *a)
{
    TCGv ea;

    check_r0_write(dc, a->d);
    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
    tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, MO_TEUL);
    tcg_gen_mov_tl(cpu_lock_addr, ea);
    tcg_gen_mov_tl(cpu_lock_value, cpu_R(dc, a->d));
    tcg_temp_free(ea);
    return true;
}

static void do_load(DisasContext *dc, arg_load *a, MemOp mop)
{
    TCGv ea;

    check_r0_write(dc, a->d);
    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
    tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, mop);
    tcg_temp_free(ea);
}
static bool trans_l_lwz(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TEUL);
    return true;
}

static bool trans_l_lws(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TESL);
    return true;
}

static bool trans_l_lbz(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_UB);
    return true;
}

static bool trans_l_lbs(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_SB);
    return true;
}

static bool trans_l_lhz(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TEUW);
    return true;
}

static bool trans_l_lhs(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TESW);
    return true;
}
static bool trans_l_swa(DisasContext *dc, arg_store *a)
{
    TCGv ea, val;
    TCGLabel *lab_fail, *lab_done;

    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
    tcg_temp_free(ea);

    val = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
                              cpu_R(dc, a->b), dc->mem_idx, MO_TEUL);
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
    tcg_temp_free(val);

    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    tcg_gen_movi_tl(cpu_sr_f, 0);

    gen_set_label(lab_done);
    tcg_gen_movi_tl(cpu_lock_addr, -1);
    return true;
}
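/*
 * l.lwa/l.swa form a load-linked/store-conditional pair that is mapped
 * onto a compare-and-swap: the store succeeds only if the address still
 * matches cpu_lock_addr and the cmpxchg still observes cpu_lock_value
 * there.  As with other targets emulated this way, an ABA change of the
 * value between the two instructions is not detected.
 */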
static void do_store(DisasContext *dc, arg_store *a, MemOp mop)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_addi_tl(t0, cpu_R(dc, a->a), a->i);
    tcg_gen_qemu_st_tl(cpu_R(dc, a->b), t0, dc->mem_idx, mop);
    tcg_temp_free(t0);
}

static bool trans_l_sw(DisasContext *dc, arg_store *a)
{
    do_store(dc, a, MO_TEUL);
    return true;
}

static bool trans_l_sb(DisasContext *dc, arg_store *a)
{
    do_store(dc, a, MO_UB);
    return true;
}

static bool trans_l_sh(DisasContext *dc, arg_store *a)
{
    do_store(dc, a, MO_TEUW);
    return true;
}
static bool trans_l_nop(DisasContext *dc, arg_l_nop *a)
{
    return true;
}

static bool trans_l_adrp(DisasContext *dc, arg_l_adrp *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    check_r0_write(dc, a->d);
    tcg_gen_movi_i32(cpu_R(dc, a->d),
                     (dc->base.pc_next & TARGET_PAGE_MASK) +
                     ((target_long)a->i << TARGET_PAGE_BITS));
    return true;
}
static bool trans_l_addi(DisasContext *dc, arg_rri *a)
{
    check_r0_write(dc, a->d);
    gen_add(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i));
    return true;
}

static bool trans_l_addic(DisasContext *dc, arg_rri *a)
{
    check_r0_write(dc, a->d);
    gen_addc(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i));
    return true;
}

static bool trans_l_muli(DisasContext *dc, arg_rri *a)
{
    check_r0_write(dc, a->d);
    gen_mul(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i));
    return true;
}

static bool trans_l_maci(DisasContext *dc, arg_l_maci *a)
{
    gen_mac(dc, cpu_R(dc, a->a), tcg_constant_tl(a->i));
    return true;
}

static bool trans_l_andi(DisasContext *dc, arg_rrk *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_andi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k);
    return true;
}

static bool trans_l_ori(DisasContext *dc, arg_rrk *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_ori_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k);
    return true;
}

static bool trans_l_xori(DisasContext *dc, arg_rri *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_xori_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->i);
    return true;
}
static bool trans_l_mfspr(DisasContext *dc, arg_l_mfspr *a)
{
    check_r0_write(dc, a->d);

    if (is_user(dc)) {
        gen_illegal_exception(dc);
    } else {
        TCGv spr = tcg_temp_new();

        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            if (dc->delayed_branch) {
                tcg_gen_mov_tl(cpu_pc, jmp_pc);
                tcg_gen_discard_tl(jmp_pc);
            } else {
                tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4);
            }
            dc->base.is_jmp = DISAS_EXIT;
        }

        tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k);
        gen_helper_mfspr(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->d), spr);
        tcg_temp_free(spr);
    }
    return true;
}

static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a)
{
    if (is_user(dc)) {
        gen_illegal_exception(dc);
    } else {
        TCGv spr;

        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        /* For SR, we will need to exit the TB to recognize the new
         * exception state.  For NPC, in theory this counts as a branch
         * (although the SPR only exists for use by an ICE).  Save all
         * of the cpu state first, allowing it to be overwritten.
         */
        if (dc->delayed_branch) {
            tcg_gen_mov_tl(cpu_pc, jmp_pc);
            tcg_gen_discard_tl(jmp_pc);
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4);
        }
        dc->base.is_jmp = DISAS_EXIT;

        spr = tcg_temp_new();
        tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k);
        gen_helper_mtspr(cpu_env, spr, cpu_R(dc, a->b));
        tcg_temp_free(spr);
    }
    return true;
}
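/*
 * Note on the two SPR accessors above: l.mtspr always ends the TB with
 * DISAS_EXIT (a write to SR or NPC can change execution state), and
 * l.mfspr does so when icount is in use.  In both cases the correct
 * next PC (jmp_pc when sitting in a delay slot, pc_next + 4 otherwise)
 * is written back first, so the helper and any exception it raises
 * observe a consistent CPU state.
 */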
static bool trans_l_mac(DisasContext *dc, arg_ab *a)
{
    gen_mac(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_msb(DisasContext *dc, arg_ab *a)
{
    gen_msb(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_macu(DisasContext *dc, arg_ab *a)
{
    gen_macu(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_msbu(DisasContext *dc, arg_ab *a)
{
    gen_msbu(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}
static bool trans_l_slli(DisasContext *dc, arg_dal *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_shli_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
                    a->l & (TARGET_LONG_BITS - 1));
    return true;
}

static bool trans_l_srli(DisasContext *dc, arg_dal *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_shri_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
                    a->l & (TARGET_LONG_BITS - 1));
    return true;
}

static bool trans_l_srai(DisasContext *dc, arg_dal *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_sari_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
                    a->l & (TARGET_LONG_BITS - 1));
    return true;
}

static bool trans_l_rori(DisasContext *dc, arg_dal *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_rotri_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
                     a->l & (TARGET_LONG_BITS - 1));
    return true;
}

static bool trans_l_movhi(DisasContext *dc, arg_l_movhi *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_movi_tl(cpu_R(dc, a->d), a->k << 16);
    return true;
}

static bool trans_l_macrc(DisasContext *dc, arg_l_macrc *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_trunc_i64_tl(cpu_R(dc, a->d), cpu_mac);
    tcg_gen_movi_i64(cpu_mac, 0);
    return true;
}
static bool trans_l_sfeq(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfne(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfgtu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfgeu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfltu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfleu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfgts(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfges(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sflts(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfles(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LE, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfeqi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfnei(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfgtui(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfgeui(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfltui(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfleui(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfgtsi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfgesi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfltsi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sflesi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}
static bool trans_l_sys(DisasContext *dc, arg_l_sys *a)
{
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_SYSCALL);
    dc->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool trans_l_trap(DisasContext *dc, arg_l_trap *a)
{
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_TRAP);
    dc->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool trans_l_msync(DisasContext *dc, arg_l_msync *a)
{
    tcg_gen_mb(TCG_MO_ALL);
    return true;
}

static bool trans_l_psync(DisasContext *dc, arg_l_psync *a)
{
    return true;
}

static bool trans_l_csync(DisasContext *dc, arg_l_csync *a)
{
    return true;
}

static bool trans_l_rfe(DisasContext *dc, arg_l_rfe *a)
{
    if (is_user(dc)) {
        gen_illegal_exception(dc);
    } else {
        gen_helper_rfe(cpu_env);
        dc->base.is_jmp = DISAS_EXIT;
    }
    return true;
}
static bool do_fp2(DisasContext *dc, arg_da *a,
                   void (*fn)(TCGv, TCGv_env, TCGv))
{
    if (!check_of32s(dc)) {
        return false;
    }
    check_r0_write(dc, a->d);
    fn(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->a));
    gen_helper_update_fpcsr(cpu_env);
    return true;
}

static bool do_fp3(DisasContext *dc, arg_dab *a,
                   void (*fn)(TCGv, TCGv_env, TCGv, TCGv))
{
    if (!check_of32s(dc)) {
        return false;
    }
    check_r0_write(dc, a->d);
    fn(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->a), cpu_R(dc, a->b));
    gen_helper_update_fpcsr(cpu_env);
    return true;
}

static bool do_fpcmp(DisasContext *dc, arg_ab *a,
                     void (*fn)(TCGv, TCGv_env, TCGv, TCGv),
                     bool inv, bool swap)
{
    if (!check_of32s(dc)) {
        return false;
    }
    if (swap) {
        fn(cpu_sr_f, cpu_env, cpu_R(dc, a->b), cpu_R(dc, a->a));
    } else {
        fn(cpu_sr_f, cpu_env, cpu_R(dc, a->a), cpu_R(dc, a->b));
    }
    if (inv) {
        tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
    }
    gen_helper_update_fpcsr(cpu_env);
    return true;
}
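/*
 * do_fpcmp composes the ordered comparisons from only the eq/lt/le
 * helpers: "swap" reverses the operands (gt is lt with swapped inputs,
 * ge is le with swapped inputs) and "inv" negates the result (ne is
 * !eq).  The unordered lf.sfu* variants below use dedicated
 * ueq/ult/ule/un helpers instead of the inv flag.
 */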
static bool trans_lf_add_s(DisasContext *dc, arg_dab *a)
{
    return do_fp3(dc, a, gen_helper_float_add_s);
}

static bool trans_lf_sub_s(DisasContext *dc, arg_dab *a)
{
    return do_fp3(dc, a, gen_helper_float_sub_s);
}

static bool trans_lf_mul_s(DisasContext *dc, arg_dab *a)
{
    return do_fp3(dc, a, gen_helper_float_mul_s);
}

static bool trans_lf_div_s(DisasContext *dc, arg_dab *a)
{
    return do_fp3(dc, a, gen_helper_float_div_s);
}

static bool trans_lf_rem_s(DisasContext *dc, arg_dab *a)
{
    return do_fp3(dc, a, gen_helper_float_rem_s);
}

static bool trans_lf_itof_s(DisasContext *dc, arg_da *a)
{
    return do_fp2(dc, a, gen_helper_itofs);
}

static bool trans_lf_ftoi_s(DisasContext *dc, arg_da *a)
{
    return do_fp2(dc, a, gen_helper_ftois);
}
static bool trans_lf_madd_s(DisasContext *dc, arg_dab *a)
{
    if (!check_of32s(dc)) {
        return false;
    }
    check_r0_write(dc, a->d);
    gen_helper_float_madd_s(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->d),
                            cpu_R(dc, a->a), cpu_R(dc, a->b));
    gen_helper_update_fpcsr(cpu_env);
    return true;
}
static bool trans_lf_sfeq_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_eq_s, false, false);
}

static bool trans_lf_sfne_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_eq_s, true, false);
}

static bool trans_lf_sfgt_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_lt_s, false, true);
}

static bool trans_lf_sfge_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_le_s, false, true);
}

static bool trans_lf_sflt_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_lt_s, false, false);
}

static bool trans_lf_sfle_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_le_s, false, false);
}
static bool trans_lf_sfueq_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_ueq_s, false, false);
}

static bool trans_lf_sfult_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_ult_s, false, false);
}

static bool trans_lf_sfugt_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_ult_s, false, true);
}

static bool trans_lf_sfule_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_ule_s, false, false);
}

static bool trans_lf_sfuge_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_ule_s, false, true);
}

static bool trans_lf_sfun_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_un_s, false, false);
}
static bool check_pair(DisasContext *dc, int r, int p)
{
    return r + 1 + p < 32;
}

static void load_pair(DisasContext *dc, TCGv_i64 t, int r, int p)
{
    tcg_gen_concat_i32_i64(t, cpu_R(dc, r + 1 + p), cpu_R(dc, r));
}

static void save_pair(DisasContext *dc, TCGv_i64 t, int r, int p)
{
    tcg_gen_extr_i64_i32(cpu_R(dc, r + 1 + p), cpu_R(dc, r), t);
}
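/*
 * Double-precision values on the 32-bit core live in a pair of GPRs:
 * rN holds the high word and rN+1+offset the low word, with the offset
 * taken from the instruction's pairing bits.  check_pair() rejects
 * encodings whose second register would fall beyond r31.
 */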
static bool do_dp3(DisasContext *dc, arg_dab_pair *a,
                   void (*fn)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0, t1;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->a, a->ap) ||
        !check_pair(dc, a->b, a->bp) ||
        !check_pair(dc, a->d, a->dp)) {
        return false;
    }
    check_r0_write(dc, a->d);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    load_pair(dc, t0, a->a, a->ap);
    load_pair(dc, t1, a->b, a->bp);
    fn(t0, cpu_env, t0, t1);
    save_pair(dc, t0, a->d, a->dp);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    gen_helper_update_fpcsr(cpu_env);
    return true;
}

static bool do_dp2(DisasContext *dc, arg_da_pair *a,
                   void (*fn)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 t0;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->a, a->ap) ||
        !check_pair(dc, a->d, a->dp)) {
        return false;
    }
    check_r0_write(dc, a->d);

    t0 = tcg_temp_new_i64();
    load_pair(dc, t0, a->a, a->ap);
    fn(t0, cpu_env, t0);
    save_pair(dc, t0, a->d, a->dp);
    tcg_temp_free_i64(t0);

    gen_helper_update_fpcsr(cpu_env);
    return true;
}

static bool do_dpcmp(DisasContext *dc, arg_ab_pair *a,
                     void (*fn)(TCGv, TCGv_env, TCGv_i64, TCGv_i64),
                     bool inv, bool swap)
{
    TCGv_i64 t0, t1;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->a, a->ap) ||
        !check_pair(dc, a->b, a->bp)) {
        return false;
    }

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    load_pair(dc, t0, a->a, a->ap);
    load_pair(dc, t1, a->b, a->bp);
    if (swap) {
        fn(cpu_sr_f, cpu_env, t1, t0);
    } else {
        fn(cpu_sr_f, cpu_env, t0, t1);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    if (inv) {
        tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
    }
    gen_helper_update_fpcsr(cpu_env);
    return true;
}
static bool trans_lf_add_d(DisasContext *dc, arg_dab_pair *a)
{
    return do_dp3(dc, a, gen_helper_float_add_d);
}

static bool trans_lf_sub_d(DisasContext *dc, arg_dab_pair *a)
{
    return do_dp3(dc, a, gen_helper_float_sub_d);
}

static bool trans_lf_mul_d(DisasContext *dc, arg_dab_pair *a)
{
    return do_dp3(dc, a, gen_helper_float_mul_d);
}

static bool trans_lf_div_d(DisasContext *dc, arg_dab_pair *a)
{
    return do_dp3(dc, a, gen_helper_float_div_d);
}

static bool trans_lf_rem_d(DisasContext *dc, arg_dab_pair *a)
{
    return do_dp3(dc, a, gen_helper_float_rem_d);
}

static bool trans_lf_itof_d(DisasContext *dc, arg_da_pair *a)
{
    return do_dp2(dc, a, gen_helper_itofd);
}

static bool trans_lf_ftoi_d(DisasContext *dc, arg_da_pair *a)
{
    return do_dp2(dc, a, gen_helper_ftoid);
}
static bool trans_lf_stod_d(DisasContext *dc, arg_lf_stod_d *a)
{
    TCGv_i64 t0;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->d, a->dp)) {
        return false;
    }
    check_r0_write(dc, a->d);

    t0 = tcg_temp_new_i64();
    gen_helper_stod(t0, cpu_env, cpu_R(dc, a->a));
    save_pair(dc, t0, a->d, a->dp);
    tcg_temp_free_i64(t0);

    gen_helper_update_fpcsr(cpu_env);
    return true;
}

static bool trans_lf_dtos_d(DisasContext *dc, arg_lf_dtos_d *a)
{
    TCGv_i64 t0;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->a, a->ap)) {
        return false;
    }
    check_r0_write(dc, a->d);

    t0 = tcg_temp_new_i64();
    load_pair(dc, t0, a->a, a->ap);
    gen_helper_dtos(cpu_R(dc, a->d), cpu_env, t0);
    tcg_temp_free_i64(t0);

    gen_helper_update_fpcsr(cpu_env);
    return true;
}

static bool trans_lf_madd_d(DisasContext *dc, arg_dab_pair *a)
{
    TCGv_i64 t0, t1, t2;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->a, a->ap) ||
        !check_pair(dc, a->b, a->bp) ||
        !check_pair(dc, a->d, a->dp)) {
        return false;
    }
    check_r0_write(dc, a->d);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    load_pair(dc, t0, a->d, a->dp);
    load_pair(dc, t1, a->a, a->ap);
    load_pair(dc, t2, a->b, a->bp);
    gen_helper_float_madd_d(t0, cpu_env, t0, t1, t2);
    save_pair(dc, t0, a->d, a->dp);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);

    gen_helper_update_fpcsr(cpu_env);
    return true;
}
static bool trans_lf_sfeq_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_eq_d, false, false);
}

static bool trans_lf_sfne_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_eq_d, true, false);
}

static bool trans_lf_sfgt_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_lt_d, false, true);
}

static bool trans_lf_sfge_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_le_d, false, true);
}

static bool trans_lf_sflt_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_lt_d, false, false);
}

static bool trans_lf_sfle_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_le_d, false, false);
}

static bool trans_lf_sfueq_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_ueq_d, false, false);
}

static bool trans_lf_sfule_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_ule_d, false, false);
}

static bool trans_lf_sfuge_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_ule_d, false, true);
}

static bool trans_lf_sfult_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_ult_d, false, false);
}

static bool trans_lf_sfugt_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_ult_d, false, true);
}

static bool trans_lf_sfun_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_un_d, false, false);
}
static void openrisc_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUOpenRISCState *env = cs->env_ptr;
    int bound;

    dc->mem_idx = cpu_mmu_index(env, false);
    dc->tb_flags = dc->base.tb->flags;
    dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
    dc->cpucfgr = env->cpucfgr;
    dc->avr = env->avr;
    dc->jmp_pc_imm = -1;

    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void openrisc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
    DisasContext *dc = container_of(db, DisasContext, base);

    /* Allow the TCG optimizer to see that R0 == 0,
       when it's true, which is the common case.  */
    dc->zero = tcg_constant_tl(0);
    if (dc->tb_flags & TB_FLAGS_R0_0) {
        dc->R0 = dc->zero;
    } else {
        dc->R0 = cpu_regs[0];
    }
}
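/*
 * Sketch of the R0 handling set up above: while TB_FLAGS_R0_0 is set,
 * dc->R0 aliases the constant zero, so cpu_R(dc, 0) lets the optimizer
 * treat every read of r0 as 0; check_r0_write() swaps dc->R0 back to
 * the real register as soon as a translated insn names r0 as its
 * destination.
 */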
static void openrisc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, (dc->delayed_branch ? 1 : 0)
                       | (dc->base.num_insns > 1 ? 2 : 0));
}

static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    uint32_t insn = translator_ldl(&cpu->env, &dc->base, dc->base.pc_next);

    if (!decode(dc, insn)) {
        gen_illegal_exception(dc);
    }
    dc->base.pc_next += 4;

    /* When exiting the delay slot normally, exit via jmp_pc.
     * For DISAS_NORETURN, we have raised an exception and already exited.
     * For DISAS_EXIT, we found l.rfe in a delay slot.  There's nothing
     * in the manual saying this is illegal, but it surely should be.
     * At least or1ksim overrides pcnext and ignores the branch.
     */
    if (dc->delayed_branch
        && --dc->delayed_branch == 0
        && dc->base.is_jmp == DISAS_NEXT) {
        dc->base.is_jmp = DISAS_JUMP;
    }
}
static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong jmp_dest;

    /* If we have already exited the TB, nothing following has effect.  */
    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }

    /* Adjust the delayed branch state for the next TB.  */
    if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
        tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
    }

    /* For DISAS_TOO_MANY, jump to the next insn.  */
    jmp_dest = dc->base.pc_next;
    tcg_gen_movi_tl(cpu_ppc, jmp_dest - 4);

    switch (dc->base.is_jmp) {
    case DISAS_JUMP:
        jmp_dest = dc->jmp_pc_imm;
        if (jmp_dest == -1) {
            /* The jump destination is indirect/computed; use jmp_pc.  */
            tcg_gen_mov_tl(cpu_pc, jmp_pc);
            tcg_gen_discard_tl(jmp_pc);
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* The jump destination is direct; use jmp_pc_imm.
           However, we will have stored into jmp_pc as well;
           we know now that it wasn't needed.  */
        tcg_gen_discard_tl(jmp_pc);
        /* fall through */

    case DISAS_TOO_MANY:
        if (translator_use_goto_tb(&dc->base, jmp_dest)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_tl(cpu_pc, jmp_dest);
            tcg_gen_exit_tb(dc->base.tb, 0);
            break;
        }
        tcg_gen_movi_tl(cpu_pc, jmp_dest);
        tcg_gen_lookup_and_goto_ptr();
        break;

    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}
static void openrisc_tr_disas_log(const DisasContextBase *dcbase,
                                  CPUState *cs, FILE *logfile)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);

    fprintf(logfile, "IN: %s\n", lookup_symbol(s->base.pc_first));
    target_disas(logfile, cs, s->base.pc_first, s->base.tb->size);
}

static const TranslatorOps openrisc_tr_ops = {
    .init_disas_context = openrisc_tr_init_disas_context,
    .tb_start           = openrisc_tr_tb_start,
    .insn_start         = openrisc_tr_insn_start,
    .translate_insn     = openrisc_tr_translate_insn,
    .tb_stop            = openrisc_tr_tb_stop,
    .disas_log          = openrisc_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;

    translator_loop(cs, tb, max_insns, pc, host_pc,
                    &openrisc_tr_ops, &ctx.base);
}

void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    CPUOpenRISCState *env = &cpu->env;
    int i;

    qemu_fprintf(f, "PC=%08x\n", env->pc);
    for (i = 0; i < 32; ++i) {
        qemu_fprintf(f, "R%02d=%08x%c", i, cpu_get_gpr(env, i),
                     (i % 4) == 3 ? '\n' : ' ');
    }
}

void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->dflag = data[1] & 1;
    if (data[1] & 2) {
        env->ppc = env->pc - 4;
    }
}