/*
 * OpenRISC translation
 *
 * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
 *                         Feng Gao <gf91597@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "tcg/tcg-op.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "exec/translator.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* is_jmp field values */
#define DISAS_EXIT    DISAS_TARGET_0  /* force exit to main loop */
#define DISAS_JUMP    DISAS_TARGET_1  /* exit via jmp_pc/jmp_pc_imm */

typedef struct DisasContext {
    DisasContextBase base;
    uint32_t mem_idx;
    uint32_t tb_flags;
    uint32_t delayed_branch;
    uint32_t cpucfgr;
    uint32_t avr;

    /* If not -1, jmp_pc contains this value and so is a direct jump.  */
    target_ulong jmp_pc_imm;

    /* The temporary corresponding to register 0 for this compilation.  */
    TCGv R0;
    /* The constant zero. */
    TCGv zero;
} DisasContext;

static inline bool is_user(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return !(dc->tb_flags & TB_FLAGS_SM);
#endif
}

/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

static TCGv cpu_sr;
static TCGv cpu_regs[32];
static TCGv cpu_pc;
static TCGv jmp_pc;             /* l.jr/l.jalr temp pc */
static TCGv cpu_ppc;
static TCGv cpu_sr_f;           /* bf/bnf, F flag taken */
static TCGv cpu_sr_cy;          /* carry (unsigned overflow) */
static TCGv cpu_sr_ov;          /* signed overflow */
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;
static TCGv_i32 fpcsr;
static TCGv_i64 cpu_mac;        /* MACHI:MACLO */
static TCGv_i32 cpu_dflag;

void openrisc_translate_init(void)
{
    static const char * const regnames[] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    };
    int i;

    cpu_sr = tcg_global_mem_new(tcg_env,
                                offsetof(CPUOpenRISCState, sr), "sr");
    cpu_dflag = tcg_global_mem_new_i32(tcg_env,
                                       offsetof(CPUOpenRISCState, dflag),
                                       "dflag");
    cpu_pc = tcg_global_mem_new(tcg_env,
                                offsetof(CPUOpenRISCState, pc), "pc");
    cpu_ppc = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUOpenRISCState, ppc), "ppc");
    jmp_pc = tcg_global_mem_new(tcg_env,
                                offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
    cpu_sr_f = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUOpenRISCState, sr_f), "sr_f");
    cpu_sr_cy = tcg_global_mem_new(tcg_env,
                                   offsetof(CPUOpenRISCState, sr_cy), "sr_cy");
    cpu_sr_ov = tcg_global_mem_new(tcg_env,
                                   offsetof(CPUOpenRISCState, sr_ov), "sr_ov");
    cpu_lock_addr = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUOpenRISCState, lock_addr),
                                       "lock_addr");
    cpu_lock_value = tcg_global_mem_new(tcg_env,
                                        offsetof(CPUOpenRISCState, lock_value),
                                        "lock_value");
    fpcsr = tcg_global_mem_new_i32(tcg_env,
                                   offsetof(CPUOpenRISCState, fpcsr),
                                   "fpcsr");
    cpu_mac = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUOpenRISCState, mac),
                                     "mac");
    for (i = 0; i < 32; i++) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUOpenRISCState,
                                                  shadow_gpr[0][i]),
                                         regnames[i]);
    }
}

static void gen_exception(DisasContext *dc, unsigned int excp)
{
    gen_helper_exception(tcg_env, tcg_constant_i32(excp));
}

static void gen_illegal_exception(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_ILLEGAL);
    dc->base.is_jmp = DISAS_NORETURN;
}

static bool check_v1_3(DisasContext *dc)
{
    return dc->avr >= 0x01030000;
}

static bool check_of32s(DisasContext *dc)
{
    return dc->cpucfgr & CPUCFGR_OF32S;
}

static bool check_of64a32s(DisasContext *dc)
{
    return dc->cpucfgr & CPUCFGR_OF64A32S;
}

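/*
 * Return the TCG value backing general register REG.  R0 is special:
 * it resolves to the per-TB placeholder dc->R0, which tb_start points
 * at a constant zero whenever TB_FLAGS_R0_0 says R0 is known to be
 * zero, so the optimizer can fold those uses away.
 */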
static TCGv cpu_R(DisasContext *dc, int reg)
{
    if (reg == 0) {
        return dc->R0;
    } else {
        return cpu_regs[reg];
    }
}

/*
 * We're about to write to REG.  On the off-chance that the user is
 * writing to R0, re-instate the architectural register.
 */
static void check_r0_write(DisasContext *dc, int reg)
{
    if (unlikely(reg == 0)) {
        dc->R0 = cpu_regs[0];
    }
}

static void gen_ove_cy(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cy(tcg_env);
    }
}

static void gen_ove_ov(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_ov(tcg_env);
    }
}

static void gen_ove_cyov(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cyov(tcg_env);
    }
}

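/*
 * Addition with flag computation.  The carry is the high half of a
 * double-word add of the zero-extended operands; signed overflow is
 * recorded in the sign bit of cpu_sr_ov as (res ^ srcb) & ~(srca ^ srcb),
 * i.e. both operands had the same sign but the result's sign differs.
 */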
static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, dc->zero, srcb, dc->zero);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);

    tcg_gen_mov_tl(dest, res);

    gen_ove_cyov(dc);
}

static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, dc->zero, cpu_sr_cy, dc->zero);
    tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, dc->zero);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);

    tcg_gen_mov_tl(dest, res);

    gen_ove_cyov(dc);
}

static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv res = tcg_temp_new();

    tcg_gen_sub_tl(res, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_cy, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_ov, res, srcb);
    tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb);

    tcg_gen_mov_tl(dest, res);

    gen_ove_cyov(dc);
}

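/*
 * Signed multiply: muls2 produces the double-width product, which
 * overflows exactly when the high half differs from the sign extension
 * of the low half; negsetcond sets cpu_sr_ov to -1 on overflow and 0
 * otherwise.
 */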
static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
    tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
    tcg_gen_negsetcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);

    gen_ove_ov(dc);
}

static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    tcg_gen_muls2_tl(dest, cpu_sr_cy, srca, srcb);
    tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);

    gen_ove_cy(dc);
}

static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_ov);
    tcg_gen_div_tl(dest, srca, t0);

    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
    gen_ove_ov(dc);
}

static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_cy);
    tcg_gen_divu_tl(dest, srca, t0);

    gen_ove_cy(dc);
}

static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_ov, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
        tcg_gen_sari_i64(t1, cpu_mac, 63);
        tcg_gen_negsetcond_i64(TCG_COND_NE, t1, t1, high);
        tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
    }
    gen_ove_ov(dc);
}

static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_cy, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_mulu2_i64(cpu_mac, high, t1, t2);
        tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0);
        tcg_gen_trunc_i64_tl(cpu_sr_cy, high);
    }
    gen_ove_cy(dc);
}

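/*
 * Multiply-accumulate: the widened product is added to (or, for l.msb,
 * subtracted from) the 64-bit MACHI:MACLO accumulator.  Overflow of
 * that step is computed with the usual xor trick and ends up in the
 * sign bit of cpu_sr_ov (the high word of the 64-bit mask on 32-bit
 * targets).
 */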
static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_andc_i64(t1, t1, t2);

#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif

    gen_ove_ov(dc);
}

static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t1);

    gen_ove_cy(dc);
}

static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_and_i64(t1, t1, t2);

#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif

    gen_ove_ov(dc);
}

static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t2);

    gen_ove_cy(dc);
}

static bool trans_l_add(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    gen_add(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_addc(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    gen_addc(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sub(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    gen_sub(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_and(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_and_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_or(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_or_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_xor(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_xor_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sll(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_shl_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_srl(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_shr_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sra(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_sar_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_ror(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_rotr_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_exths(DisasContext *dc, arg_da *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_ext16s_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
    return true;
}

static bool trans_l_extbs(DisasContext *dc, arg_da *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_ext8s_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
    return true;
}

static bool trans_l_exthz(DisasContext *dc, arg_da *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_ext16u_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
    return true;
}

static bool trans_l_extbz(DisasContext *dc, arg_da *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_ext8u_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
    return true;
}

static bool trans_l_cmov(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_movcond_tl(TCG_COND_NE, cpu_R(dc, a->d), cpu_sr_f, dc->zero,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

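/*
 * l.ff1 returns the 1-based position of the least significant set bit
 * (0 when the source is zero): ctz with a default of -1, plus one.
 * l.fl1 returns the 1-based position of the most significant set bit:
 * TARGET_LONG_BITS minus clz, where clz defaults to TARGET_LONG_BITS
 * for a zero source, again yielding 0.
 */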
static bool trans_l_ff1(DisasContext *dc, arg_da *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_ctzi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), -1);
    tcg_gen_addi_tl(cpu_R(dc, a->d), cpu_R(dc, a->d), 1);
    return true;
}

static bool trans_l_fl1(DisasContext *dc, arg_da *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_clzi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), TARGET_LONG_BITS);
    tcg_gen_subfi_tl(cpu_R(dc, a->d), TARGET_LONG_BITS, cpu_R(dc, a->d));
    return true;
}

static bool trans_l_mul(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    gen_mul(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_mulu(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    gen_mulu(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_div(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    gen_div(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_divu(DisasContext *dc, arg_dab *a)
{
    check_r0_write(dc, a->d);
    gen_divu(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_muld(DisasContext *dc, arg_ab *a)
{
    gen_muld(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_muldu(DisasContext *dc, arg_ab *a)
{
    gen_muldu(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

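/*
 * Branches and jumps write the target into jmp_pc and set
 * delayed_branch = 2: the counter is decremented once per translated
 * insn, so the branch takes effect after the delay-slot insn (see
 * openrisc_tr_translate_insn).  A compile-time-constant target is also
 * cached in jmp_pc_imm so that tb_stop can chain directly to it.
 */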
static bool trans_l_j(DisasContext *dc, arg_l_j *a)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;

    tcg_gen_movi_tl(jmp_pc, tmp_pc);
    dc->jmp_pc_imm = tmp_pc;
    dc->delayed_branch = 2;
    return true;
}

static bool trans_l_jal(DisasContext *dc, arg_l_jal *a)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
    target_ulong ret_pc = dc->base.pc_next + 8;

    tcg_gen_movi_tl(cpu_regs[9], ret_pc);
    /* Optimize jal being used to load the PC for PIC.  */
    if (tmp_pc != ret_pc) {
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        dc->jmp_pc_imm = tmp_pc;
        dc->delayed_branch = 2;
    }
    return true;
}

static void do_bf(DisasContext *dc, arg_l_bf *a, TCGCond cond)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
    TCGv t_next = tcg_constant_tl(dc->base.pc_next + 8);
    TCGv t_true = tcg_constant_tl(tmp_pc);

    tcg_gen_movcond_tl(cond, jmp_pc, cpu_sr_f, dc->zero, t_true, t_next);
    dc->delayed_branch = 2;
}

static bool trans_l_bf(DisasContext *dc, arg_l_bf *a)
{
    do_bf(dc, a, TCG_COND_NE);
    return true;
}

static bool trans_l_bnf(DisasContext *dc, arg_l_bf *a)
{
    do_bf(dc, a, TCG_COND_EQ);
    return true;
}

static bool trans_l_jr(DisasContext *dc, arg_l_jr *a)
{
    tcg_gen_mov_tl(jmp_pc, cpu_R(dc, a->b));
    dc->delayed_branch = 2;
    return true;
}

static bool trans_l_jalr(DisasContext *dc, arg_l_jalr *a)
{
    tcg_gen_mov_tl(jmp_pc, cpu_R(dc, a->b));
    tcg_gen_movi_tl(cpu_regs[9], dc->base.pc_next + 8);
    dc->delayed_branch = 2;
    return true;
}

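/*
 * l.lwa/l.swa form a load-linked/store-conditional pair: the load
 * records the address and loaded value in cpu_lock_addr/cpu_lock_value,
 * and the store sets SR_F only if it targets that same address and an
 * atomic cmpxchg still finds the recorded value there.
 */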
static bool trans_l_lwa(DisasContext *dc, arg_load *a)
{
    TCGv ea;

    check_r0_write(dc, a->d);
    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
    tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, MO_TEUL);
    tcg_gen_mov_tl(cpu_lock_addr, ea);
    tcg_gen_mov_tl(cpu_lock_value, cpu_R(dc, a->d));
    return true;
}

static void do_load(DisasContext *dc, arg_load *a, MemOp mop)
{
    TCGv ea;

    check_r0_write(dc, a->d);
    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
    tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, mop);
}

static bool trans_l_lwz(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TEUL);
    return true;
}

static bool trans_l_lws(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TESL);
    return true;
}

static bool trans_l_lbz(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_UB);
    return true;
}

static bool trans_l_lbs(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_SB);
    return true;
}

static bool trans_l_lhz(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TEUW);
    return true;
}

static bool trans_l_lhs(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TESW);
    return true;
}

static bool trans_l_swa(DisasContext *dc, arg_store *a)
{
    TCGv ea, val;
    TCGLabel *lab_fail, *lab_done;

    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);

    val = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
                              cpu_R(dc, a->b), dc->mem_idx, MO_TEUL);
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);

    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    tcg_gen_movi_tl(cpu_sr_f, 0);

    gen_set_label(lab_done);
    tcg_gen_movi_tl(cpu_lock_addr, -1);
    return true;
}

static void do_store(DisasContext *dc, arg_store *a, MemOp mop)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R(dc, a->a), a->i);
    tcg_gen_qemu_st_tl(cpu_R(dc, a->b), t0, dc->mem_idx, mop);
}

static bool trans_l_sw(DisasContext *dc, arg_store *a)
{
    do_store(dc, a, MO_TEUL);
    return true;
}

static bool trans_l_sb(DisasContext *dc, arg_store *a)
{
    do_store(dc, a, MO_UB);
    return true;
}

static bool trans_l_sh(DisasContext *dc, arg_store *a)
{
    do_store(dc, a, MO_TEUW);
    return true;
}

static bool trans_l_nop(DisasContext *dc, arg_l_nop *a)
{
    return true;
}

static bool trans_l_adrp(DisasContext *dc, arg_l_adrp *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    check_r0_write(dc, a->d);

    tcg_gen_movi_i32(cpu_R(dc, a->d),
                     (dc->base.pc_next & TARGET_PAGE_MASK) +
                     ((target_long)a->i << TARGET_PAGE_BITS));
    return true;
}

static bool trans_l_addi(DisasContext *dc, arg_rri *a)
{
    check_r0_write(dc, a->d);
    gen_add(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i));
    return true;
}

static bool trans_l_addic(DisasContext *dc, arg_rri *a)
{
    check_r0_write(dc, a->d);
    gen_addc(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i));
    return true;
}

static bool trans_l_muli(DisasContext *dc, arg_rri *a)
{
    check_r0_write(dc, a->d);
    gen_mul(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i));
    return true;
}

static bool trans_l_maci(DisasContext *dc, arg_l_maci *a)
{
    gen_mac(dc, cpu_R(dc, a->a), tcg_constant_tl(a->i));
    return true;
}

static bool trans_l_andi(DisasContext *dc, arg_rrk *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_andi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k);
    return true;
}

static bool trans_l_ori(DisasContext *dc, arg_rrk *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_ori_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k);
    return true;
}

static bool trans_l_xori(DisasContext *dc, arg_rri *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_xori_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->i);
    return true;
}

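/*
 * SPR accesses may touch I/O state (such as the tick timer) and may
 * change control state, so the current PC (or the pending branch
 * target) is written back and the TB is exited so that the new state
 * takes effect.
 */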
static bool trans_l_mfspr(DisasContext *dc, arg_l_mfspr *a)
{
    TCGv spr = tcg_temp_new();

    check_r0_write(dc, a->d);

    if (translator_io_start(&dc->base)) {
        if (dc->delayed_branch) {
            tcg_gen_mov_tl(cpu_pc, jmp_pc);
            tcg_gen_discard_tl(jmp_pc);
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4);
        }
        dc->base.is_jmp = DISAS_EXIT;
    }

    tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k);
    gen_helper_mfspr(cpu_R(dc, a->d), tcg_env, cpu_R(dc, a->d), spr);
    return true;
}

static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a)
{
    TCGv spr = tcg_temp_new();

    translator_io_start(&dc->base);

    /*
     * For SR, we will need to exit the TB to recognize the new
     * exception state.  For NPC, in theory this counts as a branch
     * (although the SPR only exists for use by an ICE).  Save all
     * of the cpu state first, allowing it to be overwritten.
     */
    if (dc->delayed_branch) {
        tcg_gen_mov_tl(cpu_pc, jmp_pc);
        tcg_gen_discard_tl(jmp_pc);
    } else {
        tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4);
    }
    dc->base.is_jmp = DISAS_EXIT;

    tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k);
    gen_helper_mtspr(tcg_env, spr, cpu_R(dc, a->b));
    return true;
}

static bool trans_l_mac(DisasContext *dc, arg_ab *a)
{
    gen_mac(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_msb(DisasContext *dc, arg_ab *a)
{
    gen_msb(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_macu(DisasContext *dc, arg_ab *a)
{
    gen_macu(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_msbu(DisasContext *dc, arg_ab *a)
{
    gen_msbu(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_slli(DisasContext *dc, arg_dal *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_shli_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
                    a->l & (TARGET_LONG_BITS - 1));
    return true;
}

static bool trans_l_srli(DisasContext *dc, arg_dal *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_shri_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
                    a->l & (TARGET_LONG_BITS - 1));
    return true;
}

static bool trans_l_srai(DisasContext *dc, arg_dal *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_sari_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
                    a->l & (TARGET_LONG_BITS - 1));
    return true;
}

static bool trans_l_rori(DisasContext *dc, arg_dal *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_rotri_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
                     a->l & (TARGET_LONG_BITS - 1));
    return true;
}

static bool trans_l_movhi(DisasContext *dc, arg_l_movhi *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_movi_tl(cpu_R(dc, a->d), a->k << 16);
    return true;
}

static bool trans_l_macrc(DisasContext *dc, arg_l_macrc *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_trunc_i64_tl(cpu_R(dc, a->d), cpu_mac);
    tcg_gen_movi_i64(cpu_mac, 0);
    return true;
}

static bool trans_l_sfeq(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfne(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfgtu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfgeu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfltu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfleu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfgts(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfges(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sflts(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfles(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LE,
                       cpu_sr_f, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfeqi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfnei(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfgtui(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfgeui(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfltui(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfleui(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfgtsi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfgesi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfltsi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sflesi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sys(DisasContext *dc, arg_l_sys *a)
{
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_SYSCALL);
    dc->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool trans_l_trap(DisasContext *dc, arg_l_trap *a)
{
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_TRAP);
    dc->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool trans_l_msync(DisasContext *dc, arg_l_msync *a)
{
    tcg_gen_mb(TCG_MO_ALL);
    return true;
}

static bool trans_l_psync(DisasContext *dc, arg_l_psync *a)
{
    return true;
}

static bool trans_l_csync(DisasContext *dc, arg_l_csync *a)
{
    return true;
}

static bool trans_l_rfe(DisasContext *dc, arg_l_rfe *a)
{
    if (is_user(dc)) {
        gen_illegal_exception(dc);
    } else {
        gen_helper_rfe(tcg_env);
        dc->base.is_jmp = DISAS_EXIT;
    }
    return true;
}

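/*
 * Single-precision FP helpers: each checks the OF32S unit bit in
 * CPUCFGR, calls the softfloat helper, and then updates FPCSR.  The
 * compare wrappers synthesize the remaining conditions from eq/lt/le
 * by optionally swapping the operands or inverting the result.
 */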
static bool do_fp2(DisasContext *dc, arg_da *a,
                   void (*fn)(TCGv, TCGv_env, TCGv))
{
    if (!check_of32s(dc)) {
        return false;
    }
    check_r0_write(dc, a->d);
    fn(cpu_R(dc, a->d), tcg_env, cpu_R(dc, a->a));
    gen_helper_update_fpcsr(tcg_env);
    return true;
}

static bool do_fp3(DisasContext *dc, arg_dab *a,
                   void (*fn)(TCGv, TCGv_env, TCGv, TCGv))
{
    if (!check_of32s(dc)) {
        return false;
    }
    check_r0_write(dc, a->d);
    fn(cpu_R(dc, a->d), tcg_env, cpu_R(dc, a->a), cpu_R(dc, a->b));
    gen_helper_update_fpcsr(tcg_env);
    return true;
}

static bool do_fpcmp(DisasContext *dc, arg_ab *a,
                     void (*fn)(TCGv, TCGv_env, TCGv, TCGv),
                     bool inv, bool swap)
{
    if (!check_of32s(dc)) {
        return false;
    }
    if (swap) {
        fn(cpu_sr_f, tcg_env, cpu_R(dc, a->b), cpu_R(dc, a->a));
    } else {
        fn(cpu_sr_f, tcg_env, cpu_R(dc, a->a), cpu_R(dc, a->b));
    }
    if (inv) {
        tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
    }
    gen_helper_update_fpcsr(tcg_env);
    return true;
}

static bool trans_lf_add_s(DisasContext *dc, arg_dab *a)
{
    return do_fp3(dc, a, gen_helper_float_add_s);
}

static bool trans_lf_sub_s(DisasContext *dc, arg_dab *a)
{
    return do_fp3(dc, a, gen_helper_float_sub_s);
}

static bool trans_lf_mul_s(DisasContext *dc, arg_dab *a)
{
    return do_fp3(dc, a, gen_helper_float_mul_s);
}

static bool trans_lf_div_s(DisasContext *dc, arg_dab *a)
{
    return do_fp3(dc, a, gen_helper_float_div_s);
}

static bool trans_lf_rem_s(DisasContext *dc, arg_dab *a)
{
    return do_fp3(dc, a, gen_helper_float_rem_s);
    return true;
}

static bool trans_lf_itof_s(DisasContext *dc, arg_da *a)
{
    return do_fp2(dc, a, gen_helper_itofs);
}

static bool trans_lf_ftoi_s(DisasContext *dc, arg_da *a)
{
    return do_fp2(dc, a, gen_helper_ftois);
}

static bool trans_lf_madd_s(DisasContext *dc, arg_dab *a)
{
    if (!check_of32s(dc)) {
        return false;
    }
    check_r0_write(dc, a->d);
    gen_helper_float_madd_s(cpu_R(dc, a->d), tcg_env, cpu_R(dc, a->d),
                            cpu_R(dc, a->a), cpu_R(dc, a->b));
    gen_helper_update_fpcsr(tcg_env);
    return true;
}

static bool trans_lf_sfeq_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_eq_s, false, false);
}

static bool trans_lf_sfne_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_eq_s, true, false);
}

static bool trans_lf_sfgt_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_lt_s, false, true);
}

static bool trans_lf_sfge_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_le_s, false, true);
}

static bool trans_lf_sflt_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_lt_s, false, false);
}

static bool trans_lf_sfle_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_le_s, false, false);
}

static bool trans_lf_sfueq_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_ueq_s, false, false);
}

static bool trans_lf_sfult_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_ult_s, false, false);
}

static bool trans_lf_sfugt_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_ult_s, false, true);
}

static bool trans_lf_sfule_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_ule_s, false, false);
}

static bool trans_lf_sfuge_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_ule_s, false, true);
}

static bool trans_lf_sfun_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_un_s, false, false);
}

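/*
 * Double-precision operands on a 32-bit build live in a pair of general
 * registers, r and r+1+p.  check_pair rejects encodings whose pair would
 * run past r31; load_pair/save_pair concatenate and split the two halves
 * as a TCGv_i64.
 */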
static bool check_pair(DisasContext *dc, int r, int p)
{
    return r + 1 + p < 32;
}

static void load_pair(DisasContext *dc, TCGv_i64 t, int r, int p)
{
    tcg_gen_concat_i32_i64(t, cpu_R(dc, r + 1 + p), cpu_R(dc, r));
}

static void save_pair(DisasContext *dc, TCGv_i64 t, int r, int p)
{
    tcg_gen_extr_i64_i32(cpu_R(dc, r + 1 + p), cpu_R(dc, r), t);
}

static bool do_dp3(DisasContext *dc, arg_dab_pair *a,
                   void (*fn)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0, t1;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->a, a->ap) ||
        !check_pair(dc, a->b, a->bp) ||
        !check_pair(dc, a->d, a->dp)) {
        return false;
    }
    check_r0_write(dc, a->d);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    load_pair(dc, t0, a->a, a->ap);
    load_pair(dc, t1, a->b, a->bp);
    fn(t0, tcg_env, t0, t1);
    save_pair(dc, t0, a->d, a->dp);

    gen_helper_update_fpcsr(tcg_env);
    return true;
}

static bool do_dp2(DisasContext *dc, arg_da_pair *a,
                   void (*fn)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 t0;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->a, a->ap) ||
        !check_pair(dc, a->d, a->dp)) {
        return false;
    }
    check_r0_write(dc, a->d);

    t0 = tcg_temp_new_i64();
    load_pair(dc, t0, a->a, a->ap);
    fn(t0, tcg_env, t0);
    save_pair(dc, t0, a->d, a->dp);

    gen_helper_update_fpcsr(tcg_env);
    return true;
}

static bool do_dpcmp(DisasContext *dc, arg_ab_pair *a,
                     void (*fn)(TCGv, TCGv_env, TCGv_i64, TCGv_i64),
                     bool inv, bool swap)
{
    TCGv_i64 t0, t1;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->a, a->ap) ||
        !check_pair(dc, a->b, a->bp)) {
        return false;
    }

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    load_pair(dc, t0, a->a, a->ap);
    load_pair(dc, t1, a->b, a->bp);
    if (swap) {
        fn(cpu_sr_f, tcg_env, t1, t0);
    } else {
        fn(cpu_sr_f, tcg_env, t0, t1);
    }

    if (inv) {
        tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
    }
    gen_helper_update_fpcsr(tcg_env);
    return true;
}

static bool trans_lf_add_d(DisasContext *dc, arg_dab_pair *a)
{
    return do_dp3(dc, a, gen_helper_float_add_d);
}

static bool trans_lf_sub_d(DisasContext *dc, arg_dab_pair *a)
{
    return do_dp3(dc, a, gen_helper_float_sub_d);
}

static bool trans_lf_mul_d(DisasContext *dc, arg_dab_pair *a)
{
    return do_dp3(dc, a, gen_helper_float_mul_d);
}

static bool trans_lf_div_d(DisasContext *dc, arg_dab_pair *a)
{
    return do_dp3(dc, a, gen_helper_float_div_d);
}

static bool trans_lf_rem_d(DisasContext *dc, arg_dab_pair *a)
{
    return do_dp3(dc, a, gen_helper_float_rem_d);
}

static bool trans_lf_itof_d(DisasContext *dc, arg_da_pair *a)
{
    return do_dp2(dc, a, gen_helper_itofd);
}

static bool trans_lf_ftoi_d(DisasContext *dc, arg_da_pair *a)
{
    return do_dp2(dc, a, gen_helper_ftoid);
}

static bool trans_lf_stod_d(DisasContext *dc, arg_lf_stod_d *a)
{
    TCGv_i64 t0;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->d, a->dp)) {
        return false;
    }
    check_r0_write(dc, a->d);

    t0 = tcg_temp_new_i64();
    gen_helper_stod(t0, tcg_env, cpu_R(dc, a->a));
    save_pair(dc, t0, a->d, a->dp);

    gen_helper_update_fpcsr(tcg_env);
    return true;
}

static bool trans_lf_dtos_d(DisasContext *dc, arg_lf_dtos_d *a)
{
    TCGv_i64 t0;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->a, a->ap)) {
        return false;
    }
    check_r0_write(dc, a->d);

    t0 = tcg_temp_new_i64();
    load_pair(dc, t0, a->a, a->ap);
    gen_helper_dtos(cpu_R(dc, a->d), tcg_env, t0);

    gen_helper_update_fpcsr(tcg_env);
    return true;
}

static bool trans_lf_madd_d(DisasContext *dc, arg_dab_pair *a)
{
    TCGv_i64 t0, t1, t2;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->a, a->ap) ||
        !check_pair(dc, a->b, a->bp) ||
        !check_pair(dc, a->d, a->dp)) {
        return false;
    }
    check_r0_write(dc, a->d);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    load_pair(dc, t0, a->d, a->dp);
    load_pair(dc, t1, a->a, a->ap);
    load_pair(dc, t2, a->b, a->bp);
    gen_helper_float_madd_d(t0, tcg_env, t0, t1, t2);
    save_pair(dc, t0, a->d, a->dp);

    gen_helper_update_fpcsr(tcg_env);
    return true;
}

static bool trans_lf_sfeq_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_eq_d, false, false);
}

static bool trans_lf_sfne_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_eq_d, true, false);
}

static bool trans_lf_sfgt_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_lt_d, false, true);
}

static bool trans_lf_sfge_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_le_d, false, true);
}

static bool trans_lf_sflt_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_lt_d, false, false);
}

static bool trans_lf_sfle_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_le_d, false, false);
}

static bool trans_lf_sfueq_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_ueq_d, false, false);
}

static bool trans_lf_sfule_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_ule_d, false, false);
}

static bool trans_lf_sfuge_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_ule_d, false, true);
}

static bool trans_lf_sfult_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_ult_d, false, false);
}

static bool trans_lf_sfugt_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_ult_d, false, true);
}

static bool trans_lf_sfun_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_un_d, false, false);
}

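/*
 * Translator hooks.  init_disas_context caps max_insns so that a TB
 * never crosses a page: -(pc_first | TARGET_PAGE_MASK) is the number of
 * bytes remaining on the current page, and every insn is 4 bytes.
 */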
static void openrisc_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUOpenRISCState *env = cpu_env(cs);
    int bound;

    dc->mem_idx = cpu_mmu_index(env, false);
    dc->tb_flags = dc->base.tb->flags;
    dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
    dc->cpucfgr = env->cpucfgr;
    dc->avr = env->avr;
    dc->jmp_pc_imm = -1;

    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void openrisc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
    DisasContext *dc = container_of(db, DisasContext, base);

    /* Allow the TCG optimizer to see that R0 == 0,
       when it's true, which is the common case.  */
    dc->zero = tcg_constant_tl(0);
    if (dc->tb_flags & TB_FLAGS_R0_0) {
        dc->R0 = dc->zero;
    } else {
        dc->R0 = cpu_regs[0];
    }
}

static void openrisc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, (dc->delayed_branch ? 1 : 0)
                       | (dc->base.num_insns > 1 ? 2 : 0));
}

static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    uint32_t insn = translator_ldl(&cpu->env, &dc->base, dc->base.pc_next);

    if (!decode(dc, insn)) {
        gen_illegal_exception(dc);
    }
    dc->base.pc_next += 4;

    /* When exiting the delay slot normally, exit via jmp_pc.
     * For DISAS_NORETURN, we have raised an exception and already exited.
     * For DISAS_EXIT, we found l.rfe in a delay slot.  There's nothing
     * in the manual saying this is illegal, but surely it should be.
     * At least or1ksim overrides pcnext and ignores the branch.
     */
    if (dc->delayed_branch
        && --dc->delayed_branch == 0
        && dc->base.is_jmp == DISAS_NEXT) {
        dc->base.is_jmp = DISAS_JUMP;
    }
}

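/*
 * At the end of the TB: a direct target (jmp_pc_imm != -1) or a plain
 * fall-through can be chained with goto_tb; an indirect target must be
 * taken from jmp_pc via lookup_and_goto_ptr; DISAS_EXIT returns to the
 * main loop so that changed SR or interrupt state is noticed.
 */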
static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong jmp_dest;

    /* If we have already exited the TB, nothing following has effect.  */
    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }

    /* Adjust the delayed branch state for the next TB.  */
    if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
        tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
    }

    /* For DISAS_TOO_MANY, jump to the next insn.  */
    jmp_dest = dc->base.pc_next;
    tcg_gen_movi_tl(cpu_ppc, jmp_dest - 4);

    switch (dc->base.is_jmp) {
    case DISAS_JUMP:
        jmp_dest = dc->jmp_pc_imm;
        if (jmp_dest == -1) {
            /* The jump destination is indirect/computed; use jmp_pc.  */
            tcg_gen_mov_tl(cpu_pc, jmp_pc);
            tcg_gen_discard_tl(jmp_pc);
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* The jump destination is direct; use jmp_pc_imm.
           However, we will have stored into jmp_pc as well;
           we know now that it wasn't needed.  */
        tcg_gen_discard_tl(jmp_pc);
        /* fallthru */

    case DISAS_TOO_MANY:
        if (translator_use_goto_tb(&dc->base, jmp_dest)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_tl(cpu_pc, jmp_dest);
            tcg_gen_exit_tb(dc->base.tb, 0);
            break;
        }
        tcg_gen_movi_tl(cpu_pc, jmp_dest);
        tcg_gen_lookup_and_goto_ptr();
        break;

    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void openrisc_tr_disas_log(const DisasContextBase *dcbase,
                                  CPUState *cs, FILE *logfile)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);

    fprintf(logfile, "IN: %s\n", lookup_symbol(s->base.pc_first));
    target_disas(logfile, cs, s->base.pc_first, s->base.tb->size);
}

static const TranslatorOps openrisc_tr_ops = {
    .init_disas_context = openrisc_tr_init_disas_context,
    .tb_start           = openrisc_tr_tb_start,
    .insn_start         = openrisc_tr_insn_start,
    .translate_insn     = openrisc_tr_translate_insn,
    .tb_stop            = openrisc_tr_tb_stop,
    .disas_log          = openrisc_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;

    translator_loop(cs, tb, max_insns, pc, host_pc,
                    &openrisc_tr_ops, &ctx.base);
}

void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    CPUOpenRISCState *env = &cpu->env;
    int i;

    qemu_fprintf(f, "PC=%08x\n", env->pc);
    for (i = 0; i < 32; ++i) {
        qemu_fprintf(f, "R%02d=%08x%c", i, cpu_get_gpr(env, i),
                     (i % 4) == 3 ? '\n' : ' ');