4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #define SH4_DEBUG_DISAS
22 //#define SH4_SINGLE_STEP
32 typedef struct DisasContext
{
33 struct TranslationBlock
*tb
;
42 int singlestep_enabled
;
47 #if defined(CONFIG_USER_ONLY)
48 #define IS_USER(ctx) 1
50 #define IS_USER(ctx) (!(ctx->sr & SR_MD))
54 BS_NONE
= 0, /* We go out of the TB without reaching a branch or an
57 BS_STOP
= 1, /* We want to stop translation for any reason */
58 BS_BRANCH
= 2, /* We reached a branch condition */
59 BS_EXCP
= 3, /* We reached an exception condition */
62 /* global register indexes */
63 static TCGv_ptr cpu_env
;
64 static TCGv cpu_gregs
[24];
65 static TCGv cpu_pc
, cpu_sr
, cpu_ssr
, cpu_spc
, cpu_gbr
;
66 static TCGv cpu_vbr
, cpu_sgr
, cpu_dbr
, cpu_mach
, cpu_macl
;
67 static TCGv cpu_pr
, cpu_fpscr
, cpu_fpul
, cpu_ldst
;
68 static TCGv cpu_fregs
[32];
70 /* internal register indexes */
71 static TCGv cpu_flags
, cpu_delayed_pc
;
73 static uint32_t gen_opc_hflags
[OPC_BUF_SIZE
];
75 #include "gen-icount.h"
77 static void sh4_translate_init(void)
80 static int done_init
= 0;
81 static const char * const gregnames
[24] = {
82 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
83 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
84 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
85 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
86 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
88 static const char * const fregnames
[32] = {
89 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
90 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
91 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
92 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
93 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
94 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
95 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
96 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
102 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
104 for (i
= 0; i
< 24; i
++)
105 cpu_gregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
106 offsetof(CPUSH4State
, gregs
[i
]),
109 cpu_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
110 offsetof(CPUSH4State
, pc
), "PC");
111 cpu_sr
= tcg_global_mem_new_i32(TCG_AREG0
,
112 offsetof(CPUSH4State
, sr
), "SR");
113 cpu_ssr
= tcg_global_mem_new_i32(TCG_AREG0
,
114 offsetof(CPUSH4State
, ssr
), "SSR");
115 cpu_spc
= tcg_global_mem_new_i32(TCG_AREG0
,
116 offsetof(CPUSH4State
, spc
), "SPC");
117 cpu_gbr
= tcg_global_mem_new_i32(TCG_AREG0
,
118 offsetof(CPUSH4State
, gbr
), "GBR");
119 cpu_vbr
= tcg_global_mem_new_i32(TCG_AREG0
,
120 offsetof(CPUSH4State
, vbr
), "VBR");
121 cpu_sgr
= tcg_global_mem_new_i32(TCG_AREG0
,
122 offsetof(CPUSH4State
, sgr
), "SGR");
123 cpu_dbr
= tcg_global_mem_new_i32(TCG_AREG0
,
124 offsetof(CPUSH4State
, dbr
), "DBR");
125 cpu_mach
= tcg_global_mem_new_i32(TCG_AREG0
,
126 offsetof(CPUSH4State
, mach
), "MACH");
127 cpu_macl
= tcg_global_mem_new_i32(TCG_AREG0
,
128 offsetof(CPUSH4State
, macl
), "MACL");
129 cpu_pr
= tcg_global_mem_new_i32(TCG_AREG0
,
130 offsetof(CPUSH4State
, pr
), "PR");
131 cpu_fpscr
= tcg_global_mem_new_i32(TCG_AREG0
,
132 offsetof(CPUSH4State
, fpscr
), "FPSCR");
133 cpu_fpul
= tcg_global_mem_new_i32(TCG_AREG0
,
134 offsetof(CPUSH4State
, fpul
), "FPUL");
136 cpu_flags
= tcg_global_mem_new_i32(TCG_AREG0
,
137 offsetof(CPUSH4State
, flags
), "_flags_");
138 cpu_delayed_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
139 offsetof(CPUSH4State
, delayed_pc
),
141 cpu_ldst
= tcg_global_mem_new_i32(TCG_AREG0
,
142 offsetof(CPUSH4State
, ldst
), "_ldst_");
144 for (i
= 0; i
< 32; i
++)
145 cpu_fregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
146 offsetof(CPUSH4State
, fregs
[i
]),
149 /* register helpers */
156 void cpu_dump_state(CPUSH4State
* env
, FILE * f
,
157 int (*cpu_fprintf
) (FILE * f
, const char *fmt
, ...),
161 cpu_fprintf(f
, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
162 env
->pc
, env
->sr
, env
->pr
, env
->fpscr
);
163 cpu_fprintf(f
, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
164 env
->spc
, env
->ssr
, env
->gbr
, env
->vbr
);
165 cpu_fprintf(f
, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
166 env
->sgr
, env
->dbr
, env
->delayed_pc
, env
->fpul
);
167 for (i
= 0; i
< 24; i
+= 4) {
168 cpu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
169 i
, env
->gregs
[i
], i
+ 1, env
->gregs
[i
+ 1],
170 i
+ 2, env
->gregs
[i
+ 2], i
+ 3, env
->gregs
[i
+ 3]);
172 if (env
->flags
& DELAY_SLOT
) {
173 cpu_fprintf(f
, "in delay slot (delayed_pc=0x%08x)\n",
175 } else if (env
->flags
& DELAY_SLOT_CONDITIONAL
) {
176 cpu_fprintf(f
, "in conditional delay slot (delayed_pc=0x%08x)\n",
190 static sh4_def_t sh4_defs
[] = {
193 .id
= SH_CPU_SH7750R
,
197 .features
= SH_FEATURE_BCR3_AND_BCR4
,
200 .id
= SH_CPU_SH7751R
,
203 .cvr
= 0x00110000, /* Neutered caches, should be 0x20480000 */
204 .features
= SH_FEATURE_BCR3_AND_BCR4
,
211 .features
= SH_FEATURE_SH4A
,
215 static const sh4_def_t
*cpu_sh4_find_by_name(const char *name
)
219 if (strcasecmp(name
, "any") == 0)
222 for (i
= 0; i
< ARRAY_SIZE(sh4_defs
); i
++)
223 if (strcasecmp(name
, sh4_defs
[i
].name
) == 0)
229 void sh4_cpu_list(FILE *f
, fprintf_function cpu_fprintf
)
233 for (i
= 0; i
< ARRAY_SIZE(sh4_defs
); i
++)
234 (*cpu_fprintf
)(f
, "%s\n", sh4_defs
[i
].name
);
237 static void cpu_register(CPUSH4State
*env
, const sh4_def_t
*def
)
245 SuperHCPU
*cpu_sh4_init(const char *cpu_model
)
249 const sh4_def_t
*def
;
251 def
= cpu_sh4_find_by_name(cpu_model
);
254 cpu
= SUPERH_CPU(object_new(TYPE_SUPERH_CPU
));
256 env
->features
= def
->features
;
257 sh4_translate_init();
258 env
->cpu_model_str
= cpu_model
;
260 cpu_register(env
, def
);
265 static void gen_goto_tb(DisasContext
* ctx
, int n
, target_ulong dest
)
267 TranslationBlock
*tb
;
270 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
271 !ctx
->singlestep_enabled
) {
272 /* Use a direct jump if in same page and singlestep not enabled */
274 tcg_gen_movi_i32(cpu_pc
, dest
);
275 tcg_gen_exit_tb((tcg_target_long
)tb
+ n
);
277 tcg_gen_movi_i32(cpu_pc
, dest
);
278 if (ctx
->singlestep_enabled
)
284 static void gen_jump(DisasContext
* ctx
)
286 if (ctx
->delayed_pc
== (uint32_t) - 1) {
287 /* Target is not statically known, it comes necessarily from a
288 delayed jump as immediate jump are conditinal jumps */
289 tcg_gen_mov_i32(cpu_pc
, cpu_delayed_pc
);
290 if (ctx
->singlestep_enabled
)
294 gen_goto_tb(ctx
, 0, ctx
->delayed_pc
);
298 static inline void gen_branch_slot(uint32_t delayed_pc
, int t
)
301 int label
= gen_new_label();
302 tcg_gen_movi_i32(cpu_delayed_pc
, delayed_pc
);
304 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
305 tcg_gen_brcondi_i32(t
? TCG_COND_EQ
:TCG_COND_NE
, sr
, 0, label
);
306 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
307 gen_set_label(label
);
310 /* Immediate conditional jump (bt or bf) */
311 static void gen_conditional_jump(DisasContext
* ctx
,
312 target_ulong ift
, target_ulong ifnott
)
317 l1
= gen_new_label();
319 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
320 tcg_gen_brcondi_i32(TCG_COND_NE
, sr
, 0, l1
);
321 gen_goto_tb(ctx
, 0, ifnott
);
323 gen_goto_tb(ctx
, 1, ift
);
326 /* Delayed conditional jump (bt or bf) */
327 static void gen_delayed_conditional_jump(DisasContext
* ctx
)
332 l1
= gen_new_label();
334 tcg_gen_andi_i32(ds
, cpu_flags
, DELAY_SLOT_TRUE
);
335 tcg_gen_brcondi_i32(TCG_COND_NE
, ds
, 0, l1
);
336 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
338 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, ~DELAY_SLOT_TRUE
);
342 static inline void gen_set_t(void)
344 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_T
);
347 static inline void gen_clr_t(void)
349 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
352 static inline void gen_cmp(int cond
, TCGv t0
, TCGv t1
)
357 tcg_gen_setcond_i32(cond
, t
, t1
, t0
);
358 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
359 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
364 static inline void gen_cmp_imm(int cond
, TCGv t0
, int32_t imm
)
369 tcg_gen_setcondi_i32(cond
, t
, t0
, imm
);
370 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
371 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
376 static inline void gen_store_flags(uint32_t flags
)
378 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
379 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, flags
);
382 static inline void gen_copy_bit_i32(TCGv t0
, int p0
, TCGv t1
, int p1
)
384 TCGv tmp
= tcg_temp_new();
389 tcg_gen_andi_i32(tmp
, t1
, (1 << p1
));
390 tcg_gen_andi_i32(t0
, t0
, ~(1 << p0
));
392 tcg_gen_shri_i32(tmp
, tmp
, p1
- p0
);
394 tcg_gen_shli_i32(tmp
, tmp
, p0
- p1
);
395 tcg_gen_or_i32(t0
, t0
, tmp
);
400 static inline void gen_load_fpr64(TCGv_i64 t
, int reg
)
402 tcg_gen_concat_i32_i64(t
, cpu_fregs
[reg
+ 1], cpu_fregs
[reg
]);
405 static inline void gen_store_fpr64 (TCGv_i64 t
, int reg
)
407 TCGv_i32 tmp
= tcg_temp_new_i32();
408 tcg_gen_trunc_i64_i32(tmp
, t
);
409 tcg_gen_mov_i32(cpu_fregs
[reg
+ 1], tmp
);
410 tcg_gen_shri_i64(t
, t
, 32);
411 tcg_gen_trunc_i64_i32(tmp
, t
);
412 tcg_gen_mov_i32(cpu_fregs
[reg
], tmp
);
413 tcg_temp_free_i32(tmp
);
/* Opcode field extraction: ctx->opcode holds the raw 16-bit SH-4
   instruction word; Bx_y selects bits x..y.  The "s" variants are
   sign-extended immediates. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* 12-bit branch displacement, manually sign-extended to 32 bits. */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* REG(x) resolves R0..R7 to the bank selected by SR.MD and SR.RB
   (bank 1 lives at gregs[16..23]); R8..R15 are unbanked. */
#define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
		(cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* ALTREG(x) resolves R0..R7 to the *inactive* bank (for ldc/stc Rn_BANK). */
#define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register selection: FPSCR.FR swaps the two FP banks (offset 0x10). */
#define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
/* XHACK maps an XD register number onto the flat fregs[] indexing. */
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
437 #define CHECK_NOT_DELAY_SLOT \
438 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
440 gen_helper_raise_slot_illegal_instruction(); \
441 ctx->bstate = BS_EXCP; \
445 #define CHECK_PRIVILEGED \
446 if (IS_USER(ctx)) { \
447 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
448 gen_helper_raise_slot_illegal_instruction(); \
450 gen_helper_raise_illegal_instruction(); \
452 ctx->bstate = BS_EXCP; \
456 #define CHECK_FPU_ENABLED \
457 if (ctx->flags & SR_FD) { \
458 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
459 gen_helper_raise_slot_fpu_disable(); \
461 gen_helper_raise_fpu_disable(); \
463 ctx->bstate = BS_EXCP; \
467 static void _decode_opc(DisasContext
* ctx
)
469 /* This code tries to make movcal emulation sufficiently
470 accurate for Linux purposes. This instruction writes
471 memory, and prior to that, always allocates a cache line.
472 It is used in two contexts:
473 - in memcpy, where data is copied in blocks, the first write
474 of to a block uses movca.l for performance.
475 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
476 to flush the cache. Here, the data written by movcal.l is never
477 written to memory, and the data written is just bogus.
479 To simulate this, we simulate movcal.l, we store the value to memory,
480 but we also remember the previous content. If we see ocbi, we check
481 if movcal.l for that address was done previously. If so, the write should
482 not have hit the memory, so we restore the previous content.
483 When we see an instruction that is neither movca.l
484 nor ocbi, the previous content is discarded.
486 To optimize, we only try to flush stores when we're at the start of
487 TB, or if we already saw movca.l in this TB and did not flush stores
491 int opcode
= ctx
->opcode
& 0xf0ff;
492 if (opcode
!= 0x0093 /* ocbi */
493 && opcode
!= 0x00c3 /* movca.l */)
495 gen_helper_discard_movcal_backup ();
501 fprintf(stderr
, "Translating opcode 0x%04x\n", ctx
->opcode
);
504 switch (ctx
->opcode
) {
505 case 0x0019: /* div0u */
506 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~(SR_M
| SR_Q
| SR_T
));
508 case 0x000b: /* rts */
510 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_pr
);
511 ctx
->flags
|= DELAY_SLOT
;
512 ctx
->delayed_pc
= (uint32_t) - 1;
514 case 0x0028: /* clrmac */
515 tcg_gen_movi_i32(cpu_mach
, 0);
516 tcg_gen_movi_i32(cpu_macl
, 0);
518 case 0x0048: /* clrs */
519 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_S
);
521 case 0x0008: /* clrt */
524 case 0x0038: /* ldtlb */
528 case 0x002b: /* rte */
531 tcg_gen_mov_i32(cpu_sr
, cpu_ssr
);
532 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_spc
);
533 ctx
->flags
|= DELAY_SLOT
;
534 ctx
->delayed_pc
= (uint32_t) - 1;
536 case 0x0058: /* sets */
537 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_S
);
539 case 0x0018: /* sett */
542 case 0xfbfd: /* frchg */
543 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_FR
);
544 ctx
->bstate
= BS_STOP
;
546 case 0xf3fd: /* fschg */
547 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_SZ
);
548 ctx
->bstate
= BS_STOP
;
550 case 0x0009: /* nop */
552 case 0x001b: /* sleep */
554 gen_helper_sleep(tcg_const_i32(ctx
->pc
+ 2));
558 switch (ctx
->opcode
& 0xf000) {
559 case 0x1000: /* mov.l Rm,@(disp,Rn) */
561 TCGv addr
= tcg_temp_new();
562 tcg_gen_addi_i32(addr
, REG(B11_8
), B3_0
* 4);
563 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
567 case 0x5000: /* mov.l @(disp,Rm),Rn */
569 TCGv addr
= tcg_temp_new();
570 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 4);
571 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
575 case 0xe000: /* mov #imm,Rn */
576 tcg_gen_movi_i32(REG(B11_8
), B7_0s
);
578 case 0x9000: /* mov.w @(disp,PC),Rn */
580 TCGv addr
= tcg_const_i32(ctx
->pc
+ 4 + B7_0
* 2);
581 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
585 case 0xd000: /* mov.l @(disp,PC),Rn */
587 TCGv addr
= tcg_const_i32((ctx
->pc
+ 4 + B7_0
* 4) & ~3);
588 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
592 case 0x7000: /* add #imm,Rn */
593 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), B7_0s
);
595 case 0xa000: /* bra disp */
597 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
598 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
599 ctx
->flags
|= DELAY_SLOT
;
601 case 0xb000: /* bsr disp */
603 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
604 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
605 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
606 ctx
->flags
|= DELAY_SLOT
;
610 switch (ctx
->opcode
& 0xf00f) {
611 case 0x6003: /* mov Rm,Rn */
612 tcg_gen_mov_i32(REG(B11_8
), REG(B7_4
));
614 case 0x2000: /* mov.b Rm,@Rn */
615 tcg_gen_qemu_st8(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
617 case 0x2001: /* mov.w Rm,@Rn */
618 tcg_gen_qemu_st16(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
620 case 0x2002: /* mov.l Rm,@Rn */
621 tcg_gen_qemu_st32(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
623 case 0x6000: /* mov.b @Rm,Rn */
624 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
626 case 0x6001: /* mov.w @Rm,Rn */
627 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
629 case 0x6002: /* mov.l @Rm,Rn */
630 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
632 case 0x2004: /* mov.b Rm,@-Rn */
634 TCGv addr
= tcg_temp_new();
635 tcg_gen_subi_i32(addr
, REG(B11_8
), 1);
636 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
); /* might cause re-execution */
637 tcg_gen_mov_i32(REG(B11_8
), addr
); /* modify register status */
641 case 0x2005: /* mov.w Rm,@-Rn */
643 TCGv addr
= tcg_temp_new();
644 tcg_gen_subi_i32(addr
, REG(B11_8
), 2);
645 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
646 tcg_gen_mov_i32(REG(B11_8
), addr
);
650 case 0x2006: /* mov.l Rm,@-Rn */
652 TCGv addr
= tcg_temp_new();
653 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
654 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
655 tcg_gen_mov_i32(REG(B11_8
), addr
);
658 case 0x6004: /* mov.b @Rm+,Rn */
659 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
661 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 1);
663 case 0x6005: /* mov.w @Rm+,Rn */
664 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
666 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
668 case 0x6006: /* mov.l @Rm+,Rn */
669 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
671 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
673 case 0x0004: /* mov.b Rm,@(R0,Rn) */
675 TCGv addr
= tcg_temp_new();
676 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
677 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
);
681 case 0x0005: /* mov.w Rm,@(R0,Rn) */
683 TCGv addr
= tcg_temp_new();
684 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
685 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
689 case 0x0006: /* mov.l Rm,@(R0,Rn) */
691 TCGv addr
= tcg_temp_new();
692 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
693 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
697 case 0x000c: /* mov.b @(R0,Rm),Rn */
699 TCGv addr
= tcg_temp_new();
700 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
701 tcg_gen_qemu_ld8s(REG(B11_8
), addr
, ctx
->memidx
);
705 case 0x000d: /* mov.w @(R0,Rm),Rn */
707 TCGv addr
= tcg_temp_new();
708 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
709 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
713 case 0x000e: /* mov.l @(R0,Rm),Rn */
715 TCGv addr
= tcg_temp_new();
716 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
717 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
721 case 0x6008: /* swap.b Rm,Rn */
724 high
= tcg_temp_new();
725 tcg_gen_andi_i32(high
, REG(B7_4
), 0xffff0000);
726 low
= tcg_temp_new();
727 tcg_gen_ext16u_i32(low
, REG(B7_4
));
728 tcg_gen_bswap16_i32(low
, low
);
729 tcg_gen_or_i32(REG(B11_8
), high
, low
);
734 case 0x6009: /* swap.w Rm,Rn */
737 high
= tcg_temp_new();
738 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
739 low
= tcg_temp_new();
740 tcg_gen_shri_i32(low
, REG(B7_4
), 16);
741 tcg_gen_ext16u_i32(low
, low
);
742 tcg_gen_or_i32(REG(B11_8
), high
, low
);
747 case 0x200d: /* xtrct Rm,Rn */
750 high
= tcg_temp_new();
751 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
752 low
= tcg_temp_new();
753 tcg_gen_shri_i32(low
, REG(B11_8
), 16);
754 tcg_gen_ext16u_i32(low
, low
);
755 tcg_gen_or_i32(REG(B11_8
), high
, low
);
760 case 0x300c: /* add Rm,Rn */
761 tcg_gen_add_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
763 case 0x300e: /* addc Rm,Rn */
764 gen_helper_addc(REG(B11_8
), REG(B7_4
), REG(B11_8
));
766 case 0x300f: /* addv Rm,Rn */
767 gen_helper_addv(REG(B11_8
), REG(B7_4
), REG(B11_8
));
769 case 0x2009: /* and Rm,Rn */
770 tcg_gen_and_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
772 case 0x3000: /* cmp/eq Rm,Rn */
773 gen_cmp(TCG_COND_EQ
, REG(B7_4
), REG(B11_8
));
775 case 0x3003: /* cmp/ge Rm,Rn */
776 gen_cmp(TCG_COND_GE
, REG(B7_4
), REG(B11_8
));
778 case 0x3007: /* cmp/gt Rm,Rn */
779 gen_cmp(TCG_COND_GT
, REG(B7_4
), REG(B11_8
));
781 case 0x3006: /* cmp/hi Rm,Rn */
782 gen_cmp(TCG_COND_GTU
, REG(B7_4
), REG(B11_8
));
784 case 0x3002: /* cmp/hs Rm,Rn */
785 gen_cmp(TCG_COND_GEU
, REG(B7_4
), REG(B11_8
));
787 case 0x200c: /* cmp/str Rm,Rn */
789 TCGv cmp1
= tcg_temp_new();
790 TCGv cmp2
= tcg_temp_new();
791 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
792 tcg_gen_xor_i32(cmp1
, REG(B7_4
), REG(B11_8
));
793 tcg_gen_andi_i32(cmp2
, cmp1
, 0xff000000);
794 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
795 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
796 tcg_gen_andi_i32(cmp2
, cmp1
, 0x00ff0000);
797 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
798 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
799 tcg_gen_andi_i32(cmp2
, cmp1
, 0x0000ff00);
800 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
801 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
802 tcg_gen_andi_i32(cmp2
, cmp1
, 0x000000ff);
803 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
804 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
809 case 0x2007: /* div0s Rm,Rn */
811 gen_copy_bit_i32(cpu_sr
, 8, REG(B11_8
), 31); /* SR_Q */
812 gen_copy_bit_i32(cpu_sr
, 9, REG(B7_4
), 31); /* SR_M */
813 TCGv val
= tcg_temp_new();
814 tcg_gen_xor_i32(val
, REG(B7_4
), REG(B11_8
));
815 gen_copy_bit_i32(cpu_sr
, 0, val
, 31); /* SR_T */
819 case 0x3004: /* div1 Rm,Rn */
820 gen_helper_div1(REG(B11_8
), REG(B7_4
), REG(B11_8
));
822 case 0x300d: /* dmuls.l Rm,Rn */
824 TCGv_i64 tmp1
= tcg_temp_new_i64();
825 TCGv_i64 tmp2
= tcg_temp_new_i64();
827 tcg_gen_ext_i32_i64(tmp1
, REG(B7_4
));
828 tcg_gen_ext_i32_i64(tmp2
, REG(B11_8
));
829 tcg_gen_mul_i64(tmp1
, tmp1
, tmp2
);
830 tcg_gen_trunc_i64_i32(cpu_macl
, tmp1
);
831 tcg_gen_shri_i64(tmp1
, tmp1
, 32);
832 tcg_gen_trunc_i64_i32(cpu_mach
, tmp1
);
834 tcg_temp_free_i64(tmp2
);
835 tcg_temp_free_i64(tmp1
);
838 case 0x3005: /* dmulu.l Rm,Rn */
840 TCGv_i64 tmp1
= tcg_temp_new_i64();
841 TCGv_i64 tmp2
= tcg_temp_new_i64();
843 tcg_gen_extu_i32_i64(tmp1
, REG(B7_4
));
844 tcg_gen_extu_i32_i64(tmp2
, REG(B11_8
));
845 tcg_gen_mul_i64(tmp1
, tmp1
, tmp2
);
846 tcg_gen_trunc_i64_i32(cpu_macl
, tmp1
);
847 tcg_gen_shri_i64(tmp1
, tmp1
, 32);
848 tcg_gen_trunc_i64_i32(cpu_mach
, tmp1
);
850 tcg_temp_free_i64(tmp2
);
851 tcg_temp_free_i64(tmp1
);
854 case 0x600e: /* exts.b Rm,Rn */
855 tcg_gen_ext8s_i32(REG(B11_8
), REG(B7_4
));
857 case 0x600f: /* exts.w Rm,Rn */
858 tcg_gen_ext16s_i32(REG(B11_8
), REG(B7_4
));
860 case 0x600c: /* extu.b Rm,Rn */
861 tcg_gen_ext8u_i32(REG(B11_8
), REG(B7_4
));
863 case 0x600d: /* extu.w Rm,Rn */
864 tcg_gen_ext16u_i32(REG(B11_8
), REG(B7_4
));
866 case 0x000f: /* mac.l @Rm+,@Rn+ */
869 arg0
= tcg_temp_new();
870 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
871 arg1
= tcg_temp_new();
872 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
873 gen_helper_macl(arg0
, arg1
);
876 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
877 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
880 case 0x400f: /* mac.w @Rm+,@Rn+ */
883 arg0
= tcg_temp_new();
884 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
885 arg1
= tcg_temp_new();
886 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
887 gen_helper_macw(arg0
, arg1
);
890 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 2);
891 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
894 case 0x0007: /* mul.l Rm,Rn */
895 tcg_gen_mul_i32(cpu_macl
, REG(B7_4
), REG(B11_8
));
897 case 0x200f: /* muls.w Rm,Rn */
900 arg0
= tcg_temp_new();
901 tcg_gen_ext16s_i32(arg0
, REG(B7_4
));
902 arg1
= tcg_temp_new();
903 tcg_gen_ext16s_i32(arg1
, REG(B11_8
));
904 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
909 case 0x200e: /* mulu.w Rm,Rn */
912 arg0
= tcg_temp_new();
913 tcg_gen_ext16u_i32(arg0
, REG(B7_4
));
914 arg1
= tcg_temp_new();
915 tcg_gen_ext16u_i32(arg1
, REG(B11_8
));
916 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
921 case 0x600b: /* neg Rm,Rn */
922 tcg_gen_neg_i32(REG(B11_8
), REG(B7_4
));
924 case 0x600a: /* negc Rm,Rn */
928 tcg_gen_neg_i32(t0
, REG(B7_4
));
930 tcg_gen_andi_i32(t1
, cpu_sr
, SR_T
);
931 tcg_gen_sub_i32(REG(B11_8
), t0
, t1
);
932 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
933 tcg_gen_setcondi_i32(TCG_COND_GTU
, t1
, t0
, 0);
934 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
935 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, REG(B11_8
), t0
);
936 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
941 case 0x6007: /* not Rm,Rn */
942 tcg_gen_not_i32(REG(B11_8
), REG(B7_4
));
944 case 0x200b: /* or Rm,Rn */
945 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
947 case 0x400c: /* shad Rm,Rn */
949 int label1
= gen_new_label();
950 int label2
= gen_new_label();
951 int label3
= gen_new_label();
952 int label4
= gen_new_label();
954 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
955 /* Rm positive, shift to the left */
956 shift
= tcg_temp_new();
957 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
958 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
959 tcg_temp_free(shift
);
961 /* Rm negative, shift to the right */
962 gen_set_label(label1
);
963 shift
= tcg_temp_new();
964 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
965 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
966 tcg_gen_not_i32(shift
, REG(B7_4
));
967 tcg_gen_andi_i32(shift
, shift
, 0x1f);
968 tcg_gen_addi_i32(shift
, shift
, 1);
969 tcg_gen_sar_i32(REG(B11_8
), REG(B11_8
), shift
);
970 tcg_temp_free(shift
);
973 gen_set_label(label2
);
974 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B11_8
), 0, label3
);
975 tcg_gen_movi_i32(REG(B11_8
), 0);
977 gen_set_label(label3
);
978 tcg_gen_movi_i32(REG(B11_8
), 0xffffffff);
979 gen_set_label(label4
);
982 case 0x400d: /* shld Rm,Rn */
984 int label1
= gen_new_label();
985 int label2
= gen_new_label();
986 int label3
= gen_new_label();
988 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
989 /* Rm positive, shift to the left */
990 shift
= tcg_temp_new();
991 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
992 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
993 tcg_temp_free(shift
);
995 /* Rm negative, shift to the right */
996 gen_set_label(label1
);
997 shift
= tcg_temp_new();
998 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
999 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
1000 tcg_gen_not_i32(shift
, REG(B7_4
));
1001 tcg_gen_andi_i32(shift
, shift
, 0x1f);
1002 tcg_gen_addi_i32(shift
, shift
, 1);
1003 tcg_gen_shr_i32(REG(B11_8
), REG(B11_8
), shift
);
1004 tcg_temp_free(shift
);
1007 gen_set_label(label2
);
1008 tcg_gen_movi_i32(REG(B11_8
), 0);
1009 gen_set_label(label3
);
1012 case 0x3008: /* sub Rm,Rn */
1013 tcg_gen_sub_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
1015 case 0x300a: /* subc Rm,Rn */
1016 gen_helper_subc(REG(B11_8
), REG(B7_4
), REG(B11_8
));
1018 case 0x300b: /* subv Rm,Rn */
1019 gen_helper_subv(REG(B11_8
), REG(B7_4
), REG(B11_8
));
1021 case 0x2008: /* tst Rm,Rn */
1023 TCGv val
= tcg_temp_new();
1024 tcg_gen_and_i32(val
, REG(B7_4
), REG(B11_8
));
1025 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1029 case 0x200a: /* xor Rm,Rn */
1030 tcg_gen_xor_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
1032 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1034 if (ctx
->fpscr
& FPSCR_SZ
) {
1035 TCGv_i64 fp
= tcg_temp_new_i64();
1036 gen_load_fpr64(fp
, XREG(B7_4
));
1037 gen_store_fpr64(fp
, XREG(B11_8
));
1038 tcg_temp_free_i64(fp
);
1040 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1043 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1045 if (ctx
->fpscr
& FPSCR_SZ
) {
1046 TCGv addr_hi
= tcg_temp_new();
1047 int fr
= XREG(B7_4
);
1048 tcg_gen_addi_i32(addr_hi
, REG(B11_8
), 4);
1049 tcg_gen_qemu_st32(cpu_fregs
[fr
], REG(B11_8
), ctx
->memidx
);
1050 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1051 tcg_temp_free(addr_hi
);
1053 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], REG(B11_8
), ctx
->memidx
);
1056 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1058 if (ctx
->fpscr
& FPSCR_SZ
) {
1059 TCGv addr_hi
= tcg_temp_new();
1060 int fr
= XREG(B11_8
);
1061 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1062 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
1063 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1064 tcg_temp_free(addr_hi
);
1066 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1069 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1071 if (ctx
->fpscr
& FPSCR_SZ
) {
1072 TCGv addr_hi
= tcg_temp_new();
1073 int fr
= XREG(B11_8
);
1074 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1075 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
1076 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1077 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 8);
1078 tcg_temp_free(addr_hi
);
1080 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1081 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
1084 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1086 if (ctx
->fpscr
& FPSCR_SZ
) {
1087 TCGv addr
= tcg_temp_new_i32();
1088 int fr
= XREG(B7_4
);
1089 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1090 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1091 tcg_gen_subi_i32(addr
, addr
, 4);
1092 tcg_gen_qemu_st32(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1093 tcg_gen_mov_i32(REG(B11_8
), addr
);
1094 tcg_temp_free(addr
);
1097 addr
= tcg_temp_new_i32();
1098 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1099 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1100 tcg_gen_mov_i32(REG(B11_8
), addr
);
1101 tcg_temp_free(addr
);
1104 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1107 TCGv addr
= tcg_temp_new_i32();
1108 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
1109 if (ctx
->fpscr
& FPSCR_SZ
) {
1110 int fr
= XREG(B11_8
);
1111 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1112 tcg_gen_addi_i32(addr
, addr
, 4);
1113 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1115 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], addr
, ctx
->memidx
);
1117 tcg_temp_free(addr
);
1120 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1123 TCGv addr
= tcg_temp_new();
1124 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
1125 if (ctx
->fpscr
& FPSCR_SZ
) {
1126 int fr
= XREG(B7_4
);
1127 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1128 tcg_gen_addi_i32(addr
, addr
, 4);
1129 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1131 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1133 tcg_temp_free(addr
);
1136 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1137 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1138 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1139 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1140 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1141 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1144 if (ctx
->fpscr
& FPSCR_PR
) {
1147 if (ctx
->opcode
& 0x0110)
1148 break; /* illegal instruction */
1149 fp0
= tcg_temp_new_i64();
1150 fp1
= tcg_temp_new_i64();
1151 gen_load_fpr64(fp0
, DREG(B11_8
));
1152 gen_load_fpr64(fp1
, DREG(B7_4
));
1153 switch (ctx
->opcode
& 0xf00f) {
1154 case 0xf000: /* fadd Rm,Rn */
1155 gen_helper_fadd_DT(fp0
, fp0
, fp1
);
1157 case 0xf001: /* fsub Rm,Rn */
1158 gen_helper_fsub_DT(fp0
, fp0
, fp1
);
1160 case 0xf002: /* fmul Rm,Rn */
1161 gen_helper_fmul_DT(fp0
, fp0
, fp1
);
1163 case 0xf003: /* fdiv Rm,Rn */
1164 gen_helper_fdiv_DT(fp0
, fp0
, fp1
);
1166 case 0xf004: /* fcmp/eq Rm,Rn */
1167 gen_helper_fcmp_eq_DT(fp0
, fp1
);
1169 case 0xf005: /* fcmp/gt Rm,Rn */
1170 gen_helper_fcmp_gt_DT(fp0
, fp1
);
1173 gen_store_fpr64(fp0
, DREG(B11_8
));
1174 tcg_temp_free_i64(fp0
);
1175 tcg_temp_free_i64(fp1
);
1177 switch (ctx
->opcode
& 0xf00f) {
1178 case 0xf000: /* fadd Rm,Rn */
1179 gen_helper_fadd_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1181 case 0xf001: /* fsub Rm,Rn */
1182 gen_helper_fsub_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1184 case 0xf002: /* fmul Rm,Rn */
1185 gen_helper_fmul_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1187 case 0xf003: /* fdiv Rm,Rn */
1188 gen_helper_fdiv_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1190 case 0xf004: /* fcmp/eq Rm,Rn */
1191 gen_helper_fcmp_eq_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1193 case 0xf005: /* fcmp/gt Rm,Rn */
1194 gen_helper_fcmp_gt_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1200 case 0xf00e: /* fmac FR0,RM,Rn */
1203 if (ctx
->fpscr
& FPSCR_PR
) {
1204 break; /* illegal instruction */
1206 gen_helper_fmac_FT(cpu_fregs
[FREG(B11_8
)],
1207 cpu_fregs
[FREG(0)], cpu_fregs
[FREG(B7_4
)], cpu_fregs
[FREG(B11_8
)]);
1213 switch (ctx
->opcode
& 0xff00) {
1214 case 0xc900: /* and #imm,R0 */
1215 tcg_gen_andi_i32(REG(0), REG(0), B7_0
);
1217 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1220 addr
= tcg_temp_new();
1221 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1222 val
= tcg_temp_new();
1223 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1224 tcg_gen_andi_i32(val
, val
, B7_0
);
1225 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1227 tcg_temp_free(addr
);
1230 case 0x8b00: /* bf label */
1231 CHECK_NOT_DELAY_SLOT
1232 gen_conditional_jump(ctx
, ctx
->pc
+ 2,
1233 ctx
->pc
+ 4 + B7_0s
* 2);
1234 ctx
->bstate
= BS_BRANCH
;
1236 case 0x8f00: /* bf/s label */
1237 CHECK_NOT_DELAY_SLOT
1238 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 0);
1239 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1241 case 0x8900: /* bt label */
1242 CHECK_NOT_DELAY_SLOT
1243 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2,
1245 ctx
->bstate
= BS_BRANCH
;
1247 case 0x8d00: /* bt/s label */
1248 CHECK_NOT_DELAY_SLOT
1249 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 1);
1250 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1252 case 0x8800: /* cmp/eq #imm,R0 */
1253 gen_cmp_imm(TCG_COND_EQ
, REG(0), B7_0s
);
1255 case 0xc400: /* mov.b @(disp,GBR),R0 */
1257 TCGv addr
= tcg_temp_new();
1258 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1259 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1260 tcg_temp_free(addr
);
1263 case 0xc500: /* mov.w @(disp,GBR),R0 */
1265 TCGv addr
= tcg_temp_new();
1266 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1267 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1268 tcg_temp_free(addr
);
1271 case 0xc600: /* mov.l @(disp,GBR),R0 */
1273 TCGv addr
= tcg_temp_new();
1274 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1275 tcg_gen_qemu_ld32s(REG(0), addr
, ctx
->memidx
);
1276 tcg_temp_free(addr
);
1279 case 0xc000: /* mov.b R0,@(disp,GBR) */
1281 TCGv addr
= tcg_temp_new();
1282 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1283 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1284 tcg_temp_free(addr
);
1287 case 0xc100: /* mov.w R0,@(disp,GBR) */
1289 TCGv addr
= tcg_temp_new();
1290 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1291 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1292 tcg_temp_free(addr
);
1295 case 0xc200: /* mov.l R0,@(disp,GBR) */
1297 TCGv addr
= tcg_temp_new();
1298 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1299 tcg_gen_qemu_st32(REG(0), addr
, ctx
->memidx
);
1300 tcg_temp_free(addr
);
1303 case 0x8000: /* mov.b R0,@(disp,Rn) */
1305 TCGv addr
= tcg_temp_new();
1306 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1307 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1308 tcg_temp_free(addr
);
1311 case 0x8100: /* mov.w R0,@(disp,Rn) */
1313 TCGv addr
= tcg_temp_new();
1314 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1315 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1316 tcg_temp_free(addr
);
1319 case 0x8400: /* mov.b @(disp,Rn),R0 */
1321 TCGv addr
= tcg_temp_new();
1322 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1323 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1324 tcg_temp_free(addr
);
1327 case 0x8500: /* mov.w @(disp,Rn),R0 */
1329 TCGv addr
= tcg_temp_new();
1330 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1331 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1332 tcg_temp_free(addr
);
1335 case 0xc700: /* mova @(disp,PC),R0 */
1336 tcg_gen_movi_i32(REG(0), ((ctx
->pc
& 0xfffffffc) + 4 + B7_0
* 4) & ~3);
1338 case 0xcb00: /* or #imm,R0 */
1339 tcg_gen_ori_i32(REG(0), REG(0), B7_0
);
1341 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1344 addr
= tcg_temp_new();
1345 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1346 val
= tcg_temp_new();
1347 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1348 tcg_gen_ori_i32(val
, val
, B7_0
);
1349 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1351 tcg_temp_free(addr
);
1354 case 0xc300: /* trapa #imm */
1357 CHECK_NOT_DELAY_SLOT
1358 imm
= tcg_const_i32(B7_0
);
1359 gen_helper_trapa(imm
);
1361 ctx
->bstate
= BS_BRANCH
;
1364 case 0xc800: /* tst #imm,R0 */
1366 TCGv val
= tcg_temp_new();
1367 tcg_gen_andi_i32(val
, REG(0), B7_0
);
1368 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1372 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1374 TCGv val
= tcg_temp_new();
1375 tcg_gen_add_i32(val
, REG(0), cpu_gbr
);
1376 tcg_gen_qemu_ld8u(val
, val
, ctx
->memidx
);
1377 tcg_gen_andi_i32(val
, val
, B7_0
);
1378 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1382 case 0xca00: /* xor #imm,R0 */
1383 tcg_gen_xori_i32(REG(0), REG(0), B7_0
);
1385 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1388 addr
= tcg_temp_new();
1389 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1390 val
= tcg_temp_new();
1391 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1392 tcg_gen_xori_i32(val
, val
, B7_0
);
1393 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1395 tcg_temp_free(addr
);
1400 switch (ctx
->opcode
& 0xf08f) {
1401 case 0x408e: /* ldc Rm,Rn_BANK */
1403 tcg_gen_mov_i32(ALTREG(B6_4
), REG(B11_8
));
1405 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1407 tcg_gen_qemu_ld32s(ALTREG(B6_4
), REG(B11_8
), ctx
->memidx
);
1408 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1410 case 0x0082: /* stc Rm_BANK,Rn */
1412 tcg_gen_mov_i32(REG(B11_8
), ALTREG(B6_4
));
1414 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1417 TCGv addr
= tcg_temp_new();
1418 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1419 tcg_gen_qemu_st32(ALTREG(B6_4
), addr
, ctx
->memidx
);
1420 tcg_gen_mov_i32(REG(B11_8
), addr
);
1421 tcg_temp_free(addr
);
1426 switch (ctx
->opcode
& 0xf0ff) {
1427 case 0x0023: /* braf Rn */
1428 CHECK_NOT_DELAY_SLOT
1429 tcg_gen_addi_i32(cpu_delayed_pc
, REG(B11_8
), ctx
->pc
+ 4);
1430 ctx
->flags
|= DELAY_SLOT
;
1431 ctx
->delayed_pc
= (uint32_t) - 1;
1433 case 0x0003: /* bsrf Rn */
1434 CHECK_NOT_DELAY_SLOT
1435 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1436 tcg_gen_add_i32(cpu_delayed_pc
, REG(B11_8
), cpu_pr
);
1437 ctx
->flags
|= DELAY_SLOT
;
1438 ctx
->delayed_pc
= (uint32_t) - 1;
1440 case 0x4015: /* cmp/pl Rn */
1441 gen_cmp_imm(TCG_COND_GT
, REG(B11_8
), 0);
1443 case 0x4011: /* cmp/pz Rn */
1444 gen_cmp_imm(TCG_COND_GE
, REG(B11_8
), 0);
1446 case 0x4010: /* dt Rn */
1447 tcg_gen_subi_i32(REG(B11_8
), REG(B11_8
), 1);
1448 gen_cmp_imm(TCG_COND_EQ
, REG(B11_8
), 0);
1450 case 0x402b: /* jmp @Rn */
1451 CHECK_NOT_DELAY_SLOT
1452 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1453 ctx
->flags
|= DELAY_SLOT
;
1454 ctx
->delayed_pc
= (uint32_t) - 1;
1456 case 0x400b: /* jsr @Rn */
1457 CHECK_NOT_DELAY_SLOT
1458 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1459 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1460 ctx
->flags
|= DELAY_SLOT
;
1461 ctx
->delayed_pc
= (uint32_t) - 1;
1463 case 0x400e: /* ldc Rm,SR */
1465 tcg_gen_andi_i32(cpu_sr
, REG(B11_8
), 0x700083f3);
1466 ctx
->bstate
= BS_STOP
;
1468 case 0x4007: /* ldc.l @Rm+,SR */
1471 TCGv val
= tcg_temp_new();
1472 tcg_gen_qemu_ld32s(val
, REG(B11_8
), ctx
->memidx
);
1473 tcg_gen_andi_i32(cpu_sr
, val
, 0x700083f3);
1475 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1476 ctx
->bstate
= BS_STOP
;
1479 case 0x0002: /* stc SR,Rn */
1481 tcg_gen_mov_i32(REG(B11_8
), cpu_sr
);
1483 case 0x4003: /* stc SR,@-Rn */
1486 TCGv addr
= tcg_temp_new();
1487 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1488 tcg_gen_qemu_st32(cpu_sr
, addr
, ctx
->memidx
);
1489 tcg_gen_mov_i32(REG(B11_8
), addr
);
1490 tcg_temp_free(addr
);
1493 #define LD(reg,ldnum,ldpnum,prechk) \
1496 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1500 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1501 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1503 #define ST(reg,stnum,stpnum,prechk) \
1506 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1511 TCGv addr = tcg_temp_new(); \
1512 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1513 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1514 tcg_gen_mov_i32(REG(B11_8), addr); \
1515 tcg_temp_free(addr); \
1518 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1519 LD(reg,ldnum,ldpnum,prechk) \
1520 ST(reg,stnum,stpnum,prechk)
1521 LDST(gbr
, 0x401e, 0x4017, 0x0012, 0x4013, {})
1522 LDST(vbr
, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED
)
1523 LDST(ssr
, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED
)
1524 LDST(spc
, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED
)
1525 ST(sgr
, 0x003a, 0x4032, CHECK_PRIVILEGED
)
1526 LD(sgr
, 0x403a, 0x4036, CHECK_PRIVILEGED
if (!(ctx
->features
& SH_FEATURE_SH4A
)) break;)
1527 LDST(dbr
, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED
)
1528 LDST(mach
, 0x400a, 0x4006, 0x000a, 0x4002, {})
1529 LDST(macl
, 0x401a, 0x4016, 0x001a, 0x4012, {})
1530 LDST(pr
, 0x402a, 0x4026, 0x002a, 0x4022, {})
1531 LDST(fpul
, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED
})
1532 case 0x406a: /* lds Rm,FPSCR */
1534 gen_helper_ld_fpscr(REG(B11_8
));
1535 ctx
->bstate
= BS_STOP
;
1537 case 0x4066: /* lds.l @Rm+,FPSCR */
1540 TCGv addr
= tcg_temp_new();
1541 tcg_gen_qemu_ld32s(addr
, REG(B11_8
), ctx
->memidx
);
1542 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1543 gen_helper_ld_fpscr(addr
);
1544 tcg_temp_free(addr
);
1545 ctx
->bstate
= BS_STOP
;
1548 case 0x006a: /* sts FPSCR,Rn */
1550 tcg_gen_andi_i32(REG(B11_8
), cpu_fpscr
, 0x003fffff);
1552 case 0x4062: /* sts FPSCR,@-Rn */
1556 val
= tcg_temp_new();
1557 tcg_gen_andi_i32(val
, cpu_fpscr
, 0x003fffff);
1558 addr
= tcg_temp_new();
1559 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1560 tcg_gen_qemu_st32(val
, addr
, ctx
->memidx
);
1561 tcg_gen_mov_i32(REG(B11_8
), addr
);
1562 tcg_temp_free(addr
);
1566 case 0x00c3: /* movca.l R0,@Rm */
1568 TCGv val
= tcg_temp_new();
1569 tcg_gen_qemu_ld32u(val
, REG(B11_8
), ctx
->memidx
);
1570 gen_helper_movcal (REG(B11_8
), val
);
1571 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1573 ctx
->has_movcal
= 1;
1576 /* MOVUA.L @Rm,R0 (Rm) -> R0
1577 Load non-boundary-aligned data */
1578 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1581 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1582 Load non-boundary-aligned data */
1583 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1584 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1586 case 0x0029: /* movt Rn */
1587 tcg_gen_andi_i32(REG(B11_8
), cpu_sr
, SR_T
);
1592 If (T == 1) R0 -> (Rn)
1595 if (ctx
->features
& SH_FEATURE_SH4A
) {
1596 int label
= gen_new_label();
1598 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cpu_ldst
);
1599 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ldst
, 0, label
);
1600 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1601 gen_set_label(label
);
1602 tcg_gen_movi_i32(cpu_ldst
, 0);
1610 When interrupt/exception
1613 if (ctx
->features
& SH_FEATURE_SH4A
) {
1614 tcg_gen_movi_i32(cpu_ldst
, 0);
1615 tcg_gen_qemu_ld32s(REG(0), REG(B11_8
), ctx
->memidx
);
1616 tcg_gen_movi_i32(cpu_ldst
, 1);
1620 case 0x0093: /* ocbi @Rn */
1622 gen_helper_ocbi (REG(B11_8
));
1625 case 0x00a3: /* ocbp @Rn */
1626 case 0x00b3: /* ocbwb @Rn */
1627 /* These instructions are supposed to do nothing in case of
1628 a cache miss. Given that we only partially emulate caches
1629 it is safe to simply ignore them. */
1631 case 0x0083: /* pref @Rn */
1633 case 0x00d3: /* prefi @Rn */
1634 if (ctx
->features
& SH_FEATURE_SH4A
)
1638 case 0x00e3: /* icbi @Rn */
1639 if (ctx
->features
& SH_FEATURE_SH4A
)
1643 case 0x00ab: /* synco */
1644 if (ctx
->features
& SH_FEATURE_SH4A
)
1648 case 0x4024: /* rotcl Rn */
1650 TCGv tmp
= tcg_temp_new();
1651 tcg_gen_mov_i32(tmp
, cpu_sr
);
1652 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1653 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1654 gen_copy_bit_i32(REG(B11_8
), 0, tmp
, 0);
1658 case 0x4025: /* rotcr Rn */
1660 TCGv tmp
= tcg_temp_new();
1661 tcg_gen_mov_i32(tmp
, cpu_sr
);
1662 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1663 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1664 gen_copy_bit_i32(REG(B11_8
), 31, tmp
, 0);
1668 case 0x4004: /* rotl Rn */
1669 tcg_gen_rotli_i32(REG(B11_8
), REG(B11_8
), 1);
1670 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1672 case 0x4005: /* rotr Rn */
1673 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1674 tcg_gen_rotri_i32(REG(B11_8
), REG(B11_8
), 1);
1676 case 0x4000: /* shll Rn */
1677 case 0x4020: /* shal Rn */
1678 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1679 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1681 case 0x4021: /* shar Rn */
1682 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1683 tcg_gen_sari_i32(REG(B11_8
), REG(B11_8
), 1);
1685 case 0x4001: /* shlr Rn */
1686 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1687 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1689 case 0x4008: /* shll2 Rn */
1690 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 2);
1692 case 0x4018: /* shll8 Rn */
1693 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 8);
1695 case 0x4028: /* shll16 Rn */
1696 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 16);
1698 case 0x4009: /* shlr2 Rn */
1699 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 2);
1701 case 0x4019: /* shlr8 Rn */
1702 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 8);
1704 case 0x4029: /* shlr16 Rn */
1705 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 16);
1707 case 0x401b: /* tas.b @Rn */
1710 addr
= tcg_temp_local_new();
1711 tcg_gen_mov_i32(addr
, REG(B11_8
));
1712 val
= tcg_temp_local_new();
1713 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1714 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1715 tcg_gen_ori_i32(val
, val
, 0x80);
1716 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1718 tcg_temp_free(addr
);
1721 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1723 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1725 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1727 tcg_gen_mov_i32(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1729 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1731 if (ctx
->fpscr
& FPSCR_PR
) {
1733 if (ctx
->opcode
& 0x0100)
1734 break; /* illegal instruction */
1735 fp
= tcg_temp_new_i64();
1736 gen_helper_float_DT(fp
, cpu_fpul
);
1737 gen_store_fpr64(fp
, DREG(B11_8
));
1738 tcg_temp_free_i64(fp
);
1741 gen_helper_float_FT(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1744 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1746 if (ctx
->fpscr
& FPSCR_PR
) {
1748 if (ctx
->opcode
& 0x0100)
1749 break; /* illegal instruction */
1750 fp
= tcg_temp_new_i64();
1751 gen_load_fpr64(fp
, DREG(B11_8
));
1752 gen_helper_ftrc_DT(cpu_fpul
, fp
);
1753 tcg_temp_free_i64(fp
);
1756 gen_helper_ftrc_FT(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1759 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1762 gen_helper_fneg_T(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1765 case 0xf05d: /* fabs FRn/DRn */
1767 if (ctx
->fpscr
& FPSCR_PR
) {
1768 if (ctx
->opcode
& 0x0100)
1769 break; /* illegal instruction */
1770 TCGv_i64 fp
= tcg_temp_new_i64();
1771 gen_load_fpr64(fp
, DREG(B11_8
));
1772 gen_helper_fabs_DT(fp
, fp
);
1773 gen_store_fpr64(fp
, DREG(B11_8
));
1774 tcg_temp_free_i64(fp
);
1776 gen_helper_fabs_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1779 case 0xf06d: /* fsqrt FRn */
1781 if (ctx
->fpscr
& FPSCR_PR
) {
1782 if (ctx
->opcode
& 0x0100)
1783 break; /* illegal instruction */
1784 TCGv_i64 fp
= tcg_temp_new_i64();
1785 gen_load_fpr64(fp
, DREG(B11_8
));
1786 gen_helper_fsqrt_DT(fp
, fp
);
1787 gen_store_fpr64(fp
, DREG(B11_8
));
1788 tcg_temp_free_i64(fp
);
1790 gen_helper_fsqrt_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1793 case 0xf07d: /* fsrra FRn */
1796 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1798 if (!(ctx
->fpscr
& FPSCR_PR
)) {
1799 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0);
1802 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1804 if (!(ctx
->fpscr
& FPSCR_PR
)) {
1805 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0x3f800000);
1808 case 0xf0ad: /* fcnvsd FPUL,DRn */
1811 TCGv_i64 fp
= tcg_temp_new_i64();
1812 gen_helper_fcnvsd_FT_DT(fp
, cpu_fpul
);
1813 gen_store_fpr64(fp
, DREG(B11_8
));
1814 tcg_temp_free_i64(fp
);
1817 case 0xf0bd: /* fcnvds DRn,FPUL */
1820 TCGv_i64 fp
= tcg_temp_new_i64();
1821 gen_load_fpr64(fp
, DREG(B11_8
));
1822 gen_helper_fcnvds_DT_FT(cpu_fpul
, fp
);
1823 tcg_temp_free_i64(fp
);
1826 case 0xf0ed: /* fipr FVm,FVn */
1828 if ((ctx
->fpscr
& FPSCR_PR
) == 0) {
1830 m
= tcg_const_i32((ctx
->opcode
>> 8) & 3);
1831 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1832 gen_helper_fipr(m
, n
);
1838 case 0xf0fd: /* ftrv XMTRX,FVn */
1840 if ((ctx
->opcode
& 0x0300) == 0x0100 &&
1841 (ctx
->fpscr
& FPSCR_PR
) == 0) {
1843 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1851 fprintf(stderr
, "unknown instruction 0x%04x at pc 0x%08x\n",
1852 ctx
->opcode
, ctx
->pc
);
1855 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1856 gen_helper_raise_slot_illegal_instruction();
1858 gen_helper_raise_illegal_instruction();
1860 ctx
->bstate
= BS_EXCP
;
1863 static void decode_opc(DisasContext
* ctx
)
1865 uint32_t old_flags
= ctx
->flags
;
1867 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
))) {
1868 tcg_gen_debug_insn_start(ctx
->pc
);
1873 if (old_flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1874 if (ctx
->flags
& DELAY_SLOT_CLEARME
) {
1877 /* go out of the delay slot */
1878 uint32_t new_flags
= ctx
->flags
;
1879 new_flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
1880 gen_store_flags(new_flags
);
1883 ctx
->bstate
= BS_BRANCH
;
1884 if (old_flags
& DELAY_SLOT_CONDITIONAL
) {
1885 gen_delayed_conditional_jump(ctx
);
1886 } else if (old_flags
& DELAY_SLOT
) {
1892 /* go into a delay slot */
1893 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))
1894 gen_store_flags(ctx
->flags
);
1898 gen_intermediate_code_internal(CPUSH4State
* env
, TranslationBlock
* tb
,
1902 target_ulong pc_start
;
1903 static uint16_t *gen_opc_end
;
1910 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
1912 ctx
.flags
= (uint32_t)tb
->flags
;
1913 ctx
.bstate
= BS_NONE
;
1915 ctx
.fpscr
= env
->fpscr
;
1916 ctx
.memidx
= (env
->sr
& SR_MD
) == 0 ? 1 : 0;
1917 /* We don't know if the delayed pc came from a dynamic or static branch,
1918 so assume it is a dynamic branch. */
1919 ctx
.delayed_pc
= -1; /* use delayed pc from env pointer */
1921 ctx
.singlestep_enabled
= env
->singlestep_enabled
;
1922 ctx
.features
= env
->features
;
1923 ctx
.has_movcal
= (tb
->flags
& TB_FLAG_PENDING_MOVCA
);
1927 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
1929 max_insns
= CF_COUNT_MASK
;
1931 while (ctx
.bstate
== BS_NONE
&& gen_opc_ptr
< gen_opc_end
) {
1932 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
1933 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1934 if (ctx
.pc
== bp
->pc
) {
1935 /* We have hit a breakpoint - make sure PC is up-to-date */
1936 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1938 ctx
.bstate
= BS_EXCP
;
1944 i
= gen_opc_ptr
- gen_opc_buf
;
1948 gen_opc_instr_start
[ii
++] = 0;
1950 gen_opc_pc
[ii
] = ctx
.pc
;
1951 gen_opc_hflags
[ii
] = ctx
.flags
;
1952 gen_opc_instr_start
[ii
] = 1;
1953 gen_opc_icount
[ii
] = num_insns
;
1955 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
1958 fprintf(stderr
, "Loading opcode at address 0x%08x\n", ctx
.pc
);
1961 ctx
.opcode
= lduw_code(ctx
.pc
);
1965 if ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
1967 if (env
->singlestep_enabled
)
1969 if (num_insns
>= max_insns
)
1974 if (tb
->cflags
& CF_LAST_IO
)
1976 if (env
->singlestep_enabled
) {
1977 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1980 switch (ctx
.bstate
) {
1982 /* gen_op_interrupt_restart(); */
1986 gen_store_flags(ctx
.flags
| DELAY_SLOT_CLEARME
);
1988 gen_goto_tb(&ctx
, 0, ctx
.pc
);
1991 /* gen_op_interrupt_restart(); */
2000 gen_icount_end(tb
, num_insns
);
2001 *gen_opc_ptr
= INDEX_op_end
;
2003 i
= gen_opc_ptr
- gen_opc_buf
;
2006 gen_opc_instr_start
[ii
++] = 0;
2008 tb
->size
= ctx
.pc
- pc_start
;
2009 tb
->icount
= num_insns
;
2013 #ifdef SH4_DEBUG_DISAS
2014 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "\n");
2016 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
2017 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2018 log_target_disas(pc_start
, ctx
.pc
- pc_start
, 0);
2024 void gen_intermediate_code(CPUSH4State
* env
, struct TranslationBlock
*tb
)
2026 gen_intermediate_code_internal(env
, tb
, 0);
2029 void gen_intermediate_code_pc(CPUSH4State
* env
, struct TranslationBlock
*tb
)
2031 gen_intermediate_code_internal(env
, tb
, 1);
2034 void restore_state_to_opc(CPUSH4State
*env
, TranslationBlock
*tb
, int pc_pos
)
2036 env
->pc
= gen_opc_pc
[pc_pos
];
2037 env
->flags
= gen_opc_hflags
[pc_pos
];