4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #define SH4_DEBUG_DISAS
22 //#define SH4_SINGLE_STEP
32 typedef struct DisasContext
{
33 struct TranslationBlock
*tb
;
42 int singlestep_enabled
;
#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: translated code always runs unprivileged. */
#define IS_USER(ctx) 1
#else
/* System emulation: user mode iff the SR.MD (processor mode) bit is clear. */
#define IS_USER(ctx) (!(ctx->sr & SR_MD))
#endif
54 BS_NONE
= 0, /* We go out of the TB without reaching a branch or an
57 BS_STOP
= 1, /* We want to stop translation for any reason */
58 BS_BRANCH
= 2, /* We reached a branch condition */
59 BS_EXCP
= 3, /* We reached an exception condition */
62 /* global register indexes */
63 static TCGv_ptr cpu_env
;
64 static TCGv cpu_gregs
[24];
65 static TCGv cpu_pc
, cpu_sr
, cpu_ssr
, cpu_spc
, cpu_gbr
;
66 static TCGv cpu_vbr
, cpu_sgr
, cpu_dbr
, cpu_mach
, cpu_macl
;
67 static TCGv cpu_pr
, cpu_fpscr
, cpu_fpul
, cpu_ldst
;
68 static TCGv cpu_fregs
[32];
70 /* internal register indexes */
71 static TCGv cpu_flags
, cpu_delayed_pc
;
73 static uint32_t gen_opc_hflags
[OPC_BUF_SIZE
];
75 #include "gen-icount.h"
77 static void sh4_translate_init(void)
80 static int done_init
= 0;
81 static const char * const gregnames
[24] = {
82 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
83 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
84 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
85 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
86 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
88 static const char * const fregnames
[32] = {
89 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
90 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
91 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
92 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
93 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
94 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
95 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
96 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
102 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
104 for (i
= 0; i
< 24; i
++)
105 cpu_gregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
106 offsetof(CPUSH4State
, gregs
[i
]),
109 cpu_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
110 offsetof(CPUSH4State
, pc
), "PC");
111 cpu_sr
= tcg_global_mem_new_i32(TCG_AREG0
,
112 offsetof(CPUSH4State
, sr
), "SR");
113 cpu_ssr
= tcg_global_mem_new_i32(TCG_AREG0
,
114 offsetof(CPUSH4State
, ssr
), "SSR");
115 cpu_spc
= tcg_global_mem_new_i32(TCG_AREG0
,
116 offsetof(CPUSH4State
, spc
), "SPC");
117 cpu_gbr
= tcg_global_mem_new_i32(TCG_AREG0
,
118 offsetof(CPUSH4State
, gbr
), "GBR");
119 cpu_vbr
= tcg_global_mem_new_i32(TCG_AREG0
,
120 offsetof(CPUSH4State
, vbr
), "VBR");
121 cpu_sgr
= tcg_global_mem_new_i32(TCG_AREG0
,
122 offsetof(CPUSH4State
, sgr
), "SGR");
123 cpu_dbr
= tcg_global_mem_new_i32(TCG_AREG0
,
124 offsetof(CPUSH4State
, dbr
), "DBR");
125 cpu_mach
= tcg_global_mem_new_i32(TCG_AREG0
,
126 offsetof(CPUSH4State
, mach
), "MACH");
127 cpu_macl
= tcg_global_mem_new_i32(TCG_AREG0
,
128 offsetof(CPUSH4State
, macl
), "MACL");
129 cpu_pr
= tcg_global_mem_new_i32(TCG_AREG0
,
130 offsetof(CPUSH4State
, pr
), "PR");
131 cpu_fpscr
= tcg_global_mem_new_i32(TCG_AREG0
,
132 offsetof(CPUSH4State
, fpscr
), "FPSCR");
133 cpu_fpul
= tcg_global_mem_new_i32(TCG_AREG0
,
134 offsetof(CPUSH4State
, fpul
), "FPUL");
136 cpu_flags
= tcg_global_mem_new_i32(TCG_AREG0
,
137 offsetof(CPUSH4State
, flags
), "_flags_");
138 cpu_delayed_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
139 offsetof(CPUSH4State
, delayed_pc
),
141 cpu_ldst
= tcg_global_mem_new_i32(TCG_AREG0
,
142 offsetof(CPUSH4State
, ldst
), "_ldst_");
144 for (i
= 0; i
< 32; i
++)
145 cpu_fregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
146 offsetof(CPUSH4State
, fregs
[i
]),
149 /* register helpers */
156 void cpu_dump_state(CPUSH4State
* env
, FILE * f
,
157 int (*cpu_fprintf
) (FILE * f
, const char *fmt
, ...),
161 cpu_fprintf(f
, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
162 env
->pc
, env
->sr
, env
->pr
, env
->fpscr
);
163 cpu_fprintf(f
, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
164 env
->spc
, env
->ssr
, env
->gbr
, env
->vbr
);
165 cpu_fprintf(f
, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
166 env
->sgr
, env
->dbr
, env
->delayed_pc
, env
->fpul
);
167 for (i
= 0; i
< 24; i
+= 4) {
168 cpu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
169 i
, env
->gregs
[i
], i
+ 1, env
->gregs
[i
+ 1],
170 i
+ 2, env
->gregs
[i
+ 2], i
+ 3, env
->gregs
[i
+ 3]);
172 if (env
->flags
& DELAY_SLOT
) {
173 cpu_fprintf(f
, "in delay slot (delayed_pc=0x%08x)\n",
175 } else if (env
->flags
& DELAY_SLOT_CONDITIONAL
) {
176 cpu_fprintf(f
, "in conditional delay slot (delayed_pc=0x%08x)\n",
181 void cpu_state_reset(CPUSH4State
*env
)
183 cpu_reset(ENV_GET_CPU(env
));
195 static sh4_def_t sh4_defs
[] = {
198 .id
= SH_CPU_SH7750R
,
202 .features
= SH_FEATURE_BCR3_AND_BCR4
,
205 .id
= SH_CPU_SH7751R
,
208 .cvr
= 0x00110000, /* Neutered caches, should be 0x20480000 */
209 .features
= SH_FEATURE_BCR3_AND_BCR4
,
216 .features
= SH_FEATURE_SH4A
,
220 static const sh4_def_t
*cpu_sh4_find_by_name(const char *name
)
224 if (strcasecmp(name
, "any") == 0)
227 for (i
= 0; i
< ARRAY_SIZE(sh4_defs
); i
++)
228 if (strcasecmp(name
, sh4_defs
[i
].name
) == 0)
234 void sh4_cpu_list(FILE *f
, fprintf_function cpu_fprintf
)
238 for (i
= 0; i
< ARRAY_SIZE(sh4_defs
); i
++)
239 (*cpu_fprintf
)(f
, "%s\n", sh4_defs
[i
].name
);
242 static void cpu_register(CPUSH4State
*env
, const sh4_def_t
*def
)
250 CPUSH4State
*cpu_sh4_init(const char *cpu_model
)
254 const sh4_def_t
*def
;
256 def
= cpu_sh4_find_by_name(cpu_model
);
259 cpu
= SUPERH_CPU(object_new(TYPE_SUPERH_CPU
));
261 env
->features
= def
->features
;
262 sh4_translate_init();
263 env
->cpu_model_str
= cpu_model
;
265 cpu_register(env
, def
);
270 static void gen_goto_tb(DisasContext
* ctx
, int n
, target_ulong dest
)
272 TranslationBlock
*tb
;
275 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
276 !ctx
->singlestep_enabled
) {
277 /* Use a direct jump if in same page and singlestep not enabled */
279 tcg_gen_movi_i32(cpu_pc
, dest
);
280 tcg_gen_exit_tb((tcg_target_long
)tb
+ n
);
282 tcg_gen_movi_i32(cpu_pc
, dest
);
283 if (ctx
->singlestep_enabled
)
289 static void gen_jump(DisasContext
* ctx
)
291 if (ctx
->delayed_pc
== (uint32_t) - 1) {
292 /* Target is not statically known, it comes necessarily from a
293 delayed jump as immediate jump are conditinal jumps */
294 tcg_gen_mov_i32(cpu_pc
, cpu_delayed_pc
);
295 if (ctx
->singlestep_enabled
)
299 gen_goto_tb(ctx
, 0, ctx
->delayed_pc
);
303 static inline void gen_branch_slot(uint32_t delayed_pc
, int t
)
306 int label
= gen_new_label();
307 tcg_gen_movi_i32(cpu_delayed_pc
, delayed_pc
);
309 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
310 tcg_gen_brcondi_i32(t
? TCG_COND_EQ
:TCG_COND_NE
, sr
, 0, label
);
311 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
312 gen_set_label(label
);
315 /* Immediate conditional jump (bt or bf) */
316 static void gen_conditional_jump(DisasContext
* ctx
,
317 target_ulong ift
, target_ulong ifnott
)
322 l1
= gen_new_label();
324 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
325 tcg_gen_brcondi_i32(TCG_COND_NE
, sr
, 0, l1
);
326 gen_goto_tb(ctx
, 0, ifnott
);
328 gen_goto_tb(ctx
, 1, ift
);
331 /* Delayed conditional jump (bt or bf) */
332 static void gen_delayed_conditional_jump(DisasContext
* ctx
)
337 l1
= gen_new_label();
339 tcg_gen_andi_i32(ds
, cpu_flags
, DELAY_SLOT_TRUE
);
340 tcg_gen_brcondi_i32(TCG_COND_NE
, ds
, 0, l1
);
341 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
343 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, ~DELAY_SLOT_TRUE
);
347 static inline void gen_set_t(void)
349 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_T
);
352 static inline void gen_clr_t(void)
354 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
357 static inline void gen_cmp(int cond
, TCGv t0
, TCGv t1
)
362 tcg_gen_setcond_i32(cond
, t
, t1
, t0
);
363 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
364 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
369 static inline void gen_cmp_imm(int cond
, TCGv t0
, int32_t imm
)
374 tcg_gen_setcondi_i32(cond
, t
, t0
, imm
);
375 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
376 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
381 static inline void gen_store_flags(uint32_t flags
)
383 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
384 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, flags
);
387 static inline void gen_copy_bit_i32(TCGv t0
, int p0
, TCGv t1
, int p1
)
389 TCGv tmp
= tcg_temp_new();
394 tcg_gen_andi_i32(tmp
, t1
, (1 << p1
));
395 tcg_gen_andi_i32(t0
, t0
, ~(1 << p0
));
397 tcg_gen_shri_i32(tmp
, tmp
, p1
- p0
);
399 tcg_gen_shli_i32(tmp
, tmp
, p0
- p1
);
400 tcg_gen_or_i32(t0
, t0
, tmp
);
405 static inline void gen_load_fpr64(TCGv_i64 t
, int reg
)
407 tcg_gen_concat_i32_i64(t
, cpu_fregs
[reg
+ 1], cpu_fregs
[reg
]);
410 static inline void gen_store_fpr64 (TCGv_i64 t
, int reg
)
412 TCGv_i32 tmp
= tcg_temp_new_i32();
413 tcg_gen_trunc_i64_i32(tmp
, t
);
414 tcg_gen_mov_i32(cpu_fregs
[reg
+ 1], tmp
);
415 tcg_gen_shri_i64(t
, t
, 32);
416 tcg_gen_trunc_i64_i32(tmp
, t
);
417 tcg_gen_mov_i32(cpu_fregs
[reg
], tmp
);
418 tcg_temp_free_i32(tmp
);
/* Instruction-word field extractors; ctx->opcode holds the 16-bit opcode. */
#define B3_0 (ctx->opcode & 0xf)                         /* bits 3..0  */
#define B6_4 ((ctx->opcode >> 4) & 0x7)                  /* bits 6..4  */
#define B7_4 ((ctx->opcode >> 4) & 0xf)                  /* bits 7..4  */
#define B7_0 (ctx->opcode & 0xff)                        /* bits 7..0, zero-extended */
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))  /* bits 7..0, sign-extended */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))                                 /* bits 11..0, sign-extended */
#define B11_8 ((ctx->opcode >> 8) & 0xf)                 /* bits 11..8  */
#define B15_12 ((ctx->opcode >> 12) & 0xf)               /* bits 15..12 */

/* General register access: R0..R7 come from bank 1 (gregs[x + 16]) when
   both SR.MD and SR.RB are set, otherwise from bank 0. */
#define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
                (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* Access to the currently *inactive* bank of R0..R7 (inverse of REG). */
#define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* Single-precision FP register, honouring the FPSCR.FR bank-select bit. */
#define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
/* Map an extended (XD/X) register number onto the linear fregs index. */
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
442 #define CHECK_NOT_DELAY_SLOT \
443 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
445 gen_helper_raise_slot_illegal_instruction(); \
446 ctx->bstate = BS_EXCP; \
450 #define CHECK_PRIVILEGED \
451 if (IS_USER(ctx)) { \
452 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
453 gen_helper_raise_slot_illegal_instruction(); \
455 gen_helper_raise_illegal_instruction(); \
457 ctx->bstate = BS_EXCP; \
461 #define CHECK_FPU_ENABLED \
462 if (ctx->flags & SR_FD) { \
463 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
464 gen_helper_raise_slot_fpu_disable(); \
466 gen_helper_raise_fpu_disable(); \
468 ctx->bstate = BS_EXCP; \
472 static void _decode_opc(DisasContext
* ctx
)
474 /* This code tries to make movcal emulation sufficiently
475 accurate for Linux purposes. This instruction writes
476 memory, and prior to that, always allocates a cache line.
477 It is used in two contexts:
478 - in memcpy, where data is copied in blocks, the first write
479 of to a block uses movca.l for performance.
480 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
481 to flush the cache. Here, the data written by movcal.l is never
482 written to memory, and the data written is just bogus.
484 To simulate this, we simulate movcal.l, we store the value to memory,
485 but we also remember the previous content. If we see ocbi, we check
486 if movcal.l for that address was done previously. If so, the write should
487 not have hit the memory, so we restore the previous content.
488 When we see an instruction that is neither movca.l
489 nor ocbi, the previous content is discarded.
491 To optimize, we only try to flush stores when we're at the start of
492 TB, or if we already saw movca.l in this TB and did not flush stores
496 int opcode
= ctx
->opcode
& 0xf0ff;
497 if (opcode
!= 0x0093 /* ocbi */
498 && opcode
!= 0x00c3 /* movca.l */)
500 gen_helper_discard_movcal_backup ();
506 fprintf(stderr
, "Translating opcode 0x%04x\n", ctx
->opcode
);
509 switch (ctx
->opcode
) {
510 case 0x0019: /* div0u */
511 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~(SR_M
| SR_Q
| SR_T
));
513 case 0x000b: /* rts */
515 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_pr
);
516 ctx
->flags
|= DELAY_SLOT
;
517 ctx
->delayed_pc
= (uint32_t) - 1;
519 case 0x0028: /* clrmac */
520 tcg_gen_movi_i32(cpu_mach
, 0);
521 tcg_gen_movi_i32(cpu_macl
, 0);
523 case 0x0048: /* clrs */
524 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_S
);
526 case 0x0008: /* clrt */
529 case 0x0038: /* ldtlb */
533 case 0x002b: /* rte */
536 tcg_gen_mov_i32(cpu_sr
, cpu_ssr
);
537 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_spc
);
538 ctx
->flags
|= DELAY_SLOT
;
539 ctx
->delayed_pc
= (uint32_t) - 1;
541 case 0x0058: /* sets */
542 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_S
);
544 case 0x0018: /* sett */
547 case 0xfbfd: /* frchg */
548 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_FR
);
549 ctx
->bstate
= BS_STOP
;
551 case 0xf3fd: /* fschg */
552 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_SZ
);
553 ctx
->bstate
= BS_STOP
;
555 case 0x0009: /* nop */
557 case 0x001b: /* sleep */
559 gen_helper_sleep(tcg_const_i32(ctx
->pc
+ 2));
563 switch (ctx
->opcode
& 0xf000) {
564 case 0x1000: /* mov.l Rm,@(disp,Rn) */
566 TCGv addr
= tcg_temp_new();
567 tcg_gen_addi_i32(addr
, REG(B11_8
), B3_0
* 4);
568 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
572 case 0x5000: /* mov.l @(disp,Rm),Rn */
574 TCGv addr
= tcg_temp_new();
575 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 4);
576 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
580 case 0xe000: /* mov #imm,Rn */
581 tcg_gen_movi_i32(REG(B11_8
), B7_0s
);
583 case 0x9000: /* mov.w @(disp,PC),Rn */
585 TCGv addr
= tcg_const_i32(ctx
->pc
+ 4 + B7_0
* 2);
586 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
590 case 0xd000: /* mov.l @(disp,PC),Rn */
592 TCGv addr
= tcg_const_i32((ctx
->pc
+ 4 + B7_0
* 4) & ~3);
593 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
597 case 0x7000: /* add #imm,Rn */
598 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), B7_0s
);
600 case 0xa000: /* bra disp */
602 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
603 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
604 ctx
->flags
|= DELAY_SLOT
;
606 case 0xb000: /* bsr disp */
608 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
609 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
610 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
611 ctx
->flags
|= DELAY_SLOT
;
615 switch (ctx
->opcode
& 0xf00f) {
616 case 0x6003: /* mov Rm,Rn */
617 tcg_gen_mov_i32(REG(B11_8
), REG(B7_4
));
619 case 0x2000: /* mov.b Rm,@Rn */
620 tcg_gen_qemu_st8(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
622 case 0x2001: /* mov.w Rm,@Rn */
623 tcg_gen_qemu_st16(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
625 case 0x2002: /* mov.l Rm,@Rn */
626 tcg_gen_qemu_st32(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
628 case 0x6000: /* mov.b @Rm,Rn */
629 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
631 case 0x6001: /* mov.w @Rm,Rn */
632 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
634 case 0x6002: /* mov.l @Rm,Rn */
635 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
637 case 0x2004: /* mov.b Rm,@-Rn */
639 TCGv addr
= tcg_temp_new();
640 tcg_gen_subi_i32(addr
, REG(B11_8
), 1);
641 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
); /* might cause re-execution */
642 tcg_gen_mov_i32(REG(B11_8
), addr
); /* modify register status */
646 case 0x2005: /* mov.w Rm,@-Rn */
648 TCGv addr
= tcg_temp_new();
649 tcg_gen_subi_i32(addr
, REG(B11_8
), 2);
650 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
651 tcg_gen_mov_i32(REG(B11_8
), addr
);
655 case 0x2006: /* mov.l Rm,@-Rn */
657 TCGv addr
= tcg_temp_new();
658 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
659 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
660 tcg_gen_mov_i32(REG(B11_8
), addr
);
663 case 0x6004: /* mov.b @Rm+,Rn */
664 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
666 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 1);
668 case 0x6005: /* mov.w @Rm+,Rn */
669 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
671 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
673 case 0x6006: /* mov.l @Rm+,Rn */
674 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
676 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
678 case 0x0004: /* mov.b Rm,@(R0,Rn) */
680 TCGv addr
= tcg_temp_new();
681 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
682 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
);
686 case 0x0005: /* mov.w Rm,@(R0,Rn) */
688 TCGv addr
= tcg_temp_new();
689 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
690 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
694 case 0x0006: /* mov.l Rm,@(R0,Rn) */
696 TCGv addr
= tcg_temp_new();
697 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
698 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
702 case 0x000c: /* mov.b @(R0,Rm),Rn */
704 TCGv addr
= tcg_temp_new();
705 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
706 tcg_gen_qemu_ld8s(REG(B11_8
), addr
, ctx
->memidx
);
710 case 0x000d: /* mov.w @(R0,Rm),Rn */
712 TCGv addr
= tcg_temp_new();
713 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
714 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
718 case 0x000e: /* mov.l @(R0,Rm),Rn */
720 TCGv addr
= tcg_temp_new();
721 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
722 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
726 case 0x6008: /* swap.b Rm,Rn */
729 high
= tcg_temp_new();
730 tcg_gen_andi_i32(high
, REG(B7_4
), 0xffff0000);
731 low
= tcg_temp_new();
732 tcg_gen_ext16u_i32(low
, REG(B7_4
));
733 tcg_gen_bswap16_i32(low
, low
);
734 tcg_gen_or_i32(REG(B11_8
), high
, low
);
739 case 0x6009: /* swap.w Rm,Rn */
742 high
= tcg_temp_new();
743 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
744 low
= tcg_temp_new();
745 tcg_gen_shri_i32(low
, REG(B7_4
), 16);
746 tcg_gen_ext16u_i32(low
, low
);
747 tcg_gen_or_i32(REG(B11_8
), high
, low
);
752 case 0x200d: /* xtrct Rm,Rn */
755 high
= tcg_temp_new();
756 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
757 low
= tcg_temp_new();
758 tcg_gen_shri_i32(low
, REG(B11_8
), 16);
759 tcg_gen_ext16u_i32(low
, low
);
760 tcg_gen_or_i32(REG(B11_8
), high
, low
);
765 case 0x300c: /* add Rm,Rn */
766 tcg_gen_add_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
768 case 0x300e: /* addc Rm,Rn */
769 gen_helper_addc(REG(B11_8
), REG(B7_4
), REG(B11_8
));
771 case 0x300f: /* addv Rm,Rn */
772 gen_helper_addv(REG(B11_8
), REG(B7_4
), REG(B11_8
));
774 case 0x2009: /* and Rm,Rn */
775 tcg_gen_and_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
777 case 0x3000: /* cmp/eq Rm,Rn */
778 gen_cmp(TCG_COND_EQ
, REG(B7_4
), REG(B11_8
));
780 case 0x3003: /* cmp/ge Rm,Rn */
781 gen_cmp(TCG_COND_GE
, REG(B7_4
), REG(B11_8
));
783 case 0x3007: /* cmp/gt Rm,Rn */
784 gen_cmp(TCG_COND_GT
, REG(B7_4
), REG(B11_8
));
786 case 0x3006: /* cmp/hi Rm,Rn */
787 gen_cmp(TCG_COND_GTU
, REG(B7_4
), REG(B11_8
));
789 case 0x3002: /* cmp/hs Rm,Rn */
790 gen_cmp(TCG_COND_GEU
, REG(B7_4
), REG(B11_8
));
792 case 0x200c: /* cmp/str Rm,Rn */
794 TCGv cmp1
= tcg_temp_new();
795 TCGv cmp2
= tcg_temp_new();
796 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
797 tcg_gen_xor_i32(cmp1
, REG(B7_4
), REG(B11_8
));
798 tcg_gen_andi_i32(cmp2
, cmp1
, 0xff000000);
799 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
800 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
801 tcg_gen_andi_i32(cmp2
, cmp1
, 0x00ff0000);
802 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
803 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
804 tcg_gen_andi_i32(cmp2
, cmp1
, 0x0000ff00);
805 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
806 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
807 tcg_gen_andi_i32(cmp2
, cmp1
, 0x000000ff);
808 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
809 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
814 case 0x2007: /* div0s Rm,Rn */
816 gen_copy_bit_i32(cpu_sr
, 8, REG(B11_8
), 31); /* SR_Q */
817 gen_copy_bit_i32(cpu_sr
, 9, REG(B7_4
), 31); /* SR_M */
818 TCGv val
= tcg_temp_new();
819 tcg_gen_xor_i32(val
, REG(B7_4
), REG(B11_8
));
820 gen_copy_bit_i32(cpu_sr
, 0, val
, 31); /* SR_T */
824 case 0x3004: /* div1 Rm,Rn */
825 gen_helper_div1(REG(B11_8
), REG(B7_4
), REG(B11_8
));
827 case 0x300d: /* dmuls.l Rm,Rn */
829 TCGv_i64 tmp1
= tcg_temp_new_i64();
830 TCGv_i64 tmp2
= tcg_temp_new_i64();
832 tcg_gen_ext_i32_i64(tmp1
, REG(B7_4
));
833 tcg_gen_ext_i32_i64(tmp2
, REG(B11_8
));
834 tcg_gen_mul_i64(tmp1
, tmp1
, tmp2
);
835 tcg_gen_trunc_i64_i32(cpu_macl
, tmp1
);
836 tcg_gen_shri_i64(tmp1
, tmp1
, 32);
837 tcg_gen_trunc_i64_i32(cpu_mach
, tmp1
);
839 tcg_temp_free_i64(tmp2
);
840 tcg_temp_free_i64(tmp1
);
843 case 0x3005: /* dmulu.l Rm,Rn */
845 TCGv_i64 tmp1
= tcg_temp_new_i64();
846 TCGv_i64 tmp2
= tcg_temp_new_i64();
848 tcg_gen_extu_i32_i64(tmp1
, REG(B7_4
));
849 tcg_gen_extu_i32_i64(tmp2
, REG(B11_8
));
850 tcg_gen_mul_i64(tmp1
, tmp1
, tmp2
);
851 tcg_gen_trunc_i64_i32(cpu_macl
, tmp1
);
852 tcg_gen_shri_i64(tmp1
, tmp1
, 32);
853 tcg_gen_trunc_i64_i32(cpu_mach
, tmp1
);
855 tcg_temp_free_i64(tmp2
);
856 tcg_temp_free_i64(tmp1
);
859 case 0x600e: /* exts.b Rm,Rn */
860 tcg_gen_ext8s_i32(REG(B11_8
), REG(B7_4
));
862 case 0x600f: /* exts.w Rm,Rn */
863 tcg_gen_ext16s_i32(REG(B11_8
), REG(B7_4
));
865 case 0x600c: /* extu.b Rm,Rn */
866 tcg_gen_ext8u_i32(REG(B11_8
), REG(B7_4
));
868 case 0x600d: /* extu.w Rm,Rn */
869 tcg_gen_ext16u_i32(REG(B11_8
), REG(B7_4
));
871 case 0x000f: /* mac.l @Rm+,@Rn+ */
874 arg0
= tcg_temp_new();
875 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
876 arg1
= tcg_temp_new();
877 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
878 gen_helper_macl(arg0
, arg1
);
881 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
882 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
885 case 0x400f: /* mac.w @Rm+,@Rn+ */
888 arg0
= tcg_temp_new();
889 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
890 arg1
= tcg_temp_new();
891 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
892 gen_helper_macw(arg0
, arg1
);
895 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 2);
896 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
899 case 0x0007: /* mul.l Rm,Rn */
900 tcg_gen_mul_i32(cpu_macl
, REG(B7_4
), REG(B11_8
));
902 case 0x200f: /* muls.w Rm,Rn */
905 arg0
= tcg_temp_new();
906 tcg_gen_ext16s_i32(arg0
, REG(B7_4
));
907 arg1
= tcg_temp_new();
908 tcg_gen_ext16s_i32(arg1
, REG(B11_8
));
909 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
914 case 0x200e: /* mulu.w Rm,Rn */
917 arg0
= tcg_temp_new();
918 tcg_gen_ext16u_i32(arg0
, REG(B7_4
));
919 arg1
= tcg_temp_new();
920 tcg_gen_ext16u_i32(arg1
, REG(B11_8
));
921 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
926 case 0x600b: /* neg Rm,Rn */
927 tcg_gen_neg_i32(REG(B11_8
), REG(B7_4
));
929 case 0x600a: /* negc Rm,Rn */
933 tcg_gen_neg_i32(t0
, REG(B7_4
));
935 tcg_gen_andi_i32(t1
, cpu_sr
, SR_T
);
936 tcg_gen_sub_i32(REG(B11_8
), t0
, t1
);
937 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
938 tcg_gen_setcondi_i32(TCG_COND_GTU
, t1
, t0
, 0);
939 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
940 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, REG(B11_8
), t0
);
941 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
946 case 0x6007: /* not Rm,Rn */
947 tcg_gen_not_i32(REG(B11_8
), REG(B7_4
));
949 case 0x200b: /* or Rm,Rn */
950 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
952 case 0x400c: /* shad Rm,Rn */
954 int label1
= gen_new_label();
955 int label2
= gen_new_label();
956 int label3
= gen_new_label();
957 int label4
= gen_new_label();
959 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
960 /* Rm positive, shift to the left */
961 shift
= tcg_temp_new();
962 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
963 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
964 tcg_temp_free(shift
);
966 /* Rm negative, shift to the right */
967 gen_set_label(label1
);
968 shift
= tcg_temp_new();
969 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
970 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
971 tcg_gen_not_i32(shift
, REG(B7_4
));
972 tcg_gen_andi_i32(shift
, shift
, 0x1f);
973 tcg_gen_addi_i32(shift
, shift
, 1);
974 tcg_gen_sar_i32(REG(B11_8
), REG(B11_8
), shift
);
975 tcg_temp_free(shift
);
978 gen_set_label(label2
);
979 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B11_8
), 0, label3
);
980 tcg_gen_movi_i32(REG(B11_8
), 0);
982 gen_set_label(label3
);
983 tcg_gen_movi_i32(REG(B11_8
), 0xffffffff);
984 gen_set_label(label4
);
987 case 0x400d: /* shld Rm,Rn */
989 int label1
= gen_new_label();
990 int label2
= gen_new_label();
991 int label3
= gen_new_label();
993 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
994 /* Rm positive, shift to the left */
995 shift
= tcg_temp_new();
996 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
997 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
998 tcg_temp_free(shift
);
1000 /* Rm negative, shift to the right */
1001 gen_set_label(label1
);
1002 shift
= tcg_temp_new();
1003 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
1004 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
1005 tcg_gen_not_i32(shift
, REG(B7_4
));
1006 tcg_gen_andi_i32(shift
, shift
, 0x1f);
1007 tcg_gen_addi_i32(shift
, shift
, 1);
1008 tcg_gen_shr_i32(REG(B11_8
), REG(B11_8
), shift
);
1009 tcg_temp_free(shift
);
1012 gen_set_label(label2
);
1013 tcg_gen_movi_i32(REG(B11_8
), 0);
1014 gen_set_label(label3
);
1017 case 0x3008: /* sub Rm,Rn */
1018 tcg_gen_sub_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
1020 case 0x300a: /* subc Rm,Rn */
1021 gen_helper_subc(REG(B11_8
), REG(B7_4
), REG(B11_8
));
1023 case 0x300b: /* subv Rm,Rn */
1024 gen_helper_subv(REG(B11_8
), REG(B7_4
), REG(B11_8
));
1026 case 0x2008: /* tst Rm,Rn */
1028 TCGv val
= tcg_temp_new();
1029 tcg_gen_and_i32(val
, REG(B7_4
), REG(B11_8
));
1030 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1034 case 0x200a: /* xor Rm,Rn */
1035 tcg_gen_xor_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
1037 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1039 if (ctx
->fpscr
& FPSCR_SZ
) {
1040 TCGv_i64 fp
= tcg_temp_new_i64();
1041 gen_load_fpr64(fp
, XREG(B7_4
));
1042 gen_store_fpr64(fp
, XREG(B11_8
));
1043 tcg_temp_free_i64(fp
);
1045 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1048 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1050 if (ctx
->fpscr
& FPSCR_SZ
) {
1051 TCGv addr_hi
= tcg_temp_new();
1052 int fr
= XREG(B7_4
);
1053 tcg_gen_addi_i32(addr_hi
, REG(B11_8
), 4);
1054 tcg_gen_qemu_st32(cpu_fregs
[fr
], REG(B11_8
), ctx
->memidx
);
1055 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1056 tcg_temp_free(addr_hi
);
1058 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], REG(B11_8
), ctx
->memidx
);
1061 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1063 if (ctx
->fpscr
& FPSCR_SZ
) {
1064 TCGv addr_hi
= tcg_temp_new();
1065 int fr
= XREG(B11_8
);
1066 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1067 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
1068 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1069 tcg_temp_free(addr_hi
);
1071 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1074 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1076 if (ctx
->fpscr
& FPSCR_SZ
) {
1077 TCGv addr_hi
= tcg_temp_new();
1078 int fr
= XREG(B11_8
);
1079 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1080 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
1081 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1082 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 8);
1083 tcg_temp_free(addr_hi
);
1085 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1086 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
1089 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1091 if (ctx
->fpscr
& FPSCR_SZ
) {
1092 TCGv addr
= tcg_temp_new_i32();
1093 int fr
= XREG(B7_4
);
1094 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1095 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1096 tcg_gen_subi_i32(addr
, addr
, 4);
1097 tcg_gen_qemu_st32(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1098 tcg_gen_mov_i32(REG(B11_8
), addr
);
1099 tcg_temp_free(addr
);
1102 addr
= tcg_temp_new_i32();
1103 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1104 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1105 tcg_gen_mov_i32(REG(B11_8
), addr
);
1106 tcg_temp_free(addr
);
1109 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1112 TCGv addr
= tcg_temp_new_i32();
1113 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
1114 if (ctx
->fpscr
& FPSCR_SZ
) {
1115 int fr
= XREG(B11_8
);
1116 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1117 tcg_gen_addi_i32(addr
, addr
, 4);
1118 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1120 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], addr
, ctx
->memidx
);
1122 tcg_temp_free(addr
);
1125 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1128 TCGv addr
= tcg_temp_new();
1129 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
1130 if (ctx
->fpscr
& FPSCR_SZ
) {
1131 int fr
= XREG(B7_4
);
1132 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1133 tcg_gen_addi_i32(addr
, addr
, 4);
1134 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1136 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1138 tcg_temp_free(addr
);
1141 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1142 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1143 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1144 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1145 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1146 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1149 if (ctx
->fpscr
& FPSCR_PR
) {
1152 if (ctx
->opcode
& 0x0110)
1153 break; /* illegal instruction */
1154 fp0
= tcg_temp_new_i64();
1155 fp1
= tcg_temp_new_i64();
1156 gen_load_fpr64(fp0
, DREG(B11_8
));
1157 gen_load_fpr64(fp1
, DREG(B7_4
));
1158 switch (ctx
->opcode
& 0xf00f) {
1159 case 0xf000: /* fadd Rm,Rn */
1160 gen_helper_fadd_DT(fp0
, fp0
, fp1
);
1162 case 0xf001: /* fsub Rm,Rn */
1163 gen_helper_fsub_DT(fp0
, fp0
, fp1
);
1165 case 0xf002: /* fmul Rm,Rn */
1166 gen_helper_fmul_DT(fp0
, fp0
, fp1
);
1168 case 0xf003: /* fdiv Rm,Rn */
1169 gen_helper_fdiv_DT(fp0
, fp0
, fp1
);
1171 case 0xf004: /* fcmp/eq Rm,Rn */
1172 gen_helper_fcmp_eq_DT(fp0
, fp1
);
1174 case 0xf005: /* fcmp/gt Rm,Rn */
1175 gen_helper_fcmp_gt_DT(fp0
, fp1
);
1178 gen_store_fpr64(fp0
, DREG(B11_8
));
1179 tcg_temp_free_i64(fp0
);
1180 tcg_temp_free_i64(fp1
);
1182 switch (ctx
->opcode
& 0xf00f) {
1183 case 0xf000: /* fadd Rm,Rn */
1184 gen_helper_fadd_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1186 case 0xf001: /* fsub Rm,Rn */
1187 gen_helper_fsub_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1189 case 0xf002: /* fmul Rm,Rn */
1190 gen_helper_fmul_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1192 case 0xf003: /* fdiv Rm,Rn */
1193 gen_helper_fdiv_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1195 case 0xf004: /* fcmp/eq Rm,Rn */
1196 gen_helper_fcmp_eq_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1198 case 0xf005: /* fcmp/gt Rm,Rn */
1199 gen_helper_fcmp_gt_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1205 case 0xf00e: /* fmac FR0,RM,Rn */
1208 if (ctx
->fpscr
& FPSCR_PR
) {
1209 break; /* illegal instruction */
1211 gen_helper_fmac_FT(cpu_fregs
[FREG(B11_8
)],
1212 cpu_fregs
[FREG(0)], cpu_fregs
[FREG(B7_4
)], cpu_fregs
[FREG(B11_8
)]);
1218 switch (ctx
->opcode
& 0xff00) {
1219 case 0xc900: /* and #imm,R0 */
1220 tcg_gen_andi_i32(REG(0), REG(0), B7_0
);
1222 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1225 addr
= tcg_temp_new();
1226 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1227 val
= tcg_temp_new();
1228 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1229 tcg_gen_andi_i32(val
, val
, B7_0
);
1230 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1232 tcg_temp_free(addr
);
1235 case 0x8b00: /* bf label */
1236 CHECK_NOT_DELAY_SLOT
1237 gen_conditional_jump(ctx
, ctx
->pc
+ 2,
1238 ctx
->pc
+ 4 + B7_0s
* 2);
1239 ctx
->bstate
= BS_BRANCH
;
1241 case 0x8f00: /* bf/s label */
1242 CHECK_NOT_DELAY_SLOT
1243 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 0);
1244 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1246 case 0x8900: /* bt label */
1247 CHECK_NOT_DELAY_SLOT
1248 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2,
1250 ctx
->bstate
= BS_BRANCH
;
1252 case 0x8d00: /* bt/s label */
1253 CHECK_NOT_DELAY_SLOT
1254 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 1);
1255 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1257 case 0x8800: /* cmp/eq #imm,R0 */
1258 gen_cmp_imm(TCG_COND_EQ
, REG(0), B7_0s
);
1260 case 0xc400: /* mov.b @(disp,GBR),R0 */
1262 TCGv addr
= tcg_temp_new();
1263 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1264 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1265 tcg_temp_free(addr
);
1268 case 0xc500: /* mov.w @(disp,GBR),R0 */
1270 TCGv addr
= tcg_temp_new();
1271 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1272 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1273 tcg_temp_free(addr
);
1276 case 0xc600: /* mov.l @(disp,GBR),R0 */
1278 TCGv addr
= tcg_temp_new();
1279 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1280 tcg_gen_qemu_ld32s(REG(0), addr
, ctx
->memidx
);
1281 tcg_temp_free(addr
);
1284 case 0xc000: /* mov.b R0,@(disp,GBR) */
1286 TCGv addr
= tcg_temp_new();
1287 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1288 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1289 tcg_temp_free(addr
);
1292 case 0xc100: /* mov.w R0,@(disp,GBR) */
1294 TCGv addr
= tcg_temp_new();
1295 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1296 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1297 tcg_temp_free(addr
);
1300 case 0xc200: /* mov.l R0,@(disp,GBR) */
1302 TCGv addr
= tcg_temp_new();
1303 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1304 tcg_gen_qemu_st32(REG(0), addr
, ctx
->memidx
);
1305 tcg_temp_free(addr
);
1308 case 0x8000: /* mov.b R0,@(disp,Rn) */
1310 TCGv addr
= tcg_temp_new();
1311 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1312 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1313 tcg_temp_free(addr
);
1316 case 0x8100: /* mov.w R0,@(disp,Rn) */
1318 TCGv addr
= tcg_temp_new();
1319 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1320 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1321 tcg_temp_free(addr
);
1324 case 0x8400: /* mov.b @(disp,Rn),R0 */
1326 TCGv addr
= tcg_temp_new();
1327 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1328 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1329 tcg_temp_free(addr
);
1332 case 0x8500: /* mov.w @(disp,Rn),R0 */
1334 TCGv addr
= tcg_temp_new();
1335 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1336 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1337 tcg_temp_free(addr
);
1340 case 0xc700: /* mova @(disp,PC),R0 */
1341 tcg_gen_movi_i32(REG(0), ((ctx
->pc
& 0xfffffffc) + 4 + B7_0
* 4) & ~3);
1343 case 0xcb00: /* or #imm,R0 */
1344 tcg_gen_ori_i32(REG(0), REG(0), B7_0
);
1346 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1349 addr
= tcg_temp_new();
1350 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1351 val
= tcg_temp_new();
1352 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1353 tcg_gen_ori_i32(val
, val
, B7_0
);
1354 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1356 tcg_temp_free(addr
);
1359 case 0xc300: /* trapa #imm */
1362 CHECK_NOT_DELAY_SLOT
1363 imm
= tcg_const_i32(B7_0
);
1364 gen_helper_trapa(imm
);
1366 ctx
->bstate
= BS_BRANCH
;
1369 case 0xc800: /* tst #imm,R0 */
1371 TCGv val
= tcg_temp_new();
1372 tcg_gen_andi_i32(val
, REG(0), B7_0
);
1373 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1377 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1379 TCGv val
= tcg_temp_new();
1380 tcg_gen_add_i32(val
, REG(0), cpu_gbr
);
1381 tcg_gen_qemu_ld8u(val
, val
, ctx
->memidx
);
1382 tcg_gen_andi_i32(val
, val
, B7_0
);
1383 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1387 case 0xca00: /* xor #imm,R0 */
1388 tcg_gen_xori_i32(REG(0), REG(0), B7_0
);
1390 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1393 addr
= tcg_temp_new();
1394 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1395 val
= tcg_temp_new();
1396 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1397 tcg_gen_xori_i32(val
, val
, B7_0
);
1398 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1400 tcg_temp_free(addr
);
1405 switch (ctx
->opcode
& 0xf08f) {
1406 case 0x408e: /* ldc Rm,Rn_BANK */
1408 tcg_gen_mov_i32(ALTREG(B6_4
), REG(B11_8
));
1410 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1412 tcg_gen_qemu_ld32s(ALTREG(B6_4
), REG(B11_8
), ctx
->memidx
);
1413 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1415 case 0x0082: /* stc Rm_BANK,Rn */
1417 tcg_gen_mov_i32(REG(B11_8
), ALTREG(B6_4
));
1419 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1422 TCGv addr
= tcg_temp_new();
1423 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1424 tcg_gen_qemu_st32(ALTREG(B6_4
), addr
, ctx
->memidx
);
1425 tcg_gen_mov_i32(REG(B11_8
), addr
);
1426 tcg_temp_free(addr
);
1431 switch (ctx
->opcode
& 0xf0ff) {
1432 case 0x0023: /* braf Rn */
1433 CHECK_NOT_DELAY_SLOT
1434 tcg_gen_addi_i32(cpu_delayed_pc
, REG(B11_8
), ctx
->pc
+ 4);
1435 ctx
->flags
|= DELAY_SLOT
;
1436 ctx
->delayed_pc
= (uint32_t) - 1;
1438 case 0x0003: /* bsrf Rn */
1439 CHECK_NOT_DELAY_SLOT
1440 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1441 tcg_gen_add_i32(cpu_delayed_pc
, REG(B11_8
), cpu_pr
);
1442 ctx
->flags
|= DELAY_SLOT
;
1443 ctx
->delayed_pc
= (uint32_t) - 1;
1445 case 0x4015: /* cmp/pl Rn */
1446 gen_cmp_imm(TCG_COND_GT
, REG(B11_8
), 0);
1448 case 0x4011: /* cmp/pz Rn */
1449 gen_cmp_imm(TCG_COND_GE
, REG(B11_8
), 0);
1451 case 0x4010: /* dt Rn */
1452 tcg_gen_subi_i32(REG(B11_8
), REG(B11_8
), 1);
1453 gen_cmp_imm(TCG_COND_EQ
, REG(B11_8
), 0);
1455 case 0x402b: /* jmp @Rn */
1456 CHECK_NOT_DELAY_SLOT
1457 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1458 ctx
->flags
|= DELAY_SLOT
;
1459 ctx
->delayed_pc
= (uint32_t) - 1;
1461 case 0x400b: /* jsr @Rn */
1462 CHECK_NOT_DELAY_SLOT
1463 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1464 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1465 ctx
->flags
|= DELAY_SLOT
;
1466 ctx
->delayed_pc
= (uint32_t) - 1;
1468 case 0x400e: /* ldc Rm,SR */
1470 tcg_gen_andi_i32(cpu_sr
, REG(B11_8
), 0x700083f3);
1471 ctx
->bstate
= BS_STOP
;
1473 case 0x4007: /* ldc.l @Rm+,SR */
1476 TCGv val
= tcg_temp_new();
1477 tcg_gen_qemu_ld32s(val
, REG(B11_8
), ctx
->memidx
);
1478 tcg_gen_andi_i32(cpu_sr
, val
, 0x700083f3);
1480 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1481 ctx
->bstate
= BS_STOP
;
1484 case 0x0002: /* stc SR,Rn */
1486 tcg_gen_mov_i32(REG(B11_8
), cpu_sr
);
1488 case 0x4003: /* stc SR,@-Rn */
1491 TCGv addr
= tcg_temp_new();
1492 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1493 tcg_gen_qemu_st32(cpu_sr
, addr
, ctx
->memidx
);
1494 tcg_gen_mov_i32(REG(B11_8
), addr
);
1495 tcg_temp_free(addr
);
1498 #define LD(reg,ldnum,ldpnum,prechk) \
1501 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1505 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1506 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1508 #define ST(reg,stnum,stpnum,prechk) \
1511 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1516 TCGv addr = tcg_temp_new(); \
1517 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1518 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1519 tcg_gen_mov_i32(REG(B11_8), addr); \
1520 tcg_temp_free(addr); \
1523 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1524 LD(reg,ldnum,ldpnum,prechk) \
1525 ST(reg,stnum,stpnum,prechk)
1526 LDST(gbr
, 0x401e, 0x4017, 0x0012, 0x4013, {})
1527 LDST(vbr
, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED
)
1528 LDST(ssr
, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED
)
1529 LDST(spc
, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED
)
1530 ST(sgr
, 0x003a, 0x4032, CHECK_PRIVILEGED
)
1531 LD(sgr
, 0x403a, 0x4036, CHECK_PRIVILEGED
if (!(ctx
->features
& SH_FEATURE_SH4A
)) break;)
1532 LDST(dbr
, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED
)
1533 LDST(mach
, 0x400a, 0x4006, 0x000a, 0x4002, {})
1534 LDST(macl
, 0x401a, 0x4016, 0x001a, 0x4012, {})
1535 LDST(pr
, 0x402a, 0x4026, 0x002a, 0x4022, {})
1536 LDST(fpul
, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED
})
1537 case 0x406a: /* lds Rm,FPSCR */
1539 gen_helper_ld_fpscr(REG(B11_8
));
1540 ctx
->bstate
= BS_STOP
;
1542 case 0x4066: /* lds.l @Rm+,FPSCR */
1545 TCGv addr
= tcg_temp_new();
1546 tcg_gen_qemu_ld32s(addr
, REG(B11_8
), ctx
->memidx
);
1547 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1548 gen_helper_ld_fpscr(addr
);
1549 tcg_temp_free(addr
);
1550 ctx
->bstate
= BS_STOP
;
1553 case 0x006a: /* sts FPSCR,Rn */
1555 tcg_gen_andi_i32(REG(B11_8
), cpu_fpscr
, 0x003fffff);
1557 case 0x4062: /* sts FPSCR,@-Rn */
1561 val
= tcg_temp_new();
1562 tcg_gen_andi_i32(val
, cpu_fpscr
, 0x003fffff);
1563 addr
= tcg_temp_new();
1564 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1565 tcg_gen_qemu_st32(val
, addr
, ctx
->memidx
);
1566 tcg_gen_mov_i32(REG(B11_8
), addr
);
1567 tcg_temp_free(addr
);
1571 case 0x00c3: /* movca.l R0,@Rm */
1573 TCGv val
= tcg_temp_new();
1574 tcg_gen_qemu_ld32u(val
, REG(B11_8
), ctx
->memidx
);
1575 gen_helper_movcal (REG(B11_8
), val
);
1576 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1578 ctx
->has_movcal
= 1;
1581 /* MOVUA.L @Rm,R0 (Rm) -> R0
1582 Load non-boundary-aligned data */
1583 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1586 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1587 Load non-boundary-aligned data */
1588 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1589 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1591 case 0x0029: /* movt Rn */
1592 tcg_gen_andi_i32(REG(B11_8
), cpu_sr
, SR_T
);
1597 If (T == 1) R0 -> (Rn)
1600 if (ctx
->features
& SH_FEATURE_SH4A
) {
1601 int label
= gen_new_label();
1603 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cpu_ldst
);
1604 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ldst
, 0, label
);
1605 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1606 gen_set_label(label
);
1607 tcg_gen_movi_i32(cpu_ldst
, 0);
1615 When interrupt/exception
1618 if (ctx
->features
& SH_FEATURE_SH4A
) {
1619 tcg_gen_movi_i32(cpu_ldst
, 0);
1620 tcg_gen_qemu_ld32s(REG(0), REG(B11_8
), ctx
->memidx
);
1621 tcg_gen_movi_i32(cpu_ldst
, 1);
1625 case 0x0093: /* ocbi @Rn */
1627 gen_helper_ocbi (REG(B11_8
));
1630 case 0x00a3: /* ocbp @Rn */
1631 case 0x00b3: /* ocbwb @Rn */
1632 /* These instructions are supposed to do nothing in case of
1633 a cache miss. Given that we only partially emulate caches
1634 it is safe to simply ignore them. */
1636 case 0x0083: /* pref @Rn */
1638 case 0x00d3: /* prefi @Rn */
1639 if (ctx
->features
& SH_FEATURE_SH4A
)
1643 case 0x00e3: /* icbi @Rn */
1644 if (ctx
->features
& SH_FEATURE_SH4A
)
1648 case 0x00ab: /* synco */
1649 if (ctx
->features
& SH_FEATURE_SH4A
)
1653 case 0x4024: /* rotcl Rn */
1655 TCGv tmp
= tcg_temp_new();
1656 tcg_gen_mov_i32(tmp
, cpu_sr
);
1657 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1658 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1659 gen_copy_bit_i32(REG(B11_8
), 0, tmp
, 0);
1663 case 0x4025: /* rotcr Rn */
1665 TCGv tmp
= tcg_temp_new();
1666 tcg_gen_mov_i32(tmp
, cpu_sr
);
1667 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1668 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1669 gen_copy_bit_i32(REG(B11_8
), 31, tmp
, 0);
1673 case 0x4004: /* rotl Rn */
1674 tcg_gen_rotli_i32(REG(B11_8
), REG(B11_8
), 1);
1675 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1677 case 0x4005: /* rotr Rn */
1678 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1679 tcg_gen_rotri_i32(REG(B11_8
), REG(B11_8
), 1);
1681 case 0x4000: /* shll Rn */
1682 case 0x4020: /* shal Rn */
1683 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1684 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1686 case 0x4021: /* shar Rn */
1687 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1688 tcg_gen_sari_i32(REG(B11_8
), REG(B11_8
), 1);
1690 case 0x4001: /* shlr Rn */
1691 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1692 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1694 case 0x4008: /* shll2 Rn */
1695 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 2);
1697 case 0x4018: /* shll8 Rn */
1698 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 8);
1700 case 0x4028: /* shll16 Rn */
1701 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 16);
1703 case 0x4009: /* shlr2 Rn */
1704 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 2);
1706 case 0x4019: /* shlr8 Rn */
1707 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 8);
1709 case 0x4029: /* shlr16 Rn */
1710 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 16);
1712 case 0x401b: /* tas.b @Rn */
1715 addr
= tcg_temp_local_new();
1716 tcg_gen_mov_i32(addr
, REG(B11_8
));
1717 val
= tcg_temp_local_new();
1718 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1719 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1720 tcg_gen_ori_i32(val
, val
, 0x80);
1721 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1723 tcg_temp_free(addr
);
1726 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1728 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1730 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1732 tcg_gen_mov_i32(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1734 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1736 if (ctx
->fpscr
& FPSCR_PR
) {
1738 if (ctx
->opcode
& 0x0100)
1739 break; /* illegal instruction */
1740 fp
= tcg_temp_new_i64();
1741 gen_helper_float_DT(fp
, cpu_fpul
);
1742 gen_store_fpr64(fp
, DREG(B11_8
));
1743 tcg_temp_free_i64(fp
);
1746 gen_helper_float_FT(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1749 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1751 if (ctx
->fpscr
& FPSCR_PR
) {
1753 if (ctx
->opcode
& 0x0100)
1754 break; /* illegal instruction */
1755 fp
= tcg_temp_new_i64();
1756 gen_load_fpr64(fp
, DREG(B11_8
));
1757 gen_helper_ftrc_DT(cpu_fpul
, fp
);
1758 tcg_temp_free_i64(fp
);
1761 gen_helper_ftrc_FT(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1764 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1767 gen_helper_fneg_T(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1770 case 0xf05d: /* fabs FRn/DRn */
1772 if (ctx
->fpscr
& FPSCR_PR
) {
1773 if (ctx
->opcode
& 0x0100)
1774 break; /* illegal instruction */
1775 TCGv_i64 fp
= tcg_temp_new_i64();
1776 gen_load_fpr64(fp
, DREG(B11_8
));
1777 gen_helper_fabs_DT(fp
, fp
);
1778 gen_store_fpr64(fp
, DREG(B11_8
));
1779 tcg_temp_free_i64(fp
);
1781 gen_helper_fabs_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1784 case 0xf06d: /* fsqrt FRn */
1786 if (ctx
->fpscr
& FPSCR_PR
) {
1787 if (ctx
->opcode
& 0x0100)
1788 break; /* illegal instruction */
1789 TCGv_i64 fp
= tcg_temp_new_i64();
1790 gen_load_fpr64(fp
, DREG(B11_8
));
1791 gen_helper_fsqrt_DT(fp
, fp
);
1792 gen_store_fpr64(fp
, DREG(B11_8
));
1793 tcg_temp_free_i64(fp
);
1795 gen_helper_fsqrt_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1798 case 0xf07d: /* fsrra FRn */
1801 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1803 if (!(ctx
->fpscr
& FPSCR_PR
)) {
1804 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0);
1807 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1809 if (!(ctx
->fpscr
& FPSCR_PR
)) {
1810 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0x3f800000);
1813 case 0xf0ad: /* fcnvsd FPUL,DRn */
1816 TCGv_i64 fp
= tcg_temp_new_i64();
1817 gen_helper_fcnvsd_FT_DT(fp
, cpu_fpul
);
1818 gen_store_fpr64(fp
, DREG(B11_8
));
1819 tcg_temp_free_i64(fp
);
1822 case 0xf0bd: /* fcnvds DRn,FPUL */
1825 TCGv_i64 fp
= tcg_temp_new_i64();
1826 gen_load_fpr64(fp
, DREG(B11_8
));
1827 gen_helper_fcnvds_DT_FT(cpu_fpul
, fp
);
1828 tcg_temp_free_i64(fp
);
1831 case 0xf0ed: /* fipr FVm,FVn */
1833 if ((ctx
->fpscr
& FPSCR_PR
) == 0) {
1835 m
= tcg_const_i32((ctx
->opcode
>> 8) & 3);
1836 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1837 gen_helper_fipr(m
, n
);
1843 case 0xf0fd: /* ftrv XMTRX,FVn */
1845 if ((ctx
->opcode
& 0x0300) == 0x0100 &&
1846 (ctx
->fpscr
& FPSCR_PR
) == 0) {
1848 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1856 fprintf(stderr
, "unknown instruction 0x%04x at pc 0x%08x\n",
1857 ctx
->opcode
, ctx
->pc
);
1860 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1861 gen_helper_raise_slot_illegal_instruction();
1863 gen_helper_raise_illegal_instruction();
1865 ctx
->bstate
= BS_EXCP
;
/*
 * decode_opc: translate one guest instruction and manage the SH4
 * delay-slot state around it.
 *
 * NOTE(review): this chunk was extracted with source lines dropped (the
 * embedded original line numbers jump: 1871, 1874-1877, 1880-1881,
 * 1886-1887, 1892-1896, 1900-1902 are absent), so the body below is an
 * incomplete fragment; the call into the per-opcode decoder and several
 * closing braces/statements are not visible here.  Comments are hedged
 * accordingly.
 */
1868 static void decode_opc(DisasContext
* ctx
)
/* Snapshot of the delay-slot flags as they were before this instruction. */
1870 uint32_t old_flags
= ctx
->flags
;
/* Emit a TCG debug marker for this guest PC when op logging is enabled. */
1872 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
))) {
1873 tcg_gen_debug_insn_start(ctx
->pc
);
/* The instruction just decoded sat in a delay slot: complete the branch. */
1878 if (old_flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1879 if (ctx
->flags
& DELAY_SLOT_CLEARME
) {
1882 /* go out of the delay slot */
1883 uint32_t new_flags
= ctx
->flags
;
1884 new_flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
1885 gen_store_flags(new_flags
);
/* An instruction in a delay slot always terminates the translation block. */
1888 ctx
->bstate
= BS_BRANCH
;
1889 if (old_flags
& DELAY_SLOT_CONDITIONAL
) {
1890 gen_delayed_conditional_jump(ctx
);
1891 } else if (old_flags
& DELAY_SLOT
) {
1897 /* go into a delay slot */
1898 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))
1899 gen_store_flags(ctx
->flags
);
/*
 * gen_intermediate_code_internal: translate guest code starting at the
 * TB's pc into a TCG opcode stream for one TranslationBlock.
 *
 * NOTE(review): this extraction dropped many source lines (the embedded
 * original line numbers jump throughout, e.g. 1904-1906, 1909-1916,
 * 1929-1935, 1942-1952, 1959-1978, 1986-2004), so the fragment below is
 * incomplete: the return type and search_pc parameter, several local
 * declarations, the call to decode_opc() inside the loop, and most of the
 * bstate switch arms are not visible here.  Comments are hedged
 * accordingly.
 */
1903 gen_intermediate_code_internal(CPUSH4State
* env
, TranslationBlock
* tb
,
1907 target_ulong pc_start
;
1908 static uint16_t *gen_opc_end
;
1915 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
/* Seed the DisasContext from the CPU state and the TB flags. */
1917 ctx
.flags
= (uint32_t)tb
->flags
;
1918 ctx
.bstate
= BS_NONE
;
1920 ctx
.fpscr
= env
->fpscr
;
/* memidx 1 = user mode (SR.MD clear), 0 = privileged mode. */
1921 ctx
.memidx
= (env
->sr
& SR_MD
) == 0 ? 1 : 0;
1922 /* We don't know if the delayed pc came from a dynamic or static branch,
1923 so assume it is a dynamic branch. */
1924 ctx
.delayed_pc
= -1; /* use delayed pc from env pointer */
1926 ctx
.singlestep_enabled
= env
->singlestep_enabled
;
1927 ctx
.features
= env
->features
;
1928 ctx
.has_movcal
= (tb
->flags
& TB_FLAG_PENDING_MOVCA
);
1932 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
1934 max_insns
= CF_COUNT_MASK
;
/* Main translation loop: one guest instruction per iteration, until a
   branch/exception ends the block or the opcode buffer fills up. */
1936 while (ctx
.bstate
== BS_NONE
&& gen_opc_ptr
< gen_opc_end
) {
1937 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
1938 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1939 if (ctx
.pc
== bp
->pc
) {
1940 /* We have hit a breakpoint - make sure PC is up-to-date */
1941 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1943 ctx
.bstate
= BS_EXCP
;
1949 i
= gen_opc_ptr
- gen_opc_buf
;
1953 gen_opc_instr_start
[ii
++] = 0;
/* Record per-opcode pc/flags/icount so the PC can be searched later. */
1955 gen_opc_pc
[ii
] = ctx
.pc
;
1956 gen_opc_hflags
[ii
] = ctx
.flags
;
1957 gen_opc_instr_start
[ii
] = 1;
1958 gen_opc_icount
[ii
] = num_insns
;
1960 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
1963 fprintf(stderr
, "Loading opcode at address 0x%08x\n", ctx
.pc
);
/* Fetch the 16-bit SH4 opcode for this iteration. */
1966 ctx
.opcode
= lduw_code(ctx
.pc
);
/* Loop-exit conditions: page boundary, single-step, or max_insns. */
1970 if ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
1972 if (env
->singlestep_enabled
)
1974 if (num_insns
>= max_insns
)
1979 if (tb
->cflags
& CF_LAST_IO
)
1981 if (env
->singlestep_enabled
) {
1982 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
/* Epilogue: emit the TB exit according to how translation ended. */
1985 switch (ctx
.bstate
) {
1987 /* gen_op_interrupt_restart(); */
1991 gen_store_flags(ctx
.flags
| DELAY_SLOT_CLEARME
);
1993 gen_goto_tb(&ctx
, 0, ctx
.pc
);
1996 /* gen_op_interrupt_restart(); */
2005 gen_icount_end(tb
, num_insns
);
2006 *gen_opc_ptr
= INDEX_op_end
;
2008 i
= gen_opc_ptr
- gen_opc_buf
;
2011 gen_opc_instr_start
[ii
++] = 0;
/* Record the final TB size and instruction count. */
2013 tb
->size
= ctx
.pc
- pc_start
;
2014 tb
->icount
= num_insns
;
2018 #ifdef SH4_DEBUG_DISAS
2019 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "\n");
2021 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
2022 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2023 log_target_disas(pc_start
, ctx
.pc
- pc_start
, 0);
2029 void gen_intermediate_code(CPUSH4State
* env
, struct TranslationBlock
*tb
)
2031 gen_intermediate_code_internal(env
, tb
, 0);
2034 void gen_intermediate_code_pc(CPUSH4State
* env
, struct TranslationBlock
*tb
)
2036 gen_intermediate_code_internal(env
, tb
, 1);
2039 void restore_state_to_opc(CPUSH4State
*env
, TranslationBlock
*tb
, int pc_pos
)
2041 env
->pc
= gen_opc_pc
[pc_pos
];
2042 env
->flags
= gen_opc_hflags
[pc_pos
];