4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 //#define SH4_SINGLE_STEP
24 #include "disas/disas.h"
26 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
34 typedef struct DisasContext
{
35 struct TranslationBlock
*tb
;
42 int singlestep_enabled
;
47 #if defined(CONFIG_USER_ONLY)
48 #define IS_USER(ctx) 1
50 #define IS_USER(ctx) (!(ctx->flags & SR_MD))
/* Values for DisasContext::bstate — how the translated block ended. */
enum {
    BS_NONE = 0, /* We go out of the TB without reaching a branch or an
                    exception condition */
    BS_STOP = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3, /* We reached an exception condition */
};
/* global register indexes: TCG globals backed by fields of CPUSH4State */
static TCGv_ptr cpu_env;
static TCGv cpu_gregs[24];  /* R0..R15 plus the shadow R0..R7 bank */
static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];  /* FPR0..FPR15, both banks */

/* internal register indexes (translator-private state, not guest regs) */
static TCGv cpu_flags, cpu_delayed_pc;

static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
75 #include "exec/gen-icount.h"
77 void sh4_translate_init(void)
80 static int done_init
= 0;
81 static const char * const gregnames
[24] = {
82 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
83 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
84 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
85 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
86 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
88 static const char * const fregnames
[32] = {
89 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
90 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
91 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
92 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
93 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
94 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
95 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
96 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
102 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
104 for (i
= 0; i
< 24; i
++)
105 cpu_gregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
106 offsetof(CPUSH4State
, gregs
[i
]),
109 cpu_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
110 offsetof(CPUSH4State
, pc
), "PC");
111 cpu_sr
= tcg_global_mem_new_i32(TCG_AREG0
,
112 offsetof(CPUSH4State
, sr
), "SR");
113 cpu_ssr
= tcg_global_mem_new_i32(TCG_AREG0
,
114 offsetof(CPUSH4State
, ssr
), "SSR");
115 cpu_spc
= tcg_global_mem_new_i32(TCG_AREG0
,
116 offsetof(CPUSH4State
, spc
), "SPC");
117 cpu_gbr
= tcg_global_mem_new_i32(TCG_AREG0
,
118 offsetof(CPUSH4State
, gbr
), "GBR");
119 cpu_vbr
= tcg_global_mem_new_i32(TCG_AREG0
,
120 offsetof(CPUSH4State
, vbr
), "VBR");
121 cpu_sgr
= tcg_global_mem_new_i32(TCG_AREG0
,
122 offsetof(CPUSH4State
, sgr
), "SGR");
123 cpu_dbr
= tcg_global_mem_new_i32(TCG_AREG0
,
124 offsetof(CPUSH4State
, dbr
), "DBR");
125 cpu_mach
= tcg_global_mem_new_i32(TCG_AREG0
,
126 offsetof(CPUSH4State
, mach
), "MACH");
127 cpu_macl
= tcg_global_mem_new_i32(TCG_AREG0
,
128 offsetof(CPUSH4State
, macl
), "MACL");
129 cpu_pr
= tcg_global_mem_new_i32(TCG_AREG0
,
130 offsetof(CPUSH4State
, pr
), "PR");
131 cpu_fpscr
= tcg_global_mem_new_i32(TCG_AREG0
,
132 offsetof(CPUSH4State
, fpscr
), "FPSCR");
133 cpu_fpul
= tcg_global_mem_new_i32(TCG_AREG0
,
134 offsetof(CPUSH4State
, fpul
), "FPUL");
136 cpu_flags
= tcg_global_mem_new_i32(TCG_AREG0
,
137 offsetof(CPUSH4State
, flags
), "_flags_");
138 cpu_delayed_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
139 offsetof(CPUSH4State
, delayed_pc
),
141 cpu_ldst
= tcg_global_mem_new_i32(TCG_AREG0
,
142 offsetof(CPUSH4State
, ldst
), "_ldst_");
144 for (i
= 0; i
< 32; i
++)
145 cpu_fregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
146 offsetof(CPUSH4State
, fregs
[i
]),
152 void superh_cpu_dump_state(CPUState
*cs
, FILE *f
,
153 fprintf_function cpu_fprintf
, int flags
)
155 SuperHCPU
*cpu
= SUPERH_CPU(cs
);
156 CPUSH4State
*env
= &cpu
->env
;
158 cpu_fprintf(f
, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
159 env
->pc
, env
->sr
, env
->pr
, env
->fpscr
);
160 cpu_fprintf(f
, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
161 env
->spc
, env
->ssr
, env
->gbr
, env
->vbr
);
162 cpu_fprintf(f
, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
163 env
->sgr
, env
->dbr
, env
->delayed_pc
, env
->fpul
);
164 for (i
= 0; i
< 24; i
+= 4) {
165 cpu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
166 i
, env
->gregs
[i
], i
+ 1, env
->gregs
[i
+ 1],
167 i
+ 2, env
->gregs
[i
+ 2], i
+ 3, env
->gregs
[i
+ 3]);
169 if (env
->flags
& DELAY_SLOT
) {
170 cpu_fprintf(f
, "in delay slot (delayed_pc=0x%08x)\n",
172 } else if (env
->flags
& DELAY_SLOT_CONDITIONAL
) {
173 cpu_fprintf(f
, "in conditional delay slot (delayed_pc=0x%08x)\n",
178 static void gen_goto_tb(DisasContext
* ctx
, int n
, target_ulong dest
)
180 TranslationBlock
*tb
;
183 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
184 !ctx
->singlestep_enabled
) {
185 /* Use a direct jump if in same page and singlestep not enabled */
187 tcg_gen_movi_i32(cpu_pc
, dest
);
188 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
190 tcg_gen_movi_i32(cpu_pc
, dest
);
191 if (ctx
->singlestep_enabled
)
192 gen_helper_debug(cpu_env
);
197 static void gen_jump(DisasContext
* ctx
)
199 if (ctx
->delayed_pc
== (uint32_t) - 1) {
200 /* Target is not statically known, it comes necessarily from a
201 delayed jump as immediate jump are conditinal jumps */
202 tcg_gen_mov_i32(cpu_pc
, cpu_delayed_pc
);
203 if (ctx
->singlestep_enabled
)
204 gen_helper_debug(cpu_env
);
207 gen_goto_tb(ctx
, 0, ctx
->delayed_pc
);
211 static inline void gen_branch_slot(uint32_t delayed_pc
, int t
)
214 TCGLabel
*label
= gen_new_label();
215 tcg_gen_movi_i32(cpu_delayed_pc
, delayed_pc
);
217 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
218 tcg_gen_brcondi_i32(t
? TCG_COND_EQ
:TCG_COND_NE
, sr
, 0, label
);
219 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
220 gen_set_label(label
);
223 /* Immediate conditional jump (bt or bf) */
224 static void gen_conditional_jump(DisasContext
* ctx
,
225 target_ulong ift
, target_ulong ifnott
)
230 l1
= gen_new_label();
232 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
233 tcg_gen_brcondi_i32(TCG_COND_NE
, sr
, 0, l1
);
234 gen_goto_tb(ctx
, 0, ifnott
);
236 gen_goto_tb(ctx
, 1, ift
);
239 /* Delayed conditional jump (bt or bf) */
240 static void gen_delayed_conditional_jump(DisasContext
* ctx
)
245 l1
= gen_new_label();
247 tcg_gen_andi_i32(ds
, cpu_flags
, DELAY_SLOT_TRUE
);
248 tcg_gen_brcondi_i32(TCG_COND_NE
, ds
, 0, l1
);
249 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
251 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, ~DELAY_SLOT_TRUE
);
255 static inline void gen_cmp(int cond
, TCGv t0
, TCGv t1
)
260 tcg_gen_setcond_i32(cond
, t
, t1
, t0
);
261 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
262 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
267 static inline void gen_cmp_imm(int cond
, TCGv t0
, int32_t imm
)
272 tcg_gen_setcondi_i32(cond
, t
, t0
, imm
);
273 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
274 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
279 static inline void gen_store_flags(uint32_t flags
)
281 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
282 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, flags
);
285 static inline void gen_copy_bit_i32(TCGv t0
, int p0
, TCGv t1
, int p1
)
287 TCGv tmp
= tcg_temp_new();
292 tcg_gen_andi_i32(tmp
, t1
, (1 << p1
));
293 tcg_gen_andi_i32(t0
, t0
, ~(1 << p0
));
295 tcg_gen_shri_i32(tmp
, tmp
, p1
- p0
);
297 tcg_gen_shli_i32(tmp
, tmp
, p0
- p1
);
298 tcg_gen_or_i32(t0
, t0
, tmp
);
303 static inline void gen_load_fpr64(TCGv_i64 t
, int reg
)
305 tcg_gen_concat_i32_i64(t
, cpu_fregs
[reg
+ 1], cpu_fregs
[reg
]);
308 static inline void gen_store_fpr64 (TCGv_i64 t
, int reg
)
310 TCGv_i32 tmp
= tcg_temp_new_i32();
311 tcg_gen_trunc_i64_i32(tmp
, t
);
312 tcg_gen_mov_i32(cpu_fregs
[reg
+ 1], tmp
);
313 tcg_gen_shri_i64(t
, t
, 32);
314 tcg_gen_trunc_i64_i32(tmp
, t
);
315 tcg_gen_mov_i32(cpu_fregs
[reg
], tmp
);
316 tcg_temp_free_i32(tmp
);
/* Opcode bit-field extraction helpers: B<hi>_<lo> = bits hi..lo of the
   current 16-bit opcode; the "s" suffix means sign-extended. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* 12-bit branch displacement, sign-extended by hand */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* R0..R7 are banked in privileged mode: REG() selects the active bank,
   ALTREG() the inactive one (used by ldc/stc Rm_BANK forms). */
#define REG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) == (SR_MD | SR_RB) \
  ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define ALTREG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) != (SR_MD | SR_RB)\
  ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register selection: FREG honours the FPSCR.FR bank swap; XHACK maps
   an XD register number onto the flat cpu_fregs index; XREG combines both. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Guard macros used at the top of opcode cases.  Each raises the proper
   exception, marks the block as ended (BS_BRANCH) and returns from
   _decode_opc, so the offending instruction is not translated. */

/* Instruction is illegal inside a delay slot. */
#define CHECK_NOT_DELAY_SLOT                                      \
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {     \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
        gen_helper_raise_slot_illegal_instruction(cpu_env);       \
        ctx->bstate = BS_BRANCH;                                  \
        return;                                                   \
    }

/* Instruction requires privileged (SR.MD) mode. */
#define CHECK_PRIVILEGED                                          \
    if (IS_USER(ctx)) {                                           \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
            gen_helper_raise_slot_illegal_instruction(cpu_env);   \
        } else {                                                  \
            gen_helper_raise_illegal_instruction(cpu_env);        \
        }                                                         \
        ctx->bstate = BS_BRANCH;                                  \
        return;                                                   \
    }

/* FP instruction with the FPU disabled (SR.FD set). */
#define CHECK_FPU_ENABLED                                         \
    if (ctx->flags & SR_FD) {                                     \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
            gen_helper_raise_slot_fpu_disable(cpu_env);           \
        } else {                                                  \
            gen_helper_raise_fpu_disable(cpu_env);                \
        }                                                         \
        ctx->bstate = BS_BRANCH;                                  \
        return;                                                   \
    }
373 static void _decode_opc(DisasContext
* ctx
)
375 /* This code tries to make movcal emulation sufficiently
376 accurate for Linux purposes. This instruction writes
377 memory, and prior to that, always allocates a cache line.
378 It is used in two contexts:
379 - in memcpy, where data is copied in blocks, the first write
380 of to a block uses movca.l for performance.
381 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
382 to flush the cache. Here, the data written by movcal.l is never
383 written to memory, and the data written is just bogus.
385 To simulate this, we simulate movcal.l, we store the value to memory,
386 but we also remember the previous content. If we see ocbi, we check
387 if movcal.l for that address was done previously. If so, the write should
388 not have hit the memory, so we restore the previous content.
389 When we see an instruction that is neither movca.l
390 nor ocbi, the previous content is discarded.
392 To optimize, we only try to flush stores when we're at the start of
393 TB, or if we already saw movca.l in this TB and did not flush stores
397 int opcode
= ctx
->opcode
& 0xf0ff;
398 if (opcode
!= 0x0093 /* ocbi */
399 && opcode
!= 0x00c3 /* movca.l */)
401 gen_helper_discard_movcal_backup(cpu_env
);
407 fprintf(stderr
, "Translating opcode 0x%04x\n", ctx
->opcode
);
410 switch (ctx
->opcode
) {
411 case 0x0019: /* div0u */
412 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~(SR_M
| SR_Q
| SR_T
));
414 case 0x000b: /* rts */
416 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_pr
);
417 ctx
->flags
|= DELAY_SLOT
;
418 ctx
->delayed_pc
= (uint32_t) - 1;
420 case 0x0028: /* clrmac */
421 tcg_gen_movi_i32(cpu_mach
, 0);
422 tcg_gen_movi_i32(cpu_macl
, 0);
424 case 0x0048: /* clrs */
425 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_S
);
427 case 0x0008: /* clrt */
428 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
430 case 0x0038: /* ldtlb */
432 gen_helper_ldtlb(cpu_env
);
434 case 0x002b: /* rte */
437 tcg_gen_mov_i32(cpu_sr
, cpu_ssr
);
438 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_spc
);
439 ctx
->flags
|= DELAY_SLOT
;
440 ctx
->delayed_pc
= (uint32_t) - 1;
442 case 0x0058: /* sets */
443 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_S
);
445 case 0x0018: /* sett */
446 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_T
);
448 case 0xfbfd: /* frchg */
449 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_FR
);
450 ctx
->bstate
= BS_STOP
;
452 case 0xf3fd: /* fschg */
453 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_SZ
);
454 ctx
->bstate
= BS_STOP
;
456 case 0x0009: /* nop */
458 case 0x001b: /* sleep */
460 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
+ 2);
461 gen_helper_sleep(cpu_env
);
465 switch (ctx
->opcode
& 0xf000) {
466 case 0x1000: /* mov.l Rm,@(disp,Rn) */
468 TCGv addr
= tcg_temp_new();
469 tcg_gen_addi_i32(addr
, REG(B11_8
), B3_0
* 4);
470 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
474 case 0x5000: /* mov.l @(disp,Rm),Rn */
476 TCGv addr
= tcg_temp_new();
477 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 4);
478 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
482 case 0xe000: /* mov #imm,Rn */
483 tcg_gen_movi_i32(REG(B11_8
), B7_0s
);
485 case 0x9000: /* mov.w @(disp,PC),Rn */
487 TCGv addr
= tcg_const_i32(ctx
->pc
+ 4 + B7_0
* 2);
488 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESW
);
492 case 0xd000: /* mov.l @(disp,PC),Rn */
494 TCGv addr
= tcg_const_i32((ctx
->pc
+ 4 + B7_0
* 4) & ~3);
495 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
499 case 0x7000: /* add #imm,Rn */
500 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), B7_0s
);
502 case 0xa000: /* bra disp */
504 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
505 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
506 ctx
->flags
|= DELAY_SLOT
;
508 case 0xb000: /* bsr disp */
510 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
511 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
512 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
513 ctx
->flags
|= DELAY_SLOT
;
517 switch (ctx
->opcode
& 0xf00f) {
518 case 0x6003: /* mov Rm,Rn */
519 tcg_gen_mov_i32(REG(B11_8
), REG(B7_4
));
521 case 0x2000: /* mov.b Rm,@Rn */
522 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_UB
);
524 case 0x2001: /* mov.w Rm,@Rn */
525 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_TEUW
);
527 case 0x2002: /* mov.l Rm,@Rn */
528 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
530 case 0x6000: /* mov.b @Rm,Rn */
531 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_SB
);
533 case 0x6001: /* mov.w @Rm,Rn */
534 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESW
);
536 case 0x6002: /* mov.l @Rm,Rn */
537 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESL
);
539 case 0x2004: /* mov.b Rm,@-Rn */
541 TCGv addr
= tcg_temp_new();
542 tcg_gen_subi_i32(addr
, REG(B11_8
), 1);
543 /* might cause re-execution */
544 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_UB
);
545 tcg_gen_mov_i32(REG(B11_8
), addr
); /* modify register status */
549 case 0x2005: /* mov.w Rm,@-Rn */
551 TCGv addr
= tcg_temp_new();
552 tcg_gen_subi_i32(addr
, REG(B11_8
), 2);
553 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUW
);
554 tcg_gen_mov_i32(REG(B11_8
), addr
);
558 case 0x2006: /* mov.l Rm,@-Rn */
560 TCGv addr
= tcg_temp_new();
561 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
562 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
563 tcg_gen_mov_i32(REG(B11_8
), addr
);
566 case 0x6004: /* mov.b @Rm+,Rn */
567 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_SB
);
569 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 1);
571 case 0x6005: /* mov.w @Rm+,Rn */
572 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESW
);
574 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
576 case 0x6006: /* mov.l @Rm+,Rn */
577 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESL
);
579 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
581 case 0x0004: /* mov.b Rm,@(R0,Rn) */
583 TCGv addr
= tcg_temp_new();
584 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
585 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_UB
);
589 case 0x0005: /* mov.w Rm,@(R0,Rn) */
591 TCGv addr
= tcg_temp_new();
592 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
593 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUW
);
597 case 0x0006: /* mov.l Rm,@(R0,Rn) */
599 TCGv addr
= tcg_temp_new();
600 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
601 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
605 case 0x000c: /* mov.b @(R0,Rm),Rn */
607 TCGv addr
= tcg_temp_new();
608 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
609 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_SB
);
613 case 0x000d: /* mov.w @(R0,Rm),Rn */
615 TCGv addr
= tcg_temp_new();
616 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
617 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESW
);
621 case 0x000e: /* mov.l @(R0,Rm),Rn */
623 TCGv addr
= tcg_temp_new();
624 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
625 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
629 case 0x6008: /* swap.b Rm,Rn */
632 high
= tcg_temp_new();
633 tcg_gen_andi_i32(high
, REG(B7_4
), 0xffff0000);
634 low
= tcg_temp_new();
635 tcg_gen_ext16u_i32(low
, REG(B7_4
));
636 tcg_gen_bswap16_i32(low
, low
);
637 tcg_gen_or_i32(REG(B11_8
), high
, low
);
642 case 0x6009: /* swap.w Rm,Rn */
643 tcg_gen_rotli_i32(REG(B11_8
), REG(B7_4
), 16);
645 case 0x200d: /* xtrct Rm,Rn */
648 high
= tcg_temp_new();
649 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
650 low
= tcg_temp_new();
651 tcg_gen_shri_i32(low
, REG(B11_8
), 16);
652 tcg_gen_or_i32(REG(B11_8
), high
, low
);
657 case 0x300c: /* add Rm,Rn */
658 tcg_gen_add_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
660 case 0x300e: /* addc Rm,Rn */
664 tcg_gen_andi_i32(t0
, cpu_sr
, SR_T
);
666 tcg_gen_add_i32(t1
, REG(B7_4
), REG(B11_8
));
667 tcg_gen_add_i32(t0
, t0
, t1
);
669 tcg_gen_setcond_i32(TCG_COND_GTU
, t2
, REG(B11_8
), t1
);
670 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, t1
, t0
);
671 tcg_gen_or_i32(t1
, t1
, t2
);
673 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
674 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
676 tcg_gen_mov_i32(REG(B11_8
), t0
);
680 case 0x300f: /* addv Rm,Rn */
684 tcg_gen_add_i32(t0
, REG(B7_4
), REG(B11_8
));
686 tcg_gen_xor_i32(t1
, t0
, REG(B11_8
));
688 tcg_gen_xor_i32(t2
, REG(B7_4
), REG(B11_8
));
689 tcg_gen_andc_i32(t1
, t1
, t2
);
691 tcg_gen_shri_i32(t1
, t1
, 31);
692 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
693 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
695 tcg_gen_mov_i32(REG(B7_4
), t0
);
699 case 0x2009: /* and Rm,Rn */
700 tcg_gen_and_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
702 case 0x3000: /* cmp/eq Rm,Rn */
703 gen_cmp(TCG_COND_EQ
, REG(B7_4
), REG(B11_8
));
705 case 0x3003: /* cmp/ge Rm,Rn */
706 gen_cmp(TCG_COND_GE
, REG(B7_4
), REG(B11_8
));
708 case 0x3007: /* cmp/gt Rm,Rn */
709 gen_cmp(TCG_COND_GT
, REG(B7_4
), REG(B11_8
));
711 case 0x3006: /* cmp/hi Rm,Rn */
712 gen_cmp(TCG_COND_GTU
, REG(B7_4
), REG(B11_8
));
714 case 0x3002: /* cmp/hs Rm,Rn */
715 gen_cmp(TCG_COND_GEU
, REG(B7_4
), REG(B11_8
));
717 case 0x200c: /* cmp/str Rm,Rn */
719 TCGv cmp1
= tcg_temp_new();
720 TCGv cmp2
= tcg_temp_new();
721 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
722 tcg_gen_xor_i32(cmp1
, REG(B7_4
), REG(B11_8
));
723 tcg_gen_andi_i32(cmp2
, cmp1
, 0xff000000);
724 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
725 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
726 tcg_gen_andi_i32(cmp2
, cmp1
, 0x00ff0000);
727 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
728 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
729 tcg_gen_andi_i32(cmp2
, cmp1
, 0x0000ff00);
730 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
731 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
732 tcg_gen_andi_i32(cmp2
, cmp1
, 0x000000ff);
733 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
734 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
739 case 0x2007: /* div0s Rm,Rn */
741 gen_copy_bit_i32(cpu_sr
, 8, REG(B11_8
), 31); /* SR_Q */
742 gen_copy_bit_i32(cpu_sr
, 9, REG(B7_4
), 31); /* SR_M */
743 TCGv val
= tcg_temp_new();
744 tcg_gen_xor_i32(val
, REG(B7_4
), REG(B11_8
));
745 gen_copy_bit_i32(cpu_sr
, 0, val
, 31); /* SR_T */
749 case 0x3004: /* div1 Rm,Rn */
750 gen_helper_div1(REG(B11_8
), cpu_env
, REG(B7_4
), REG(B11_8
));
752 case 0x300d: /* dmuls.l Rm,Rn */
753 tcg_gen_muls2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
755 case 0x3005: /* dmulu.l Rm,Rn */
756 tcg_gen_mulu2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
758 case 0x600e: /* exts.b Rm,Rn */
759 tcg_gen_ext8s_i32(REG(B11_8
), REG(B7_4
));
761 case 0x600f: /* exts.w Rm,Rn */
762 tcg_gen_ext16s_i32(REG(B11_8
), REG(B7_4
));
764 case 0x600c: /* extu.b Rm,Rn */
765 tcg_gen_ext8u_i32(REG(B11_8
), REG(B7_4
));
767 case 0x600d: /* extu.w Rm,Rn */
768 tcg_gen_ext16u_i32(REG(B11_8
), REG(B7_4
));
770 case 0x000f: /* mac.l @Rm+,@Rn+ */
773 arg0
= tcg_temp_new();
774 tcg_gen_qemu_ld_i32(arg0
, REG(B7_4
), ctx
->memidx
, MO_TESL
);
775 arg1
= tcg_temp_new();
776 tcg_gen_qemu_ld_i32(arg1
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
777 gen_helper_macl(cpu_env
, arg0
, arg1
);
780 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
781 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
784 case 0x400f: /* mac.w @Rm+,@Rn+ */
787 arg0
= tcg_temp_new();
788 tcg_gen_qemu_ld_i32(arg0
, REG(B7_4
), ctx
->memidx
, MO_TESL
);
789 arg1
= tcg_temp_new();
790 tcg_gen_qemu_ld_i32(arg1
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
791 gen_helper_macw(cpu_env
, arg0
, arg1
);
794 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 2);
795 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
798 case 0x0007: /* mul.l Rm,Rn */
799 tcg_gen_mul_i32(cpu_macl
, REG(B7_4
), REG(B11_8
));
801 case 0x200f: /* muls.w Rm,Rn */
804 arg0
= tcg_temp_new();
805 tcg_gen_ext16s_i32(arg0
, REG(B7_4
));
806 arg1
= tcg_temp_new();
807 tcg_gen_ext16s_i32(arg1
, REG(B11_8
));
808 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
813 case 0x200e: /* mulu.w Rm,Rn */
816 arg0
= tcg_temp_new();
817 tcg_gen_ext16u_i32(arg0
, REG(B7_4
));
818 arg1
= tcg_temp_new();
819 tcg_gen_ext16u_i32(arg1
, REG(B11_8
));
820 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
825 case 0x600b: /* neg Rm,Rn */
826 tcg_gen_neg_i32(REG(B11_8
), REG(B7_4
));
828 case 0x600a: /* negc Rm,Rn */
832 tcg_gen_neg_i32(t0
, REG(B7_4
));
834 tcg_gen_andi_i32(t1
, cpu_sr
, SR_T
);
835 tcg_gen_sub_i32(REG(B11_8
), t0
, t1
);
836 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
837 tcg_gen_setcondi_i32(TCG_COND_GTU
, t1
, t0
, 0);
838 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
839 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, REG(B11_8
), t0
);
840 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
845 case 0x6007: /* not Rm,Rn */
846 tcg_gen_not_i32(REG(B11_8
), REG(B7_4
));
848 case 0x200b: /* or Rm,Rn */
849 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
851 case 0x400c: /* shad Rm,Rn */
853 TCGLabel
*label1
= gen_new_label();
854 TCGLabel
*label2
= gen_new_label();
855 TCGLabel
*label3
= gen_new_label();
856 TCGLabel
*label4
= gen_new_label();
858 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
859 /* Rm positive, shift to the left */
860 shift
= tcg_temp_new();
861 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
862 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
863 tcg_temp_free(shift
);
865 /* Rm negative, shift to the right */
866 gen_set_label(label1
);
867 shift
= tcg_temp_new();
868 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
869 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
870 tcg_gen_not_i32(shift
, REG(B7_4
));
871 tcg_gen_andi_i32(shift
, shift
, 0x1f);
872 tcg_gen_addi_i32(shift
, shift
, 1);
873 tcg_gen_sar_i32(REG(B11_8
), REG(B11_8
), shift
);
874 tcg_temp_free(shift
);
877 gen_set_label(label2
);
878 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B11_8
), 0, label3
);
879 tcg_gen_movi_i32(REG(B11_8
), 0);
881 gen_set_label(label3
);
882 tcg_gen_movi_i32(REG(B11_8
), 0xffffffff);
883 gen_set_label(label4
);
886 case 0x400d: /* shld Rm,Rn */
888 TCGLabel
*label1
= gen_new_label();
889 TCGLabel
*label2
= gen_new_label();
890 TCGLabel
*label3
= gen_new_label();
892 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
893 /* Rm positive, shift to the left */
894 shift
= tcg_temp_new();
895 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
896 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
897 tcg_temp_free(shift
);
899 /* Rm negative, shift to the right */
900 gen_set_label(label1
);
901 shift
= tcg_temp_new();
902 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
903 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
904 tcg_gen_not_i32(shift
, REG(B7_4
));
905 tcg_gen_andi_i32(shift
, shift
, 0x1f);
906 tcg_gen_addi_i32(shift
, shift
, 1);
907 tcg_gen_shr_i32(REG(B11_8
), REG(B11_8
), shift
);
908 tcg_temp_free(shift
);
911 gen_set_label(label2
);
912 tcg_gen_movi_i32(REG(B11_8
), 0);
913 gen_set_label(label3
);
916 case 0x3008: /* sub Rm,Rn */
917 tcg_gen_sub_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
919 case 0x300a: /* subc Rm,Rn */
923 tcg_gen_andi_i32(t0
, cpu_sr
, SR_T
);
925 tcg_gen_sub_i32(t1
, REG(B11_8
), REG(B7_4
));
926 tcg_gen_sub_i32(t0
, t1
, t0
);
928 tcg_gen_setcond_i32(TCG_COND_LTU
, t2
, REG(B11_8
), t1
);
929 tcg_gen_setcond_i32(TCG_COND_LTU
, t1
, t1
, t0
);
930 tcg_gen_or_i32(t1
, t1
, t2
);
932 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
933 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
935 tcg_gen_mov_i32(REG(B11_8
), t0
);
939 case 0x300b: /* subv Rm,Rn */
943 tcg_gen_sub_i32(t0
, REG(B11_8
), REG(B7_4
));
945 tcg_gen_xor_i32(t1
, t0
, REG(B7_4
));
947 tcg_gen_xor_i32(t2
, REG(B11_8
), REG(B7_4
));
948 tcg_gen_and_i32(t1
, t1
, t2
);
950 tcg_gen_shri_i32(t1
, t1
, 31);
951 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
952 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
954 tcg_gen_mov_i32(REG(B11_8
), t0
);
958 case 0x2008: /* tst Rm,Rn */
960 TCGv val
= tcg_temp_new();
961 tcg_gen_and_i32(val
, REG(B7_4
), REG(B11_8
));
962 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
966 case 0x200a: /* xor Rm,Rn */
967 tcg_gen_xor_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
969 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
971 if (ctx
->flags
& FPSCR_SZ
) {
972 TCGv_i64 fp
= tcg_temp_new_i64();
973 gen_load_fpr64(fp
, XREG(B7_4
));
974 gen_store_fpr64(fp
, XREG(B11_8
));
975 tcg_temp_free_i64(fp
);
977 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
980 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
982 if (ctx
->flags
& FPSCR_SZ
) {
983 TCGv addr_hi
= tcg_temp_new();
985 tcg_gen_addi_i32(addr_hi
, REG(B11_8
), 4);
986 tcg_gen_qemu_st_i32(cpu_fregs
[fr
], REG(B11_8
),
987 ctx
->memidx
, MO_TEUL
);
988 tcg_gen_qemu_st_i32(cpu_fregs
[fr
+1], addr_hi
,
989 ctx
->memidx
, MO_TEUL
);
990 tcg_temp_free(addr_hi
);
992 tcg_gen_qemu_st_i32(cpu_fregs
[FREG(B7_4
)], REG(B11_8
),
993 ctx
->memidx
, MO_TEUL
);
996 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
998 if (ctx
->flags
& FPSCR_SZ
) {
999 TCGv addr_hi
= tcg_temp_new();
1000 int fr
= XREG(B11_8
);
1001 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1002 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
, MO_TEUL
);
1003 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
, MO_TEUL
);
1004 tcg_temp_free(addr_hi
);
1006 tcg_gen_qemu_ld_i32(cpu_fregs
[FREG(B11_8
)], REG(B7_4
),
1007 ctx
->memidx
, MO_TEUL
);
1010 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1012 if (ctx
->flags
& FPSCR_SZ
) {
1013 TCGv addr_hi
= tcg_temp_new();
1014 int fr
= XREG(B11_8
);
1015 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1016 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
, MO_TEUL
);
1017 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
, MO_TEUL
);
1018 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 8);
1019 tcg_temp_free(addr_hi
);
1021 tcg_gen_qemu_ld_i32(cpu_fregs
[FREG(B11_8
)], REG(B7_4
),
1022 ctx
->memidx
, MO_TEUL
);
1023 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
1026 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1028 if (ctx
->flags
& FPSCR_SZ
) {
1029 TCGv addr
= tcg_temp_new_i32();
1030 int fr
= XREG(B7_4
);
1031 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1032 tcg_gen_qemu_st_i32(cpu_fregs
[fr
+1], addr
, ctx
->memidx
, MO_TEUL
);
1033 tcg_gen_subi_i32(addr
, addr
, 4);
1034 tcg_gen_qemu_st_i32(cpu_fregs
[fr
], addr
, ctx
->memidx
, MO_TEUL
);
1035 tcg_gen_mov_i32(REG(B11_8
), addr
);
1036 tcg_temp_free(addr
);
1039 addr
= tcg_temp_new_i32();
1040 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1041 tcg_gen_qemu_st_i32(cpu_fregs
[FREG(B7_4
)], addr
,
1042 ctx
->memidx
, MO_TEUL
);
1043 tcg_gen_mov_i32(REG(B11_8
), addr
);
1044 tcg_temp_free(addr
);
1047 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1050 TCGv addr
= tcg_temp_new_i32();
1051 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
1052 if (ctx
->flags
& FPSCR_SZ
) {
1053 int fr
= XREG(B11_8
);
1054 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], addr
,
1055 ctx
->memidx
, MO_TEUL
);
1056 tcg_gen_addi_i32(addr
, addr
, 4);
1057 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr
,
1058 ctx
->memidx
, MO_TEUL
);
1060 tcg_gen_qemu_ld_i32(cpu_fregs
[FREG(B11_8
)], addr
,
1061 ctx
->memidx
, MO_TEUL
);
1063 tcg_temp_free(addr
);
1066 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1069 TCGv addr
= tcg_temp_new();
1070 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
1071 if (ctx
->flags
& FPSCR_SZ
) {
1072 int fr
= XREG(B7_4
);
1073 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], addr
,
1074 ctx
->memidx
, MO_TEUL
);
1075 tcg_gen_addi_i32(addr
, addr
, 4);
1076 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr
,
1077 ctx
->memidx
, MO_TEUL
);
1079 tcg_gen_qemu_st_i32(cpu_fregs
[FREG(B7_4
)], addr
,
1080 ctx
->memidx
, MO_TEUL
);
1082 tcg_temp_free(addr
);
1085 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1086 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1087 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1088 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1089 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1090 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1093 if (ctx
->flags
& FPSCR_PR
) {
1096 if (ctx
->opcode
& 0x0110)
1097 break; /* illegal instruction */
1098 fp0
= tcg_temp_new_i64();
1099 fp1
= tcg_temp_new_i64();
1100 gen_load_fpr64(fp0
, DREG(B11_8
));
1101 gen_load_fpr64(fp1
, DREG(B7_4
));
1102 switch (ctx
->opcode
& 0xf00f) {
1103 case 0xf000: /* fadd Rm,Rn */
1104 gen_helper_fadd_DT(fp0
, cpu_env
, fp0
, fp1
);
1106 case 0xf001: /* fsub Rm,Rn */
1107 gen_helper_fsub_DT(fp0
, cpu_env
, fp0
, fp1
);
1109 case 0xf002: /* fmul Rm,Rn */
1110 gen_helper_fmul_DT(fp0
, cpu_env
, fp0
, fp1
);
1112 case 0xf003: /* fdiv Rm,Rn */
1113 gen_helper_fdiv_DT(fp0
, cpu_env
, fp0
, fp1
);
1115 case 0xf004: /* fcmp/eq Rm,Rn */
1116 gen_helper_fcmp_eq_DT(cpu_env
, fp0
, fp1
);
1118 case 0xf005: /* fcmp/gt Rm,Rn */
1119 gen_helper_fcmp_gt_DT(cpu_env
, fp0
, fp1
);
1122 gen_store_fpr64(fp0
, DREG(B11_8
));
1123 tcg_temp_free_i64(fp0
);
1124 tcg_temp_free_i64(fp1
);
1126 switch (ctx
->opcode
& 0xf00f) {
1127 case 0xf000: /* fadd Rm,Rn */
1128 gen_helper_fadd_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1129 cpu_fregs
[FREG(B11_8
)],
1130 cpu_fregs
[FREG(B7_4
)]);
1132 case 0xf001: /* fsub Rm,Rn */
1133 gen_helper_fsub_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1134 cpu_fregs
[FREG(B11_8
)],
1135 cpu_fregs
[FREG(B7_4
)]);
1137 case 0xf002: /* fmul Rm,Rn */
1138 gen_helper_fmul_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1139 cpu_fregs
[FREG(B11_8
)],
1140 cpu_fregs
[FREG(B7_4
)]);
1142 case 0xf003: /* fdiv Rm,Rn */
1143 gen_helper_fdiv_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1144 cpu_fregs
[FREG(B11_8
)],
1145 cpu_fregs
[FREG(B7_4
)]);
1147 case 0xf004: /* fcmp/eq Rm,Rn */
1148 gen_helper_fcmp_eq_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1149 cpu_fregs
[FREG(B7_4
)]);
1151 case 0xf005: /* fcmp/gt Rm,Rn */
1152 gen_helper_fcmp_gt_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1153 cpu_fregs
[FREG(B7_4
)]);
1159 case 0xf00e: /* fmac FR0,RM,Rn */
1162 if (ctx
->flags
& FPSCR_PR
) {
1163 break; /* illegal instruction */
1165 gen_helper_fmac_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1166 cpu_fregs
[FREG(0)], cpu_fregs
[FREG(B7_4
)],
1167 cpu_fregs
[FREG(B11_8
)]);
1173 switch (ctx
->opcode
& 0xff00) {
1174 case 0xc900: /* and #imm,R0 */
1175 tcg_gen_andi_i32(REG(0), REG(0), B7_0
);
1177 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1180 addr
= tcg_temp_new();
1181 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1182 val
= tcg_temp_new();
1183 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1184 tcg_gen_andi_i32(val
, val
, B7_0
);
1185 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1187 tcg_temp_free(addr
);
1190 case 0x8b00: /* bf label */
1191 CHECK_NOT_DELAY_SLOT
1192 gen_conditional_jump(ctx
, ctx
->pc
+ 2,
1193 ctx
->pc
+ 4 + B7_0s
* 2);
1194 ctx
->bstate
= BS_BRANCH
;
1196 case 0x8f00: /* bf/s label */
1197 CHECK_NOT_DELAY_SLOT
1198 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 0);
1199 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1201 case 0x8900: /* bt label */
1202 CHECK_NOT_DELAY_SLOT
1203 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2,
1205 ctx
->bstate
= BS_BRANCH
;
1207 case 0x8d00: /* bt/s label */
1208 CHECK_NOT_DELAY_SLOT
1209 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 1);
1210 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1212 case 0x8800: /* cmp/eq #imm,R0 */
1213 gen_cmp_imm(TCG_COND_EQ
, REG(0), B7_0s
);
1215 case 0xc400: /* mov.b @(disp,GBR),R0 */
1217 TCGv addr
= tcg_temp_new();
1218 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1219 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_SB
);
1220 tcg_temp_free(addr
);
1223 case 0xc500: /* mov.w @(disp,GBR),R0 */
1225 TCGv addr
= tcg_temp_new();
1226 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1227 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESW
);
1228 tcg_temp_free(addr
);
1231 case 0xc600: /* mov.l @(disp,GBR),R0 */
1233 TCGv addr
= tcg_temp_new();
1234 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1235 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESL
);
1236 tcg_temp_free(addr
);
1239 case 0xc000: /* mov.b R0,@(disp,GBR) */
1241 TCGv addr
= tcg_temp_new();
1242 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1243 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_UB
);
1244 tcg_temp_free(addr
);
1247 case 0xc100: /* mov.w R0,@(disp,GBR) */
1249 TCGv addr
= tcg_temp_new();
1250 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1251 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUW
);
1252 tcg_temp_free(addr
);
1255 case 0xc200: /* mov.l R0,@(disp,GBR) */
1257 TCGv addr
= tcg_temp_new();
1258 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1259 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUL
);
1260 tcg_temp_free(addr
);
1263 case 0x8000: /* mov.b R0,@(disp,Rn) */
1265 TCGv addr
= tcg_temp_new();
1266 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1267 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_UB
);
1268 tcg_temp_free(addr
);
1271 case 0x8100: /* mov.w R0,@(disp,Rn) */
1273 TCGv addr
= tcg_temp_new();
1274 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1275 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUW
);
1276 tcg_temp_free(addr
);
1279 case 0x8400: /* mov.b @(disp,Rn),R0 */
1281 TCGv addr
= tcg_temp_new();
1282 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1283 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_SB
);
1284 tcg_temp_free(addr
);
1287 case 0x8500: /* mov.w @(disp,Rn),R0 */
1289 TCGv addr
= tcg_temp_new();
1290 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1291 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESW
);
1292 tcg_temp_free(addr
);
1295 case 0xc700: /* mova @(disp,PC),R0 */
1296 tcg_gen_movi_i32(REG(0), ((ctx
->pc
& 0xfffffffc) + 4 + B7_0
* 4) & ~3);
1298 case 0xcb00: /* or #imm,R0 */
1299 tcg_gen_ori_i32(REG(0), REG(0), B7_0
);
1301 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1304 addr
= tcg_temp_new();
1305 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1306 val
= tcg_temp_new();
1307 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1308 tcg_gen_ori_i32(val
, val
, B7_0
);
1309 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1311 tcg_temp_free(addr
);
1314 case 0xc300: /* trapa #imm */
1317 CHECK_NOT_DELAY_SLOT
1318 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1319 imm
= tcg_const_i32(B7_0
);
1320 gen_helper_trapa(cpu_env
, imm
);
1322 ctx
->bstate
= BS_BRANCH
;
1325 case 0xc800: /* tst #imm,R0 */
1327 TCGv val
= tcg_temp_new();
1328 tcg_gen_andi_i32(val
, REG(0), B7_0
);
1329 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1333 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1335 TCGv val
= tcg_temp_new();
1336 tcg_gen_add_i32(val
, REG(0), cpu_gbr
);
1337 tcg_gen_qemu_ld_i32(val
, val
, ctx
->memidx
, MO_UB
);
1338 tcg_gen_andi_i32(val
, val
, B7_0
);
1339 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1343 case 0xca00: /* xor #imm,R0 */
1344 tcg_gen_xori_i32(REG(0), REG(0), B7_0
);
1346 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1349 addr
= tcg_temp_new();
1350 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1351 val
= tcg_temp_new();
1352 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1353 tcg_gen_xori_i32(val
, val
, B7_0
);
1354 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1356 tcg_temp_free(addr
);
1361 switch (ctx
->opcode
& 0xf08f) {
1362 case 0x408e: /* ldc Rm,Rn_BANK */
1364 tcg_gen_mov_i32(ALTREG(B6_4
), REG(B11_8
));
1366 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1368 tcg_gen_qemu_ld_i32(ALTREG(B6_4
), REG(B11_8
), ctx
->memidx
, MO_TESL
);
1369 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1371 case 0x0082: /* stc Rm_BANK,Rn */
1373 tcg_gen_mov_i32(REG(B11_8
), ALTREG(B6_4
));
1375 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1378 TCGv addr
= tcg_temp_new();
1379 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1380 tcg_gen_qemu_st_i32(ALTREG(B6_4
), addr
, ctx
->memidx
, MO_TEUL
);
1381 tcg_gen_mov_i32(REG(B11_8
), addr
);
1382 tcg_temp_free(addr
);
1387 switch (ctx
->opcode
& 0xf0ff) {
1388 case 0x0023: /* braf Rn */
1389 CHECK_NOT_DELAY_SLOT
1390 tcg_gen_addi_i32(cpu_delayed_pc
, REG(B11_8
), ctx
->pc
+ 4);
1391 ctx
->flags
|= DELAY_SLOT
;
1392 ctx
->delayed_pc
= (uint32_t) - 1;
1394 case 0x0003: /* bsrf Rn */
1395 CHECK_NOT_DELAY_SLOT
1396 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1397 tcg_gen_add_i32(cpu_delayed_pc
, REG(B11_8
), cpu_pr
);
1398 ctx
->flags
|= DELAY_SLOT
;
1399 ctx
->delayed_pc
= (uint32_t) - 1;
1401 case 0x4015: /* cmp/pl Rn */
1402 gen_cmp_imm(TCG_COND_GT
, REG(B11_8
), 0);
1404 case 0x4011: /* cmp/pz Rn */
1405 gen_cmp_imm(TCG_COND_GE
, REG(B11_8
), 0);
1407 case 0x4010: /* dt Rn */
1408 tcg_gen_subi_i32(REG(B11_8
), REG(B11_8
), 1);
1409 gen_cmp_imm(TCG_COND_EQ
, REG(B11_8
), 0);
1411 case 0x402b: /* jmp @Rn */
1412 CHECK_NOT_DELAY_SLOT
1413 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1414 ctx
->flags
|= DELAY_SLOT
;
1415 ctx
->delayed_pc
= (uint32_t) - 1;
1417 case 0x400b: /* jsr @Rn */
1418 CHECK_NOT_DELAY_SLOT
1419 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1420 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1421 ctx
->flags
|= DELAY_SLOT
;
1422 ctx
->delayed_pc
= (uint32_t) - 1;
1424 case 0x400e: /* ldc Rm,SR */
1426 tcg_gen_andi_i32(cpu_sr
, REG(B11_8
), 0x700083f3);
1427 ctx
->bstate
= BS_STOP
;
1429 case 0x4007: /* ldc.l @Rm+,SR */
1432 TCGv val
= tcg_temp_new();
1433 tcg_gen_qemu_ld_i32(val
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
1434 tcg_gen_andi_i32(cpu_sr
, val
, 0x700083f3);
1436 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1437 ctx
->bstate
= BS_STOP
;
1440 case 0x0002: /* stc SR,Rn */
1442 tcg_gen_mov_i32(REG(B11_8
), cpu_sr
);
1444 case 0x4003: /* stc SR,@-Rn */
1447 TCGv addr
= tcg_temp_new();
1448 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1449 tcg_gen_qemu_st_i32(cpu_sr
, addr
, ctx
->memidx
, MO_TEUL
);
1450 tcg_gen_mov_i32(REG(B11_8
), addr
);
1451 tcg_temp_free(addr
);
1454 #define LD(reg,ldnum,ldpnum,prechk) \
1457 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1461 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1462 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1464 #define ST(reg,stnum,stpnum,prechk) \
1467 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1472 TCGv addr = tcg_temp_new(); \
1473 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1474 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1475 tcg_gen_mov_i32(REG(B11_8), addr); \
1476 tcg_temp_free(addr); \
1479 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1480 LD(reg,ldnum,ldpnum,prechk) \
1481 ST(reg,stnum,stpnum,prechk)
1482 LDST(gbr
, 0x401e, 0x4017, 0x0012, 0x4013, {})
1483 LDST(vbr
, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED
)
1484 LDST(ssr
, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED
)
1485 LDST(spc
, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED
)
1486 ST(sgr
, 0x003a, 0x4032, CHECK_PRIVILEGED
)
1487 LD(sgr
, 0x403a, 0x4036, CHECK_PRIVILEGED
if (!(ctx
->features
& SH_FEATURE_SH4A
)) break;)
1488 LDST(dbr
, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED
)
1489 LDST(mach
, 0x400a, 0x4006, 0x000a, 0x4002, {})
1490 LDST(macl
, 0x401a, 0x4016, 0x001a, 0x4012, {})
1491 LDST(pr
, 0x402a, 0x4026, 0x002a, 0x4022, {})
1492 LDST(fpul
, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED
})
1493 case 0x406a: /* lds Rm,FPSCR */
1495 gen_helper_ld_fpscr(cpu_env
, REG(B11_8
));
1496 ctx
->bstate
= BS_STOP
;
1498 case 0x4066: /* lds.l @Rm+,FPSCR */
1501 TCGv addr
= tcg_temp_new();
1502 tcg_gen_qemu_ld_i32(addr
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
1503 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1504 gen_helper_ld_fpscr(cpu_env
, addr
);
1505 tcg_temp_free(addr
);
1506 ctx
->bstate
= BS_STOP
;
1509 case 0x006a: /* sts FPSCR,Rn */
1511 tcg_gen_andi_i32(REG(B11_8
), cpu_fpscr
, 0x003fffff);
1513 case 0x4062: /* sts FPSCR,@-Rn */
1517 val
= tcg_temp_new();
1518 tcg_gen_andi_i32(val
, cpu_fpscr
, 0x003fffff);
1519 addr
= tcg_temp_new();
1520 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1521 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_TEUL
);
1522 tcg_gen_mov_i32(REG(B11_8
), addr
);
1523 tcg_temp_free(addr
);
1527 case 0x00c3: /* movca.l R0,@Rm */
1529 TCGv val
= tcg_temp_new();
1530 tcg_gen_qemu_ld_i32(val
, REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1531 gen_helper_movcal(cpu_env
, REG(B11_8
), val
);
1532 tcg_gen_qemu_st_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1534 ctx
->has_movcal
= 1;
1537 /* MOVUA.L @Rm,R0 (Rm) -> R0
1538 Load non-boundary-aligned data */
1539 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1542 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1543 Load non-boundary-aligned data */
1544 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1545 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1547 case 0x0029: /* movt Rn */
1548 tcg_gen_andi_i32(REG(B11_8
), cpu_sr
, SR_T
);
1553 If (T == 1) R0 -> (Rn)
1556 if (ctx
->features
& SH_FEATURE_SH4A
) {
1557 TCGLabel
*label
= gen_new_label();
1558 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
1559 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cpu_ldst
);
1560 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ldst
, 0, label
);
1561 tcg_gen_qemu_st_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1562 gen_set_label(label
);
1563 tcg_gen_movi_i32(cpu_ldst
, 0);
1571 When interrupt/exception
1574 if (ctx
->features
& SH_FEATURE_SH4A
) {
1575 tcg_gen_movi_i32(cpu_ldst
, 0);
1576 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TESL
);
1577 tcg_gen_movi_i32(cpu_ldst
, 1);
1581 case 0x0093: /* ocbi @Rn */
1583 gen_helper_ocbi(cpu_env
, REG(B11_8
));
1586 case 0x00a3: /* ocbp @Rn */
1587 case 0x00b3: /* ocbwb @Rn */
1588 /* These instructions are supposed to do nothing in case of
1589 a cache miss. Given that we only partially emulate caches
1590 it is safe to simply ignore them. */
1592 case 0x0083: /* pref @Rn */
1594 case 0x00d3: /* prefi @Rn */
1595 if (ctx
->features
& SH_FEATURE_SH4A
)
1599 case 0x00e3: /* icbi @Rn */
1600 if (ctx
->features
& SH_FEATURE_SH4A
)
1604 case 0x00ab: /* synco */
1605 if (ctx
->features
& SH_FEATURE_SH4A
)
1609 case 0x4024: /* rotcl Rn */
1611 TCGv tmp
= tcg_temp_new();
1612 tcg_gen_mov_i32(tmp
, cpu_sr
);
1613 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1614 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1615 gen_copy_bit_i32(REG(B11_8
), 0, tmp
, 0);
1619 case 0x4025: /* rotcr Rn */
1621 TCGv tmp
= tcg_temp_new();
1622 tcg_gen_mov_i32(tmp
, cpu_sr
);
1623 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1624 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1625 gen_copy_bit_i32(REG(B11_8
), 31, tmp
, 0);
1629 case 0x4004: /* rotl Rn */
1630 tcg_gen_rotli_i32(REG(B11_8
), REG(B11_8
), 1);
1631 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1633 case 0x4005: /* rotr Rn */
1634 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1635 tcg_gen_rotri_i32(REG(B11_8
), REG(B11_8
), 1);
1637 case 0x4000: /* shll Rn */
1638 case 0x4020: /* shal Rn */
1639 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1640 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1642 case 0x4021: /* shar Rn */
1643 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1644 tcg_gen_sari_i32(REG(B11_8
), REG(B11_8
), 1);
1646 case 0x4001: /* shlr Rn */
1647 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1648 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1650 case 0x4008: /* shll2 Rn */
1651 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 2);
1653 case 0x4018: /* shll8 Rn */
1654 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 8);
1656 case 0x4028: /* shll16 Rn */
1657 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 16);
1659 case 0x4009: /* shlr2 Rn */
1660 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 2);
1662 case 0x4019: /* shlr8 Rn */
1663 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 8);
1665 case 0x4029: /* shlr16 Rn */
1666 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 16);
1668 case 0x401b: /* tas.b @Rn */
1671 addr
= tcg_temp_local_new();
1672 tcg_gen_mov_i32(addr
, REG(B11_8
));
1673 val
= tcg_temp_local_new();
1674 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1675 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1676 tcg_gen_ori_i32(val
, val
, 0x80);
1677 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1679 tcg_temp_free(addr
);
1682 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1684 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1686 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1688 tcg_gen_mov_i32(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1690 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1692 if (ctx
->flags
& FPSCR_PR
) {
1694 if (ctx
->opcode
& 0x0100)
1695 break; /* illegal instruction */
1696 fp
= tcg_temp_new_i64();
1697 gen_helper_float_DT(fp
, cpu_env
, cpu_fpul
);
1698 gen_store_fpr64(fp
, DREG(B11_8
));
1699 tcg_temp_free_i64(fp
);
1702 gen_helper_float_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
, cpu_fpul
);
1705 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1707 if (ctx
->flags
& FPSCR_PR
) {
1709 if (ctx
->opcode
& 0x0100)
1710 break; /* illegal instruction */
1711 fp
= tcg_temp_new_i64();
1712 gen_load_fpr64(fp
, DREG(B11_8
));
1713 gen_helper_ftrc_DT(cpu_fpul
, cpu_env
, fp
);
1714 tcg_temp_free_i64(fp
);
1717 gen_helper_ftrc_FT(cpu_fpul
, cpu_env
, cpu_fregs
[FREG(B11_8
)]);
1720 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1723 gen_helper_fneg_T(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1726 case 0xf05d: /* fabs FRn/DRn */
1728 if (ctx
->flags
& FPSCR_PR
) {
1729 if (ctx
->opcode
& 0x0100)
1730 break; /* illegal instruction */
1731 TCGv_i64 fp
= tcg_temp_new_i64();
1732 gen_load_fpr64(fp
, DREG(B11_8
));
1733 gen_helper_fabs_DT(fp
, fp
);
1734 gen_store_fpr64(fp
, DREG(B11_8
));
1735 tcg_temp_free_i64(fp
);
1737 gen_helper_fabs_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1740 case 0xf06d: /* fsqrt FRn */
1742 if (ctx
->flags
& FPSCR_PR
) {
1743 if (ctx
->opcode
& 0x0100)
1744 break; /* illegal instruction */
1745 TCGv_i64 fp
= tcg_temp_new_i64();
1746 gen_load_fpr64(fp
, DREG(B11_8
));
1747 gen_helper_fsqrt_DT(fp
, cpu_env
, fp
);
1748 gen_store_fpr64(fp
, DREG(B11_8
));
1749 tcg_temp_free_i64(fp
);
1751 gen_helper_fsqrt_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1752 cpu_fregs
[FREG(B11_8
)]);
1755 case 0xf07d: /* fsrra FRn */
1758 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1760 if (!(ctx
->flags
& FPSCR_PR
)) {
1761 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0);
1764 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1766 if (!(ctx
->flags
& FPSCR_PR
)) {
1767 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0x3f800000);
1770 case 0xf0ad: /* fcnvsd FPUL,DRn */
1773 TCGv_i64 fp
= tcg_temp_new_i64();
1774 gen_helper_fcnvsd_FT_DT(fp
, cpu_env
, cpu_fpul
);
1775 gen_store_fpr64(fp
, DREG(B11_8
));
1776 tcg_temp_free_i64(fp
);
1779 case 0xf0bd: /* fcnvds DRn,FPUL */
1782 TCGv_i64 fp
= tcg_temp_new_i64();
1783 gen_load_fpr64(fp
, DREG(B11_8
));
1784 gen_helper_fcnvds_DT_FT(cpu_fpul
, cpu_env
, fp
);
1785 tcg_temp_free_i64(fp
);
1788 case 0xf0ed: /* fipr FVm,FVn */
1790 if ((ctx
->flags
& FPSCR_PR
) == 0) {
1792 m
= tcg_const_i32((ctx
->opcode
>> 8) & 3);
1793 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1794 gen_helper_fipr(cpu_env
, m
, n
);
1800 case 0xf0fd: /* ftrv XMTRX,FVn */
1802 if ((ctx
->opcode
& 0x0300) == 0x0100 &&
1803 (ctx
->flags
& FPSCR_PR
) == 0) {
1805 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1806 gen_helper_ftrv(cpu_env
, n
);
1813 fprintf(stderr
, "unknown instruction 0x%04x at pc 0x%08x\n",
1814 ctx
->opcode
, ctx
->pc
);
1817 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1818 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1819 gen_helper_raise_slot_illegal_instruction(cpu_env
);
1821 gen_helper_raise_illegal_instruction(cpu_env
);
1823 ctx
->bstate
= BS_BRANCH
;
1826 static void decode_opc(DisasContext
* ctx
)
1828 uint32_t old_flags
= ctx
->flags
;
1830 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
1831 tcg_gen_debug_insn_start(ctx
->pc
);
1836 if (old_flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1837 if (ctx
->flags
& DELAY_SLOT_CLEARME
) {
1840 /* go out of the delay slot */
1841 uint32_t new_flags
= ctx
->flags
;
1842 new_flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
1843 gen_store_flags(new_flags
);
1846 ctx
->bstate
= BS_BRANCH
;
1847 if (old_flags
& DELAY_SLOT_CONDITIONAL
) {
1848 gen_delayed_conditional_jump(ctx
);
1849 } else if (old_flags
& DELAY_SLOT
) {
1855 /* go into a delay slot */
1856 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))
1857 gen_store_flags(ctx
->flags
);
1861 gen_intermediate_code_internal(SuperHCPU
*cpu
, TranslationBlock
*tb
,
1864 CPUState
*cs
= CPU(cpu
);
1865 CPUSH4State
*env
= &cpu
->env
;
1867 target_ulong pc_start
;
1875 ctx
.flags
= (uint32_t)tb
->flags
;
1876 ctx
.bstate
= BS_NONE
;
1877 ctx
.memidx
= (ctx
.flags
& SR_MD
) == 0 ? 1 : 0;
1878 /* We don't know if the delayed pc came from a dynamic or static branch,
1879 so assume it is a dynamic branch. */
1880 ctx
.delayed_pc
= -1; /* use delayed pc from env pointer */
1882 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
1883 ctx
.features
= env
->features
;
1884 ctx
.has_movcal
= (ctx
.flags
& TB_FLAG_PENDING_MOVCA
);
1888 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
1890 max_insns
= CF_COUNT_MASK
;
1892 while (ctx
.bstate
== BS_NONE
&& !tcg_op_buf_full()) {
1893 if (unlikely(!QTAILQ_EMPTY(&cs
->breakpoints
))) {
1894 QTAILQ_FOREACH(bp
, &cs
->breakpoints
, entry
) {
1895 if (ctx
.pc
== bp
->pc
) {
1896 /* We have hit a breakpoint - make sure PC is up-to-date */
1897 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1898 gen_helper_debug(cpu_env
);
1899 ctx
.bstate
= BS_BRANCH
;
1905 i
= tcg_op_buf_count();
1909 tcg_ctx
.gen_opc_instr_start
[ii
++] = 0;
1911 tcg_ctx
.gen_opc_pc
[ii
] = ctx
.pc
;
1912 gen_opc_hflags
[ii
] = ctx
.flags
;
1913 tcg_ctx
.gen_opc_instr_start
[ii
] = 1;
1914 tcg_ctx
.gen_opc_icount
[ii
] = num_insns
;
1916 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
1919 fprintf(stderr
, "Loading opcode at address 0x%08x\n", ctx
.pc
);
1922 ctx
.opcode
= cpu_lduw_code(env
, ctx
.pc
);
1926 if ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
1928 if (cs
->singlestep_enabled
) {
1931 if (num_insns
>= max_insns
)
1936 if (tb
->cflags
& CF_LAST_IO
)
1938 if (cs
->singlestep_enabled
) {
1939 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1940 gen_helper_debug(cpu_env
);
1942 switch (ctx
.bstate
) {
1944 /* gen_op_interrupt_restart(); */
1948 gen_store_flags(ctx
.flags
| DELAY_SLOT_CLEARME
);
1950 gen_goto_tb(&ctx
, 0, ctx
.pc
);
1953 /* gen_op_interrupt_restart(); */
1962 gen_tb_end(tb
, num_insns
);
1965 i
= tcg_op_buf_count();
1968 tcg_ctx
.gen_opc_instr_start
[ii
++] = 0;
1970 tb
->size
= ctx
.pc
- pc_start
;
1971 tb
->icount
= num_insns
;
1975 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
1976 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
1977 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 0);
1983 void gen_intermediate_code(CPUSH4State
* env
, struct TranslationBlock
*tb
)
1985 gen_intermediate_code_internal(sh_env_get_cpu(env
), tb
, false);
1988 void gen_intermediate_code_pc(CPUSH4State
* env
, struct TranslationBlock
*tb
)
1990 gen_intermediate_code_internal(sh_env_get_cpu(env
), tb
, true);
1993 void restore_state_to_opc(CPUSH4State
*env
, TranslationBlock
*tb
, int pc_pos
)
1995 env
->pc
= tcg_ctx
.gen_opc_pc
[pc_pos
];
1996 env
->flags
= gen_opc_hflags
[pc_pos
];