 * Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
//#define SH4_SINGLE_STEP

#include "disas/disas.h"
typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc;
    uint16_t opcode;
    uint32_t flags;
    int bstate;
    int memidx;
    uint32_t delayed_pc;
    int singlestep_enabled;
    uint32_t features;
    int has_movcal;
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->flags & SR_MD))
#endif
enum {
    BS_NONE   = 0, /* We go out of the TB without reaching a branch or an
                      exception condition */
    BS_STOP   = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP   = 3, /* We reached an exception condition */
};
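
/* Illustrative note (not part of the original source): BS_STOP is used when
 * translated state can invalidate the flags cached in the TB, e.g. the
 * frchg/fschg handlers below toggle FPSCR.FR/FPSCR.SZ and therefore set
 * ctx->bstate = BS_STOP so that the next TB is looked up with fresh flags. */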
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_gregs[24];
static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc;

static uint32_t gen_opc_hflags[OPC_BUF_SIZE];

#include "exec/gen-icount.h"
void sh4_translate_init(void)
{
    int i;
    static int done_init = 0;
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
        "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
        "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
        "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
        "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
        "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
        "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    if (done_init) {
        return;
    }

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 24; i++)
        cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);

    cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, ldst), "_ldst_");

    for (i = 0; i < 32; i++)
        cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);

    done_init = 1;
}
void superh_cpu_dump_state(CPUState *cs, FILE *f,
                           fprintf_function cpu_fprintf, int flags)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int i;

    cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                env->pc, env->sr, env->pr, env->fpscr);
    cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                env->spc, env->ssr, env->gbr, env->vbr);
    cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                env->sgr, env->dbr, env->delayed_pc, env->fpul);
    for (i = 0; i < 24; i += 4) {
        cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                    i, env->gregs[i], i + 1, env->gregs[i + 1],
                    i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & DELAY_SLOT) {
        cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
        cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    }
}
static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = ctx->tb;

    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
        !ctx->singlestep_enabled) {
        /* Use a direct jump if in same page and singlestep not enabled */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (ctx->singlestep_enabled)
            gen_helper_debug(cpu_env);
        tcg_gen_exit_tb(0);
    }
}

static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == (uint32_t) - 1) {
        /* Target is not statically known, it comes necessarily from a
           delayed jump as immediate jumps are conditional jumps */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        if (ctx->singlestep_enabled)
            gen_helper_debug(cpu_env);
        tcg_gen_exit_tb(0);
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}
static inline void gen_branch_slot(uint32_t delayed_pc, int t)
{
    TCGv sr;
    int label = gen_new_label();
    tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, cpu_sr, SR_T);
    tcg_gen_brcondi_i32(t ? TCG_COND_EQ : TCG_COND_NE, sr, 0, label);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    gen_set_label(label);
}
/* Immediate conditional jump (bt or bf) */
static void gen_conditional_jump(DisasContext * ctx,
                                 target_ulong ift, target_ulong ifnott)
{
    int l1;
    TCGv sr;

    l1 = gen_new_label();
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, cpu_sr, SR_T);
    tcg_gen_brcondi_i32(TCG_COND_NE, sr, 0, l1);
    gen_goto_tb(ctx, 0, ifnott);
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ift);
}
/* Delayed conditional jump (bt or bf) */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    int l1;
    TCGv ds;

    l1 = gen_new_label();
    ds = tcg_temp_new();
    tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->pc + 2);
    gen_set_label(l1);
    tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
    gen_jump(ctx);
}
static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
{
    TCGv t;

    t = tcg_temp_new();
    tcg_gen_setcond_i32(cond, t, t1, t0);
    tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
    tcg_gen_or_i32(cpu_sr, cpu_sr, t);
    tcg_temp_free(t);
}

static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
{
    TCGv t;

    t = tcg_temp_new();
    tcg_gen_setcondi_i32(cond, t, t0, imm);
    tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
    tcg_gen_or_i32(cpu_sr, cpu_sr, t);
    tcg_temp_free(t);
}
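
/* Illustrative note (not part of the original source): gen_cmp() and
 * gen_cmp_imm() only ever update SR.T; e.g. gen_cmp_imm(TCG_COND_EQ, val, 0)
 * is how the "tst" handlers below set T when the AND of their operands
 * is zero. */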
static inline void gen_store_flags(uint32_t flags)
{
    tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
}
static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
{
    TCGv tmp = tcg_temp_new();

    /* Copy bit p1 of t1 into bit p0 of t0 */
    tcg_gen_andi_i32(tmp, t1, (1 << p1));
    tcg_gen_andi_i32(t0, t0, ~(1 << p0));
    if (p1 > p0)
        tcg_gen_shri_i32(tmp, tmp, p1 - p0);
    else if (p1 < p0)
        tcg_gen_shli_i32(tmp, tmp, p0 - p1);
    tcg_gen_or_i32(t0, t0, tmp);
    tcg_temp_free(tmp);
}
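
/* Illustrative note (not part of the original source): div0s below uses this
 * as gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31) / (cpu_sr, 9, REG(B7_4), 31)
 * to copy the sign bits of Rn and Rm into SR.Q and SR.M, and the rotate
 * handlers use it to move a register bit in or out of SR.T (bit 0). */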
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}

static inline void gen_store_fpr64(TCGv_i64 t, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
    tcg_gen_shri_i64(t, t, 32);
    tcg_gen_trunc_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg], tmp);
    tcg_temp_free_i32(tmp);
}
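
/* Illustrative note (not part of the original source): a double-precision DRn
 * is the even/odd single pair FRn:FRn+1 with the even register holding the
 * upper 32 bits, which is why the low half of the i64 value goes to
 * cpu_fregs[reg + 1] and the high half to cpu_fregs[reg]. */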
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)
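
/* Illustrative example (not part of the original source): for the 16-bit
 * insn word 0x1234 ("mov.l R3,@(16,R2)"), B15_12 = 0x1 selects the mov.l
 * group, B11_8 = 2 is Rn, B7_4 = 3 is Rm and B3_0 = 4 is the displacement,
 * which the handler below scales by 4. */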
#define REG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) == (SR_MD | SR_RB) \
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define ALTREG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) != (SR_MD | SR_RB)\
                   ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))
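
/* Illustrative note (not part of the original source): R0-R7 are banked;
 * BANK1 lives at cpu_gregs[16..23]. REG(x) selects the bank implied by the
 * current SR.MD/SR.RB state, while ALTREG(x) selects the other bank, which
 * is what "ldc Rm,Rn_BANK" / "stc Rm_BANK,Rn" below need. */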
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
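
/* Illustrative note (not part of the original source): FPSCR.FR swaps the two
 * floating-point register banks, hence the "^ 0x10". For SZ=1 64-bit moves,
 * XHACK() relocates the encoding's low bit (the DR vs XD selector) to bit 4,
 * so XD registers land in the opposite bank while the even pair index is
 * kept. */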
#define CHECK_NOT_DELAY_SLOT                                      \
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {     \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
        gen_helper_raise_slot_illegal_instruction(cpu_env);       \
        ctx->bstate = BS_BRANCH;                                  \
        return;                                                   \
    }

#define CHECK_PRIVILEGED                                            \
    if (IS_USER(ctx)) {                                             \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                          \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {   \
            gen_helper_raise_slot_illegal_instruction(cpu_env);     \
        } else {                                                    \
            gen_helper_raise_illegal_instruction(cpu_env);          \
        }                                                           \
        ctx->bstate = BS_BRANCH;                                    \
        return;                                                     \
    }

#define CHECK_FPU_ENABLED                                           \
    if (ctx->flags & SR_FD) {                                       \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                          \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {   \
            gen_helper_raise_slot_fpu_disable(cpu_env);             \
        } else {                                                    \
            gen_helper_raise_fpu_disable(cpu_env);                  \
        }                                                           \
        ctx->bstate = BS_BRANCH;                                    \
        return;                                                     \
    }
static void _decode_opc(DisasContext * ctx)
{
    /* This code tries to make movcal emulation sufficiently
       accurate for Linux purposes.  This instruction writes
       memory, and prior to that, always allocates a cache line.
       It is used in two contexts:
       - in memcpy, where data is copied in blocks, the first write
         to a block uses movca.l for performance.
       - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is used
         to flush the cache. Here, the data written by movca.l is never
         written to memory, and the data written is just bogus.

       To simulate this, we simulate movca.l: we store the value to memory,
       but we also remember the previous content. If we see ocbi, we check
       whether movca.l for that address was done previously. If so, the write
       should not have hit the memory, so we restore the previous content.
       When we see an instruction that is neither movca.l nor ocbi, the
       previous content is discarded.

       To optimize, we only try to flush stores when we're at the start of
       the TB, or if we already saw movca.l in this TB and did not flush
       stores yet. */
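
    /* Illustrative sketch (not part of the original source): the cache-flush
     * pattern from arch/sh/mm/cache-sh4.c looks roughly like
     *     movca.l r0,@r1    ! allocate + write the line at r1 (dummy data)
     *     ocbi    @r1       ! invalidate it, so the dummy data must not land
     * while memcpy only uses movca.l for the first word of a block and does
     * expect the store to reach memory; the backup/restore logic in
     * gen_helper_movcal()/gen_helper_ocbi() is what tells the two apart. */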
    if (ctx->has_movcal)
    {
        int opcode = ctx->opcode & 0xf0ff;
        if (opcode != 0x0093 /* ocbi */
            && opcode != 0x00c3 /* movca.l */)
        {
            gen_helper_discard_movcal_backup(cpu_env);
            ctx->has_movcal = 0;
        }
    }

#if 0
    fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
#endif

    switch (ctx->opcode) {
    case 0x0019: /* div0u */
        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
        return;
    case 0x000b: /* rts */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
        ctx->flags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x0028: /* clrmac */
        tcg_gen_movi_i32(cpu_mach, 0);
        tcg_gen_movi_i32(cpu_macl, 0);
        return;
    case 0x0048: /* clrs */
        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
        return;
    case 0x0008: /* clrt */
        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
        return;
    case 0x0038: /* ldtlb */
        CHECK_PRIVILEGED
        gen_helper_ldtlb(cpu_env);
        return;
    case 0x002b: /* rte */
        CHECK_PRIVILEGED
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_sr, cpu_ssr);
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
        ctx->flags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x0058: /* sets */
        tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
        return;
    case 0x0018: /* sett */
        tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
        return;
    case 0xfbfd: /* frchg */
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
        ctx->bstate = BS_STOP;
        return;
    case 0xf3fd: /* fschg */
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
        ctx->bstate = BS_STOP;
        return;
    case 0x0009: /* nop */
        return;
    case 0x001b: /* sleep */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
        gen_helper_sleep(cpu_env);
        return;
    }
    switch (ctx->opcode & 0xf000) {
    case 0x1000: /* mov.l Rm,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x5000: /* mov.l @(disp,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0xe000: /* mov #imm,Rn */
        tcg_gen_movi_i32(REG(B11_8), B7_0s);
        return;
    case 0x9000: /* mov.w @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xd000: /* mov.l @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0x7000: /* add #imm,Rn */
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
        return;
    case 0xa000: /* bra disp */
        CHECK_NOT_DELAY_SLOT
        ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
        ctx->flags |= DELAY_SLOT;
        return;
    case 0xb000: /* bsr disp */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
        ctx->flags |= DELAY_SLOT;
        return;
    }
    switch (ctx->opcode & 0xf00f) {
    case 0x6003: /* mov Rm,Rn */
        tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x2000: /* mov.b Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
        return;
    case 0x2001: /* mov.w Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
        return;
    case 0x2002: /* mov.l Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
        return;
    case 0x6000: /* mov.b @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        return;
    case 0x6001: /* mov.w @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
        return;
    case 0x6002: /* mov.l @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
        return;
    case 0x2004: /* mov.b Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 1);
            /* might cause re-execution */
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
            tcg_temp_free(addr);
        }
        return;
    case 0x2005: /* mov.w Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 2);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x2006: /* mov.l Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x6004: /* mov.b @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        if (B11_8 != B7_4)
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
        return;
    case 0x6005: /* mov.w @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
        if (B11_8 != B7_4)
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        return;
    case 0x6006: /* mov.l @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
        if (B11_8 != B7_4)
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        return;
    case 0x0004: /* mov.b Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x0005: /* mov.w Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0x0006: /* mov.l Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x000c: /* mov.b @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x000d: /* mov.w @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0x000e: /* mov.l @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0x6008: /* swap.b Rm,Rn */
        {
            TCGv high, low;
            high = tcg_temp_new();
            tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
            low = tcg_temp_new();
            tcg_gen_ext16u_i32(low, REG(B7_4));
            tcg_gen_bswap16_i32(low, low);
            tcg_gen_or_i32(REG(B11_8), high, low);
            tcg_temp_free(low);
            tcg_temp_free(high);
        }
        return;
    case 0x6009: /* swap.w Rm,Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
        return;
    case 0x200d: /* xtrct Rm,Rn */
        {
            TCGv high, low;
            high = tcg_temp_new();
            tcg_gen_shli_i32(high, REG(B7_4), 16);
            low = tcg_temp_new();
            tcg_gen_shri_i32(low, REG(B11_8), 16);
            tcg_gen_or_i32(REG(B11_8), high, low);
            tcg_temp_free(low);
            tcg_temp_free(high);
        }
        return;
    case 0x300c: /* add Rm,Rn */
        tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300e: /* addc Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_andi_i32(t0, cpu_sr, SR_T);
            t1 = tcg_temp_new();
            tcg_gen_add_i32(t1, REG(B7_4), REG(B11_8));
            tcg_gen_add_i32(t0, t0, t1);
            t2 = tcg_temp_new();
            /* T = carry out of Rn + Rm (+ T) */
            tcg_gen_setcond_i32(TCG_COND_GTU, t2, REG(B11_8), t1);
            tcg_gen_setcond_i32(TCG_COND_GTU, t1, t1, t0);
            tcg_gen_or_i32(t1, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
            tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x300f: /* addv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
            tcg_gen_andc_i32(t1, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(t1, t1, 31);
            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
            tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B7_4), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2009: /* and Rm,Rn */
        tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x3000: /* cmp/eq Rm,Rn */
        gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
        return;
    case 0x3003: /* cmp/ge Rm,Rn */
        gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
        return;
    case 0x3007: /* cmp/gt Rm,Rn */
        gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
        return;
    case 0x3006: /* cmp/hi Rm,Rn */
        gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
        return;
    case 0x3002: /* cmp/hs Rm,Rn */
        gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
        return;
    case 0x200c: /* cmp/str Rm,Rn */
        {
            TCGv cmp1 = tcg_temp_new();
            TCGv cmp2 = tcg_temp_new();
            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
            tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
            tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
            tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
            tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
            tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
            tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
            tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
            tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
            tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
            tcg_temp_free(cmp2);
            tcg_temp_free(cmp1);
        }
        return;
    case 0x2007: /* div0s Rm,Rn */
        {
            gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
            gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31);  /* SR_M */
            TCGv val = tcg_temp_new();
            tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
            gen_copy_bit_i32(cpu_sr, 0, val, 31);        /* SR_T */
            tcg_temp_free(val);
        }
        return;
    case 0x3004: /* div1 Rm,Rn */
        gen_helper_div1(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
        return;
    case 0x300d: /* dmuls.l Rm,Rn */
        tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x3005: /* dmulu.l Rm,Rn */
        tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x600e: /* exts.b Rm,Rn */
        tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600f: /* exts.w Rm,Rn */
        tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600c: /* extu.b Rm,Rn */
        tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600d: /* extu.w Rm,Rn */
        tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x000f: /* mac.l @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
            gen_helper_macl(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        }
        return;
    case 0x400f: /* mac.w @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
            gen_helper_macw(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x0007: /* mul.l Rm,Rn */
        tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
        return;
    case 0x200f: /* muls.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x200e: /* mulu.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x600b: /* neg Rm,Rn */
        tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600a: /* negc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_temp_new();
            tcg_gen_neg_i32(t0, REG(B7_4));
            t1 = tcg_temp_new();
            tcg_gen_andi_i32(t1, cpu_sr, SR_T);
            tcg_gen_sub_i32(REG(B11_8), t0, t1);
            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
            tcg_gen_setcondi_i32(TCG_COND_GTU, t1, t0, 0);
            tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
            tcg_gen_setcond_i32(TCG_COND_GTU, t1, REG(B11_8), t0);
            tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
            tcg_temp_free(t1);
            tcg_temp_free(t0);
        }
        return;
    case 0x6007: /* not Rm,Rn */
        tcg_gen_not_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x200b: /* or Rm,Rn */
        tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x400c: /* shad Rm,Rn */
        {
            int label1 = gen_new_label();
            int label2 = gen_new_label();
            int label3 = gen_new_label();
            int label4 = gen_new_label();
            TCGv shift;
            tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
            /* Rm positive, shift to the left */
            shift = tcg_temp_new();
            tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
            tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
            tcg_temp_free(shift);
            tcg_gen_br(label4);
            /* Rm negative, shift to the right */
            gen_set_label(label1);
            shift = tcg_temp_new();
            tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
            tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
            tcg_gen_not_i32(shift, REG(B7_4));
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_addi_i32(shift, shift, 1);
            tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
            tcg_temp_free(shift);
            tcg_gen_br(label4);
            /* shift count of 32: result is 0 or all ones */
            gen_set_label(label2);
            tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
            tcg_gen_movi_i32(REG(B11_8), 0);
            tcg_gen_br(label4);
            gen_set_label(label3);
            tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
            gen_set_label(label4);
        }
        return;
    case 0x400d: /* shld Rm,Rn */
        {
            int label1 = gen_new_label();
            int label2 = gen_new_label();
            int label3 = gen_new_label();
            TCGv shift;
            tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
            /* Rm positive, shift to the left */
            shift = tcg_temp_new();
            tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
            tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
            tcg_temp_free(shift);
            tcg_gen_br(label3);
            /* Rm negative, shift to the right */
            gen_set_label(label1);
            shift = tcg_temp_new();
            tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
            tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
            tcg_gen_not_i32(shift, REG(B7_4));
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_addi_i32(shift, shift, 1);
            tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
            tcg_temp_free(shift);
            tcg_gen_br(label3);
            /* shift count of 32: result is 0 */
            gen_set_label(label2);
            tcg_gen_movi_i32(REG(B11_8), 0);
            gen_set_label(label3);
        }
        return;
    case 0x3008: /* sub Rm,Rn */
        tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300a: /* subc Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_andi_i32(t0, cpu_sr, SR_T);
            t1 = tcg_temp_new();
            tcg_gen_sub_i32(t1, REG(B11_8), REG(B7_4));
            tcg_gen_sub_i32(t0, t1, t0);
            t2 = tcg_temp_new();
            /* T = borrow out of Rn - Rm (- T) */
            tcg_gen_setcond_i32(TCG_COND_LTU, t2, REG(B11_8), t1);
            tcg_gen_setcond_i32(TCG_COND_LTU, t1, t1, t0);
            tcg_gen_or_i32(t1, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
            tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x300b: /* subv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B7_4));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
            tcg_gen_and_i32(t1, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(t1, t1, 31);
            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
            tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2008: /* tst Rm,Rn */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
            gen_cmp_imm(TCG_COND_EQ, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0x200a: /* xor Rm,Rn */
        tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->flags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, XREG(B7_4));
            gen_store_fpr64(fp, XREG(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
        }
        return;
    case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->flags & FPSCR_SZ) {
            TCGv addr_hi = tcg_temp_new();
            int fr = XREG(B7_4);
            tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(cpu_fregs[fr], REG(B11_8),
                                ctx->memidx, MO_TEUL);
            tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr_hi,
                                ctx->memidx, MO_TEUL);
            tcg_temp_free(addr_hi);
        } else {
            tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], REG(B11_8),
                                ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->flags & FPSCR_SZ) {
            TCGv addr_hi = tcg_temp_new();
            int fr = XREG(B11_8);
            tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
            tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
            tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr_hi);
        } else {
            tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
                                ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->flags & FPSCR_SZ) {
            TCGv addr_hi = tcg_temp_new();
            int fr = XREG(B11_8);
            tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
            tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
            tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
            tcg_temp_free(addr_hi);
        } else {
            tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
                                ctx->memidx, MO_TEUL);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->flags & FPSCR_SZ) {
            TCGv addr = tcg_temp_new_i32();
            int fr = XREG(B7_4);
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr, ctx->memidx, MO_TEUL);
            tcg_gen_subi_i32(addr, addr, 4);
            tcg_gen_qemu_st_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        } else {
            TCGv addr;
            addr = tcg_temp_new_i32();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
                                ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            if (ctx->flags & FPSCR_SZ) {
                int fr = XREG(B11_8);
                tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
                                    ctx->memidx, MO_TEUL);
                tcg_gen_addi_i32(addr, addr, 4);
                tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
                                    ctx->memidx, MO_TEUL);
            } else {
                tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], addr,
                                    ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            if (ctx->flags & FPSCR_SZ) {
                int fr = XREG(B7_4);
                tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
                                    ctx->memidx, MO_TEUL);
                tcg_gen_addi_i32(addr, addr, 4);
                tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
                                    ctx->memidx, MO_TEUL);
            } else {
                tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
                                    ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
    case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        {
            CHECK_FPU_ENABLED
            if (ctx->flags & FPSCR_PR) {
                TCGv_i64 fp0, fp1;

                if (ctx->opcode & 0x0110)
                    break; /* illegal instruction */
                fp0 = tcg_temp_new_i64();
                fp1 = tcg_temp_new_i64();
                gen_load_fpr64(fp0, DREG(B11_8));
                gen_load_fpr64(fp1, DREG(B7_4));
                switch (ctx->opcode & 0xf00f) {
                case 0xf000: /* fadd Rm,Rn */
                    gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf001: /* fsub Rm,Rn */
                    gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf002: /* fmul Rm,Rn */
                    gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf003: /* fdiv Rm,Rn */
                    gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf004: /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
                    return;
                case 0xf005: /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
                    return;
                }
                gen_store_fpr64(fp0, DREG(B11_8));
                tcg_temp_free_i64(fp0);
                tcg_temp_free_i64(fp1);
            } else {
                switch (ctx->opcode & 0xf00f) {
                case 0xf000: /* fadd Rm,Rn */
                    gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                       cpu_fregs[FREG(B11_8)],
                                       cpu_fregs[FREG(B7_4)]);
                    break;
                case 0xf001: /* fsub Rm,Rn */
                    gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                       cpu_fregs[FREG(B11_8)],
                                       cpu_fregs[FREG(B7_4)]);
                    break;
                case 0xf002: /* fmul Rm,Rn */
                    gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                       cpu_fregs[FREG(B11_8)],
                                       cpu_fregs[FREG(B7_4)]);
                    break;
                case 0xf003: /* fdiv Rm,Rn */
                    gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                       cpu_fregs[FREG(B11_8)],
                                       cpu_fregs[FREG(B7_4)]);
                    break;
                case 0xf004: /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
                                          cpu_fregs[FREG(B7_4)]);
                    return;
                case 0xf005: /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
                                          cpu_fregs[FREG(B7_4)]);
                    return;
                }
            }
        }
        return;
    case 0xf00e: /* fmac FR0,RM,Rn */
        {
            CHECK_FPU_ENABLED
            if (ctx->flags & FPSCR_PR) {
                break; /* illegal instruction */
            } else {
                gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                   cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
                                   cpu_fregs[FREG(B11_8)]);
                return;
            }
        }
    }
    switch (ctx->opcode & 0xff00) {
    case 0xc900: /* and #imm,R0 */
        tcg_gen_andi_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcd00: /* and.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0x8b00: /* bf label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->pc + 2,
                             ctx->pc + 4 + B7_0s * 2);
        ctx->bstate = BS_BRANCH;
        return;
    case 0x8f00: /* bf/s label */
        CHECK_NOT_DELAY_SLOT
        gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
        ctx->flags |= DELAY_SLOT_CONDITIONAL;
        return;
    case 0x8900: /* bt label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
                             ctx->pc + 2);
        ctx->bstate = BS_BRANCH;
        return;
    case 0x8d00: /* bt/s label */
        CHECK_NOT_DELAY_SLOT
        gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
        ctx->flags |= DELAY_SLOT_CONDITIONAL;
        return;
    case 0x8800: /* cmp/eq #imm,R0 */
        gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
        return;
    case 0xc400: /* mov.b @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0xc500: /* mov.w @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc600: /* mov.l @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0xc000: /* mov.b R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0xc100: /* mov.w R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc200: /* mov.l R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x8000: /* mov.b R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x8100: /* mov.w R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0x8400: /* mov.b @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x8500: /* mov.w @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc700: /* mova @(disp,PC),R0 */
        tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
        return;
    case 0xcb00: /* or #imm,R0 */
        tcg_gen_ori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcf00: /* or.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_ori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0xc300: /* trapa #imm */
        {
            TCGv imm;
            CHECK_NOT_DELAY_SLOT
            tcg_gen_movi_i32(cpu_pc, ctx->pc);
            imm = tcg_const_i32(B7_0);
            gen_helper_trapa(cpu_env, imm);
            tcg_temp_free(imm);
            ctx->bstate = BS_BRANCH;
        }
        return;
    case 0xc800: /* tst #imm,R0 */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(0), B7_0);
            gen_cmp_imm(TCG_COND_EQ, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xcc00: /* tst.b #imm,@(R0,GBR) */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_add_i32(val, REG(0), cpu_gbr);
            tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            gen_cmp_imm(TCG_COND_EQ, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xca00: /* xor #imm,R0 */
        tcg_gen_xori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xce00: /* xor.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_xori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    }
    switch (ctx->opcode & 0xf08f) {
    case 0x408e: /* ldc Rm,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
        return;
    case 0x4087: /* ldc.l @Rm+,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        return;
    case 0x0082: /* stc Rm_BANK,Rn */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
        return;
    case 0x4083: /* stc.l Rm_BANK,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    }
    switch (ctx->opcode & 0xf0ff) {
    case 0x0023: /* braf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
        ctx->flags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x0003: /* bsrf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
        ctx->flags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x4015: /* cmp/pl Rn */
        gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
        return;
    case 0x4011: /* cmp/pz Rn */
        gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
        return;
    case 0x4010: /* dt Rn */
        tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
        gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
        return;
    case 0x402b: /* jmp @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->flags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x400b: /* jsr @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->flags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x400e: /* ldc Rm,SR */
        CHECK_PRIVILEGED
        tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
        ctx->bstate = BS_STOP;
        return;
    case 0x4007: /* ldc.l @Rm+,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
            tcg_temp_free(val);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x0002: /* stc SR,Rn */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(REG(B11_8), cpu_sr);
        return;
    case 0x4003: /* stc SR,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(cpu_sr, addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
#define LD(reg,ldnum,ldpnum,prechk)                                     \
    case ldnum:                                                         \
        prechk                                                          \
        tcg_gen_mov_i32(cpu_##reg, REG(B11_8));                         \
        return;                                                         \
    case ldpnum:                                                        \
        prechk                                                          \
        tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);                    \
        return;
#define ST(reg,stnum,stpnum,prechk)                                     \
    case stnum:                                                         \
        prechk                                                          \
        tcg_gen_mov_i32(REG(B11_8), cpu_##reg);                         \
        return;                                                         \
    case stpnum:                                                        \
        prechk                                                          \
        {                                                               \
            TCGv addr = tcg_temp_new();                                 \
            tcg_gen_subi_i32(addr, REG(B11_8), 4);                      \
            tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
            tcg_gen_mov_i32(REG(B11_8), addr);                          \
            tcg_temp_free(addr);                                        \
        }                                                               \
        return;
#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk)                      \
    LD(reg,ldnum,ldpnum,prechk)                                         \
    ST(reg,stnum,stpnum,prechk)
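    /* Illustrative example (not part of the original source):
     * LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {}) below expands into the
     * four cases implementing "ldc Rm,GBR", "ldc.l @Rm+,GBR", "stc GBR,Rn"
     * and "stc.l GBR,@-Rn", each moving between REG(B11_8) and the cpu_gbr
     * global, with the prechk argument (e.g. CHECK_PRIVILEGED) pasted in
     * front of every case body. */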
    LDST(gbr,  0x401e, 0x4017, 0x0012, 0x4013, {})
    LDST(vbr,  0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
    LDST(ssr,  0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
    LDST(spc,  0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
    ST(sgr,  0x003a, 0x4032, CHECK_PRIVILEGED)
    LD(sgr,  0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
    LDST(dbr,  0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
    LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
    LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
    LDST(pr,   0x402a, 0x4026, 0x002a, 0x4022, {})
    LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
    case 0x406a: /* lds Rm,FPSCR */
        CHECK_FPU_ENABLED
        gen_helper_ld_fpscr(cpu_env, REG(B11_8));
        ctx->bstate = BS_STOP;
        return;
    case 0x4066: /* lds.l @Rm+,FPSCR */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            gen_helper_ld_fpscr(cpu_env, addr);
            tcg_temp_free(addr);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x006a: /* sts FPSCR,Rn */
        CHECK_FPU_ENABLED
        tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
        return;
    case 0x4062: /* sts FPSCR,@-Rn */
        CHECK_FPU_ENABLED
        {
            TCGv addr, val;
            val = tcg_temp_new();
            tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
            addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
            tcg_temp_free(val);
        }
        return;
    case 0x00c3: /* movca.l R0,@Rm */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
            gen_helper_movcal(cpu_env, REG(B11_8), val);
            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
            tcg_temp_free(val);
            ctx->has_movcal = 1;
        }
        return;
    case 0x40a9:
        /* MOVUA.L @Rm,R0    (Rm) -> R0
           Load non-boundary-aligned data */
        tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
        return;
    case 0x40e9:
        /* MOVUA.L @Rm+,R0   (Rm) -> R0, Rm + 4 -> Rm
           Load non-boundary-aligned data */
        tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        return;
    case 0x0029: /* movt Rn */
        tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
        return;
    case 0x0073:
        /* MOVCO.L
               LDST -> T
               If (T == 1) R0 -> (Rn)
               0 -> LDST
        */
        if (ctx->features & SH_FEATURE_SH4A) {
            int label = gen_new_label();
            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
            tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
            tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
            gen_set_label(label);
            tcg_gen_movi_i32(cpu_ldst, 0);
            return;
        } else
            break;
    case 0x0063:
        /* MOVLI.L @Rm,R0
               1 -> LDST
               (Rm) -> R0
               When interrupt/exception
                   occurred, 0 -> LDST
        */
        if (ctx->features & SH_FEATURE_SH4A) {
            tcg_gen_movi_i32(cpu_ldst, 0);
            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_movi_i32(cpu_ldst, 1);
            return;
        } else
            break;
    case 0x0093: /* ocbi @Rn */
        {
            gen_helper_ocbi(cpu_env, REG(B11_8));
        }
        return;
    case 0x00a3: /* ocbp @Rn */
    case 0x00b3: /* ocbwb @Rn */
        /* These instructions are supposed to do nothing in case of
           a cache miss. Given that we only partially emulate caches
           it is safe to simply ignore them. */
        return;
    case 0x0083: /* pref @Rn */
        return;
    case 0x00d3: /* prefi @Rn */
        if (ctx->features & SH_FEATURE_SH4A)
            return;
        else
            break;
    case 0x00e3: /* icbi @Rn */
        if (ctx->features & SH_FEATURE_SH4A)
            return;
        else
            break;
    case 0x00ab: /* synco */
        if (ctx->features & SH_FEATURE_SH4A)
            return;
        else
            break;
    case 0x4024: /* rotcl Rn */
        {
            TCGv tmp = tcg_temp_new();
            tcg_gen_mov_i32(tmp, cpu_sr);
            gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
            tcg_temp_free(tmp);
        }
        return;
    case 0x4025: /* rotcr Rn */
        {
            TCGv tmp = tcg_temp_new();
            tcg_gen_mov_i32(tmp, cpu_sr);
            gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
            tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
            gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
            tcg_temp_free(tmp);
        }
        return;
    case 0x4004: /* rotl Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
        gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
        return;
    case 0x4005: /* rotr Rn */
        gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
        tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4000: /* shll Rn */
    case 0x4020: /* shal Rn */
        gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4021: /* shar Rn */
        gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
        tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4001: /* shlr Rn */
        gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4008: /* shll2 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
        return;
    case 0x4018: /* shll8 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
        return;
    case 0x4028: /* shll16 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
        return;
    case 0x4009: /* shlr2 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
        return;
    case 0x4019: /* shlr8 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
        return;
    case 0x4029: /* shlr16 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
        return;
    case 0x401b: /* tas.b @Rn */
        {
            TCGv addr, val;
            addr = tcg_temp_local_new();
            tcg_gen_mov_i32(addr, REG(B11_8));
            val = tcg_temp_local_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            gen_cmp_imm(TCG_COND_EQ, val, 0);
            tcg_gen_ori_i32(val, val, 0x80);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
        return;
    case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
        return;
    case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
        CHECK_FPU_ENABLED
        if (ctx->flags & FPSCR_PR) {
            TCGv_i64 fp;
            if (ctx->opcode & 0x0100)
                break; /* illegal instruction */
            fp = tcg_temp_new_i64();
            gen_helper_float_DT(fp, cpu_env, cpu_fpul);
            gen_store_fpr64(fp, DREG(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
        }
        return;
    case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        CHECK_FPU_ENABLED
        if (ctx->flags & FPSCR_PR) {
            TCGv_i64 fp;
            if (ctx->opcode & 0x0100)
                break; /* illegal instruction */
            fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, DREG(B11_8));
            gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
        }
        return;
    case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
        }
        return;
    case 0xf05d: /* fabs FRn/DRn */
        CHECK_FPU_ENABLED
        if (ctx->flags & FPSCR_PR) {
            if (ctx->opcode & 0x0100)
                break; /* illegal instruction */
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, DREG(B11_8));
            gen_helper_fabs_DT(fp, fp);
            gen_store_fpr64(fp, DREG(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
        }
        return;
    case 0xf06d: /* fsqrt FRn */
        CHECK_FPU_ENABLED
        if (ctx->flags & FPSCR_PR) {
            if (ctx->opcode & 0x0100)
                break; /* illegal instruction */
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, DREG(B11_8));
            gen_helper_fsqrt_DT(fp, cpu_env, fp);
            gen_store_fpr64(fp, DREG(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                cpu_fregs[FREG(B11_8)]);
        }
        return;
    case 0xf07d: /* fsrra FRn */
        CHECK_FPU_ENABLED
        break;
    case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
        CHECK_FPU_ENABLED
        if (!(ctx->flags & FPSCR_PR)) {
            tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
        }
        return;
    case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
        CHECK_FPU_ENABLED
        if (!(ctx->flags & FPSCR_PR)) {
            tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
        }
        return;
    case 0xf0ad: /* fcnvsd FPUL,DRn */
        CHECK_FPU_ENABLED
        {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
            gen_store_fpr64(fp, DREG(B11_8));
            tcg_temp_free_i64(fp);
        }
        return;
    case 0xf0bd: /* fcnvds DRn,FPUL */
        CHECK_FPU_ENABLED
        {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, DREG(B11_8));
            gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
            tcg_temp_free_i64(fp);
        }
        return;
    case 0xf0ed: /* fipr FVm,FVn */
        CHECK_FPU_ENABLED
        if ((ctx->flags & FPSCR_PR) == 0) {
            TCGv m, n;
            m = tcg_const_i32((ctx->opcode >> 8) & 3);
            n = tcg_const_i32((ctx->opcode >> 10) & 3);
            gen_helper_fipr(cpu_env, m, n);
            tcg_temp_free(m);
            tcg_temp_free(n);
            return;
        }
        break;
    case 0xf0fd: /* ftrv XMTRX,FVn */
        CHECK_FPU_ENABLED
        if ((ctx->opcode & 0x0300) == 0x0100 &&
            (ctx->flags & FPSCR_PR) == 0) {
            TCGv n;
            n = tcg_const_i32((ctx->opcode >> 10) & 3);
            gen_helper_ftrv(cpu_env, n);
            tcg_temp_free(n);
            return;
        }
        break;
    }
#if 0
    fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
            ctx->opcode, ctx->pc);
    fflush(stderr);
#endif
    tcg_gen_movi_i32(cpu_pc, ctx->pc);
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        gen_helper_raise_slot_illegal_instruction(cpu_env);
    } else {
        gen_helper_raise_illegal_instruction(cpu_env);
    }
    ctx->bstate = BS_BRANCH;
}
static void decode_opc(DisasContext * ctx)
{
    uint32_t old_flags = ctx->flags;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(ctx->pc);
    }

    _decode_opc(ctx);

    if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        if (ctx->flags & DELAY_SLOT_CLEARME) {
            gen_store_flags(0);
        } else {
            /* go out of the delay slot */
            uint32_t new_flags = ctx->flags;
            new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
            gen_store_flags(new_flags);
        }
        ctx->flags = 0;
        ctx->bstate = BS_BRANCH;
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
        } else if (old_flags & DELAY_SLOT) {
            gen_jump(ctx);
        }
    }

    /* go into a delay slot */
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
        gen_store_flags(ctx->flags);
}
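
/* Illustrative example (not part of the original source): for the SH4 sequence
 *     bra  target
 *     add  #1,r0      ! delay slot
 * the bra handler above only records DELAY_SLOT and delayed_pc; the add is
 * translated on the next call to decode_opc(), and because old_flags has
 * DELAY_SLOT set, gen_jump() is emitted right after it, so the slot
 * instruction executes before control is transferred. */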
static inline void
gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
                               bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUSH4State *env = &cpu->env;
    DisasContext ctx;
    target_ulong pc_start;
    static uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int i, ii;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.flags = (uint32_t)tb->flags;
    ctx.bstate = BS_NONE;
    ctx.memidx = (ctx.flags & SR_MD) == 0 ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch. */
    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.features = env->features;
    ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);

    ii = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
    gen_tb_start();
    while (ctx.bstate == BS_NONE && tcg_ctx.gen_opc_ptr < gen_opc_end) {
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (ctx.pc == bp->pc) {
                    /* We have hit a breakpoint - make sure PC is up-to-date */
                    tcg_gen_movi_i32(cpu_pc, ctx.pc);
                    gen_helper_debug(cpu_env);
                    ctx.bstate = BS_BRANCH;
                    break;
                }
            }
        }
        if (search_pc) {
            i = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (ii < i) {
                ii++;
                while (ii < i)
                    tcg_ctx.gen_opc_instr_start[ii++] = 0;
            }
            tcg_ctx.gen_opc_pc[ii] = ctx.pc;
            gen_opc_hflags[ii] = ctx.flags;
            tcg_ctx.gen_opc_instr_start[ii] = 1;
            tcg_ctx.gen_opc_icount[ii] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
#if 0
        fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
#endif
        ctx.opcode = cpu_lduw_code(env, ctx.pc);
        decode_opc(&ctx);
        num_insns++;
        ctx.pc += 2;
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        if (cs->singlestep_enabled) {
            break;
        }
        if (num_insns >= max_insns)
            break;
        if (singlestep)
            break;
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (cs->singlestep_enabled) {
        tcg_gen_movi_i32(cpu_pc, ctx.pc);
        gen_helper_debug(cpu_env);
    } else {
        switch (ctx.bstate) {
        case BS_STOP:
            /* gen_op_interrupt_restart(); */
            /* fall through */
        case BS_NONE:
            if (ctx.flags) {
                gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
            }
            gen_goto_tb(&ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            /* gen_op_interrupt_restart(); */
            tcg_gen_exit_tb(0);
            break;
        case BS_BRANCH:
        default:
            break;
        }
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        i = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        ii++;
        while (ii <= i)
            tcg_ctx.gen_opc_instr_start[ii++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
        log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(sh_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUSH4State * env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(sh_env_get_cpu(env), tb, true);
}

void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
    env->flags = gen_opc_hflags[pc_pos];
}