4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 //#define SH4_SINGLE_STEP
24 #include "disas/disas.h"
31 typedef struct DisasContext
{
32 struct TranslationBlock
*tb
;
39 int singlestep_enabled
;
#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: guest code always runs unprivileged. */
#define IS_USER(ctx) 1
#else
/* System emulation: privilege comes from the MD bit of the saved SR copy. */
#define IS_USER(ctx) (!(ctx->flags & SR_MD))
#endif
/* Reason codes for ending a translation block (DisasContext.bstate). */
enum {
    BS_NONE = 0,   /* We go out of the TB without reaching a branch or an
                      exception condition */
    BS_STOP = 1,   /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3,   /* We reached an exception condition */
};
/* global register indexes */
static TCGv_ptr cpu_env;   /* pointer to the CPUSH4State of the vCPU */
static TCGv cpu_gregs[24]; /* R0-R15 plus the shadow R0_BANK1..R7_BANK1 */
static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32]; /* FPR0-FPR15 of both FP register banks */

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc;

/* per-op copy of env->flags recorded during translation */
static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
72 #include "exec/gen-icount.h"
74 void sh4_translate_init(void)
77 static int done_init
= 0;
78 static const char * const gregnames
[24] = {
79 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
80 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
81 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
82 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
83 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
85 static const char * const fregnames
[32] = {
86 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
87 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
88 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
89 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
90 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
91 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
92 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
93 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
99 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
101 for (i
= 0; i
< 24; i
++)
102 cpu_gregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
103 offsetof(CPUSH4State
, gregs
[i
]),
106 cpu_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
107 offsetof(CPUSH4State
, pc
), "PC");
108 cpu_sr
= tcg_global_mem_new_i32(TCG_AREG0
,
109 offsetof(CPUSH4State
, sr
), "SR");
110 cpu_ssr
= tcg_global_mem_new_i32(TCG_AREG0
,
111 offsetof(CPUSH4State
, ssr
), "SSR");
112 cpu_spc
= tcg_global_mem_new_i32(TCG_AREG0
,
113 offsetof(CPUSH4State
, spc
), "SPC");
114 cpu_gbr
= tcg_global_mem_new_i32(TCG_AREG0
,
115 offsetof(CPUSH4State
, gbr
), "GBR");
116 cpu_vbr
= tcg_global_mem_new_i32(TCG_AREG0
,
117 offsetof(CPUSH4State
, vbr
), "VBR");
118 cpu_sgr
= tcg_global_mem_new_i32(TCG_AREG0
,
119 offsetof(CPUSH4State
, sgr
), "SGR");
120 cpu_dbr
= tcg_global_mem_new_i32(TCG_AREG0
,
121 offsetof(CPUSH4State
, dbr
), "DBR");
122 cpu_mach
= tcg_global_mem_new_i32(TCG_AREG0
,
123 offsetof(CPUSH4State
, mach
), "MACH");
124 cpu_macl
= tcg_global_mem_new_i32(TCG_AREG0
,
125 offsetof(CPUSH4State
, macl
), "MACL");
126 cpu_pr
= tcg_global_mem_new_i32(TCG_AREG0
,
127 offsetof(CPUSH4State
, pr
), "PR");
128 cpu_fpscr
= tcg_global_mem_new_i32(TCG_AREG0
,
129 offsetof(CPUSH4State
, fpscr
), "FPSCR");
130 cpu_fpul
= tcg_global_mem_new_i32(TCG_AREG0
,
131 offsetof(CPUSH4State
, fpul
), "FPUL");
133 cpu_flags
= tcg_global_mem_new_i32(TCG_AREG0
,
134 offsetof(CPUSH4State
, flags
), "_flags_");
135 cpu_delayed_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
136 offsetof(CPUSH4State
, delayed_pc
),
138 cpu_ldst
= tcg_global_mem_new_i32(TCG_AREG0
,
139 offsetof(CPUSH4State
, ldst
), "_ldst_");
141 for (i
= 0; i
< 32; i
++)
142 cpu_fregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
143 offsetof(CPUSH4State
, fregs
[i
]),
149 void superh_cpu_dump_state(CPUState
*cs
, FILE *f
,
150 fprintf_function cpu_fprintf
, int flags
)
152 SuperHCPU
*cpu
= SUPERH_CPU(cs
);
153 CPUSH4State
*env
= &cpu
->env
;
155 cpu_fprintf(f
, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
156 env
->pc
, env
->sr
, env
->pr
, env
->fpscr
);
157 cpu_fprintf(f
, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
158 env
->spc
, env
->ssr
, env
->gbr
, env
->vbr
);
159 cpu_fprintf(f
, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
160 env
->sgr
, env
->dbr
, env
->delayed_pc
, env
->fpul
);
161 for (i
= 0; i
< 24; i
+= 4) {
162 cpu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
163 i
, env
->gregs
[i
], i
+ 1, env
->gregs
[i
+ 1],
164 i
+ 2, env
->gregs
[i
+ 2], i
+ 3, env
->gregs
[i
+ 3]);
166 if (env
->flags
& DELAY_SLOT
) {
167 cpu_fprintf(f
, "in delay slot (delayed_pc=0x%08x)\n",
169 } else if (env
->flags
& DELAY_SLOT_CONDITIONAL
) {
170 cpu_fprintf(f
, "in conditional delay slot (delayed_pc=0x%08x)\n",
175 static void gen_goto_tb(DisasContext
* ctx
, int n
, target_ulong dest
)
177 TranslationBlock
*tb
;
180 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
181 !ctx
->singlestep_enabled
) {
182 /* Use a direct jump if in same page and singlestep not enabled */
184 tcg_gen_movi_i32(cpu_pc
, dest
);
185 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
187 tcg_gen_movi_i32(cpu_pc
, dest
);
188 if (ctx
->singlestep_enabled
)
189 gen_helper_debug(cpu_env
);
194 static void gen_jump(DisasContext
* ctx
)
196 if (ctx
->delayed_pc
== (uint32_t) - 1) {
197 /* Target is not statically known, it comes necessarily from a
198 delayed jump as immediate jump are conditinal jumps */
199 tcg_gen_mov_i32(cpu_pc
, cpu_delayed_pc
);
200 if (ctx
->singlestep_enabled
)
201 gen_helper_debug(cpu_env
);
204 gen_goto_tb(ctx
, 0, ctx
->delayed_pc
);
208 static inline void gen_branch_slot(uint32_t delayed_pc
, int t
)
211 int label
= gen_new_label();
212 tcg_gen_movi_i32(cpu_delayed_pc
, delayed_pc
);
214 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
215 tcg_gen_brcondi_i32(t
? TCG_COND_EQ
:TCG_COND_NE
, sr
, 0, label
);
216 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
217 gen_set_label(label
);
220 /* Immediate conditional jump (bt or bf) */
221 static void gen_conditional_jump(DisasContext
* ctx
,
222 target_ulong ift
, target_ulong ifnott
)
227 l1
= gen_new_label();
229 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
230 tcg_gen_brcondi_i32(TCG_COND_NE
, sr
, 0, l1
);
231 gen_goto_tb(ctx
, 0, ifnott
);
233 gen_goto_tb(ctx
, 1, ift
);
236 /* Delayed conditional jump (bt or bf) */
237 static void gen_delayed_conditional_jump(DisasContext
* ctx
)
242 l1
= gen_new_label();
244 tcg_gen_andi_i32(ds
, cpu_flags
, DELAY_SLOT_TRUE
);
245 tcg_gen_brcondi_i32(TCG_COND_NE
, ds
, 0, l1
);
246 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
248 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, ~DELAY_SLOT_TRUE
);
252 static inline void gen_cmp(int cond
, TCGv t0
, TCGv t1
)
257 tcg_gen_setcond_i32(cond
, t
, t1
, t0
);
258 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
259 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
264 static inline void gen_cmp_imm(int cond
, TCGv t0
, int32_t imm
)
269 tcg_gen_setcondi_i32(cond
, t
, t0
, imm
);
270 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
271 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
276 static inline void gen_store_flags(uint32_t flags
)
278 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
279 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, flags
);
282 static inline void gen_copy_bit_i32(TCGv t0
, int p0
, TCGv t1
, int p1
)
284 TCGv tmp
= tcg_temp_new();
289 tcg_gen_andi_i32(tmp
, t1
, (1 << p1
));
290 tcg_gen_andi_i32(t0
, t0
, ~(1 << p0
));
292 tcg_gen_shri_i32(tmp
, tmp
, p1
- p0
);
294 tcg_gen_shli_i32(tmp
, tmp
, p0
- p1
);
295 tcg_gen_or_i32(t0
, t0
, tmp
);
300 static inline void gen_load_fpr64(TCGv_i64 t
, int reg
)
302 tcg_gen_concat_i32_i64(t
, cpu_fregs
[reg
+ 1], cpu_fregs
[reg
]);
305 static inline void gen_store_fpr64 (TCGv_i64 t
, int reg
)
307 TCGv_i32 tmp
= tcg_temp_new_i32();
308 tcg_gen_trunc_i64_i32(tmp
, t
);
309 tcg_gen_mov_i32(cpu_fregs
[reg
+ 1], tmp
);
310 tcg_gen_shri_i64(t
, t
, 32);
311 tcg_gen_trunc_i64_i32(tmp
, t
);
312 tcg_gen_mov_i32(cpu_fregs
[reg
], tmp
);
313 tcg_temp_free_i32(tmp
);
/* Opcode bit-field extractors: BH_L selects bits L..H of ctx->opcode.
   The 's' suffix marks sign-extended variants. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* General register access: R0-R7 are banked, selected by SR.MD && SR.RB. */
#define REG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) == (SR_MD | SR_RB) \
  ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* Same as REG() but picks the OTHER bank (used by ldc/stc Rm_BANK forms). */
#define ALTREG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) != (SR_MD | SR_RB)\
  ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register selection: FPSCR.FR flips between the two FP banks. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
/* XD register encoding: move the odd/even bit into the bank-offset bit. */
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Raise a slot-illegal exception and abandon decoding if the current
   instruction sits in a delay slot where it is not permitted. */
#define CHECK_NOT_DELAY_SLOT                                       \
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {      \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                         \
        gen_helper_raise_slot_illegal_instruction(cpu_env);        \
        ctx->bstate = BS_BRANCH;                                   \
        return;                                                    \
    }

/* Raise an illegal-instruction exception (slot variant if in a delay
   slot) and abandon decoding when executing in user mode. */
#define CHECK_PRIVILEGED                                           \
    if (IS_USER(ctx)) {                                            \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                         \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {  \
            gen_helper_raise_slot_illegal_instruction(cpu_env);    \
        } else {                                                   \
            gen_helper_raise_illegal_instruction(cpu_env);         \
        }                                                          \
        ctx->bstate = BS_BRANCH;                                   \
        return;                                                    \
    }

/* Raise an FPU-disable exception (slot variant if in a delay slot)
   and abandon decoding when SR.FD forbids FP instructions. */
#define CHECK_FPU_ENABLED                                          \
    if (ctx->flags & SR_FD) {                                      \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                         \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {  \
            gen_helper_raise_slot_fpu_disable(cpu_env);            \
        } else {                                                   \
            gen_helper_raise_fpu_disable(cpu_env);                 \
        }                                                          \
        ctx->bstate = BS_BRANCH;                                   \
        return;                                                    \
    }
370 static void _decode_opc(DisasContext
* ctx
)
372 /* This code tries to make movcal emulation sufficiently
373 accurate for Linux purposes. This instruction writes
374 memory, and prior to that, always allocates a cache line.
375 It is used in two contexts:
376 - in memcpy, where data is copied in blocks, the first write
377 of to a block uses movca.l for performance.
378 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
379 to flush the cache. Here, the data written by movcal.l is never
380 written to memory, and the data written is just bogus.
382 To simulate this, we simulate movcal.l, we store the value to memory,
383 but we also remember the previous content. If we see ocbi, we check
384 if movcal.l for that address was done previously. If so, the write should
385 not have hit the memory, so we restore the previous content.
386 When we see an instruction that is neither movca.l
387 nor ocbi, the previous content is discarded.
389 To optimize, we only try to flush stores when we're at the start of
390 TB, or if we already saw movca.l in this TB and did not flush stores
394 int opcode
= ctx
->opcode
& 0xf0ff;
395 if (opcode
!= 0x0093 /* ocbi */
396 && opcode
!= 0x00c3 /* movca.l */)
398 gen_helper_discard_movcal_backup(cpu_env
);
404 fprintf(stderr
, "Translating opcode 0x%04x\n", ctx
->opcode
);
407 switch (ctx
->opcode
) {
408 case 0x0019: /* div0u */
409 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~(SR_M
| SR_Q
| SR_T
));
411 case 0x000b: /* rts */
413 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_pr
);
414 ctx
->flags
|= DELAY_SLOT
;
415 ctx
->delayed_pc
= (uint32_t) - 1;
417 case 0x0028: /* clrmac */
418 tcg_gen_movi_i32(cpu_mach
, 0);
419 tcg_gen_movi_i32(cpu_macl
, 0);
421 case 0x0048: /* clrs */
422 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_S
);
424 case 0x0008: /* clrt */
425 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
427 case 0x0038: /* ldtlb */
429 gen_helper_ldtlb(cpu_env
);
431 case 0x002b: /* rte */
434 tcg_gen_mov_i32(cpu_sr
, cpu_ssr
);
435 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_spc
);
436 ctx
->flags
|= DELAY_SLOT
;
437 ctx
->delayed_pc
= (uint32_t) - 1;
439 case 0x0058: /* sets */
440 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_S
);
442 case 0x0018: /* sett */
443 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_T
);
445 case 0xfbfd: /* frchg */
446 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_FR
);
447 ctx
->bstate
= BS_STOP
;
449 case 0xf3fd: /* fschg */
450 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_SZ
);
451 ctx
->bstate
= BS_STOP
;
453 case 0x0009: /* nop */
455 case 0x001b: /* sleep */
457 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
+ 2);
458 gen_helper_sleep(cpu_env
);
462 switch (ctx
->opcode
& 0xf000) {
463 case 0x1000: /* mov.l Rm,@(disp,Rn) */
465 TCGv addr
= tcg_temp_new();
466 tcg_gen_addi_i32(addr
, REG(B11_8
), B3_0
* 4);
467 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
471 case 0x5000: /* mov.l @(disp,Rm),Rn */
473 TCGv addr
= tcg_temp_new();
474 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 4);
475 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
479 case 0xe000: /* mov #imm,Rn */
480 tcg_gen_movi_i32(REG(B11_8
), B7_0s
);
482 case 0x9000: /* mov.w @(disp,PC),Rn */
484 TCGv addr
= tcg_const_i32(ctx
->pc
+ 4 + B7_0
* 2);
485 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
489 case 0xd000: /* mov.l @(disp,PC),Rn */
491 TCGv addr
= tcg_const_i32((ctx
->pc
+ 4 + B7_0
* 4) & ~3);
492 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
496 case 0x7000: /* add #imm,Rn */
497 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), B7_0s
);
499 case 0xa000: /* bra disp */
501 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
502 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
503 ctx
->flags
|= DELAY_SLOT
;
505 case 0xb000: /* bsr disp */
507 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
508 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
509 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
510 ctx
->flags
|= DELAY_SLOT
;
514 switch (ctx
->opcode
& 0xf00f) {
515 case 0x6003: /* mov Rm,Rn */
516 tcg_gen_mov_i32(REG(B11_8
), REG(B7_4
));
518 case 0x2000: /* mov.b Rm,@Rn */
519 tcg_gen_qemu_st8(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
521 case 0x2001: /* mov.w Rm,@Rn */
522 tcg_gen_qemu_st16(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
524 case 0x2002: /* mov.l Rm,@Rn */
525 tcg_gen_qemu_st32(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
527 case 0x6000: /* mov.b @Rm,Rn */
528 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
530 case 0x6001: /* mov.w @Rm,Rn */
531 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
533 case 0x6002: /* mov.l @Rm,Rn */
534 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
536 case 0x2004: /* mov.b Rm,@-Rn */
538 TCGv addr
= tcg_temp_new();
539 tcg_gen_subi_i32(addr
, REG(B11_8
), 1);
540 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
); /* might cause re-execution */
541 tcg_gen_mov_i32(REG(B11_8
), addr
); /* modify register status */
545 case 0x2005: /* mov.w Rm,@-Rn */
547 TCGv addr
= tcg_temp_new();
548 tcg_gen_subi_i32(addr
, REG(B11_8
), 2);
549 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
550 tcg_gen_mov_i32(REG(B11_8
), addr
);
554 case 0x2006: /* mov.l Rm,@-Rn */
556 TCGv addr
= tcg_temp_new();
557 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
558 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
559 tcg_gen_mov_i32(REG(B11_8
), addr
);
562 case 0x6004: /* mov.b @Rm+,Rn */
563 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
565 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 1);
567 case 0x6005: /* mov.w @Rm+,Rn */
568 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
570 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
572 case 0x6006: /* mov.l @Rm+,Rn */
573 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
575 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
577 case 0x0004: /* mov.b Rm,@(R0,Rn) */
579 TCGv addr
= tcg_temp_new();
580 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
581 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
);
585 case 0x0005: /* mov.w Rm,@(R0,Rn) */
587 TCGv addr
= tcg_temp_new();
588 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
589 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
593 case 0x0006: /* mov.l Rm,@(R0,Rn) */
595 TCGv addr
= tcg_temp_new();
596 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
597 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
601 case 0x000c: /* mov.b @(R0,Rm),Rn */
603 TCGv addr
= tcg_temp_new();
604 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
605 tcg_gen_qemu_ld8s(REG(B11_8
), addr
, ctx
->memidx
);
609 case 0x000d: /* mov.w @(R0,Rm),Rn */
611 TCGv addr
= tcg_temp_new();
612 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
613 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
617 case 0x000e: /* mov.l @(R0,Rm),Rn */
619 TCGv addr
= tcg_temp_new();
620 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
621 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
625 case 0x6008: /* swap.b Rm,Rn */
628 high
= tcg_temp_new();
629 tcg_gen_andi_i32(high
, REG(B7_4
), 0xffff0000);
630 low
= tcg_temp_new();
631 tcg_gen_ext16u_i32(low
, REG(B7_4
));
632 tcg_gen_bswap16_i32(low
, low
);
633 tcg_gen_or_i32(REG(B11_8
), high
, low
);
638 case 0x6009: /* swap.w Rm,Rn */
639 tcg_gen_rotli_i32(REG(B11_8
), REG(B7_4
), 16);
641 case 0x200d: /* xtrct Rm,Rn */
644 high
= tcg_temp_new();
645 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
646 low
= tcg_temp_new();
647 tcg_gen_shri_i32(low
, REG(B11_8
), 16);
648 tcg_gen_or_i32(REG(B11_8
), high
, low
);
653 case 0x300c: /* add Rm,Rn */
654 tcg_gen_add_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
656 case 0x300e: /* addc Rm,Rn */
660 tcg_gen_andi_i32(t0
, cpu_sr
, SR_T
);
662 tcg_gen_add_i32(t1
, REG(B7_4
), REG(B11_8
));
663 tcg_gen_add_i32(t0
, t0
, t1
);
665 tcg_gen_setcond_i32(TCG_COND_GTU
, t2
, REG(B11_8
), t1
);
666 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, t1
, t0
);
667 tcg_gen_or_i32(t1
, t1
, t2
);
669 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
670 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
672 tcg_gen_mov_i32(REG(B11_8
), t0
);
676 case 0x300f: /* addv Rm,Rn */
680 tcg_gen_add_i32(t0
, REG(B7_4
), REG(B11_8
));
682 tcg_gen_xor_i32(t1
, t0
, REG(B11_8
));
684 tcg_gen_xor_i32(t2
, REG(B7_4
), REG(B11_8
));
685 tcg_gen_andc_i32(t1
, t1
, t2
);
687 tcg_gen_shri_i32(t1
, t1
, 31);
688 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
689 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
691 tcg_gen_mov_i32(REG(B7_4
), t0
);
695 case 0x2009: /* and Rm,Rn */
696 tcg_gen_and_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
698 case 0x3000: /* cmp/eq Rm,Rn */
699 gen_cmp(TCG_COND_EQ
, REG(B7_4
), REG(B11_8
));
701 case 0x3003: /* cmp/ge Rm,Rn */
702 gen_cmp(TCG_COND_GE
, REG(B7_4
), REG(B11_8
));
704 case 0x3007: /* cmp/gt Rm,Rn */
705 gen_cmp(TCG_COND_GT
, REG(B7_4
), REG(B11_8
));
707 case 0x3006: /* cmp/hi Rm,Rn */
708 gen_cmp(TCG_COND_GTU
, REG(B7_4
), REG(B11_8
));
710 case 0x3002: /* cmp/hs Rm,Rn */
711 gen_cmp(TCG_COND_GEU
, REG(B7_4
), REG(B11_8
));
713 case 0x200c: /* cmp/str Rm,Rn */
715 TCGv cmp1
= tcg_temp_new();
716 TCGv cmp2
= tcg_temp_new();
717 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
718 tcg_gen_xor_i32(cmp1
, REG(B7_4
), REG(B11_8
));
719 tcg_gen_andi_i32(cmp2
, cmp1
, 0xff000000);
720 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
721 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
722 tcg_gen_andi_i32(cmp2
, cmp1
, 0x00ff0000);
723 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
724 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
725 tcg_gen_andi_i32(cmp2
, cmp1
, 0x0000ff00);
726 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
727 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
728 tcg_gen_andi_i32(cmp2
, cmp1
, 0x000000ff);
729 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
730 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
735 case 0x2007: /* div0s Rm,Rn */
737 gen_copy_bit_i32(cpu_sr
, 8, REG(B11_8
), 31); /* SR_Q */
738 gen_copy_bit_i32(cpu_sr
, 9, REG(B7_4
), 31); /* SR_M */
739 TCGv val
= tcg_temp_new();
740 tcg_gen_xor_i32(val
, REG(B7_4
), REG(B11_8
));
741 gen_copy_bit_i32(cpu_sr
, 0, val
, 31); /* SR_T */
745 case 0x3004: /* div1 Rm,Rn */
746 gen_helper_div1(REG(B11_8
), cpu_env
, REG(B7_4
), REG(B11_8
));
748 case 0x300d: /* dmuls.l Rm,Rn */
749 tcg_gen_muls2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
751 case 0x3005: /* dmulu.l Rm,Rn */
752 tcg_gen_mulu2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
754 case 0x600e: /* exts.b Rm,Rn */
755 tcg_gen_ext8s_i32(REG(B11_8
), REG(B7_4
));
757 case 0x600f: /* exts.w Rm,Rn */
758 tcg_gen_ext16s_i32(REG(B11_8
), REG(B7_4
));
760 case 0x600c: /* extu.b Rm,Rn */
761 tcg_gen_ext8u_i32(REG(B11_8
), REG(B7_4
));
763 case 0x600d: /* extu.w Rm,Rn */
764 tcg_gen_ext16u_i32(REG(B11_8
), REG(B7_4
));
766 case 0x000f: /* mac.l @Rm+,@Rn+ */
769 arg0
= tcg_temp_new();
770 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
771 arg1
= tcg_temp_new();
772 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
773 gen_helper_macl(cpu_env
, arg0
, arg1
);
776 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
777 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
780 case 0x400f: /* mac.w @Rm+,@Rn+ */
783 arg0
= tcg_temp_new();
784 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
785 arg1
= tcg_temp_new();
786 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
787 gen_helper_macw(cpu_env
, arg0
, arg1
);
790 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 2);
791 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
794 case 0x0007: /* mul.l Rm,Rn */
795 tcg_gen_mul_i32(cpu_macl
, REG(B7_4
), REG(B11_8
));
797 case 0x200f: /* muls.w Rm,Rn */
800 arg0
= tcg_temp_new();
801 tcg_gen_ext16s_i32(arg0
, REG(B7_4
));
802 arg1
= tcg_temp_new();
803 tcg_gen_ext16s_i32(arg1
, REG(B11_8
));
804 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
809 case 0x200e: /* mulu.w Rm,Rn */
812 arg0
= tcg_temp_new();
813 tcg_gen_ext16u_i32(arg0
, REG(B7_4
));
814 arg1
= tcg_temp_new();
815 tcg_gen_ext16u_i32(arg1
, REG(B11_8
));
816 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
821 case 0x600b: /* neg Rm,Rn */
822 tcg_gen_neg_i32(REG(B11_8
), REG(B7_4
));
824 case 0x600a: /* negc Rm,Rn */
828 tcg_gen_neg_i32(t0
, REG(B7_4
));
830 tcg_gen_andi_i32(t1
, cpu_sr
, SR_T
);
831 tcg_gen_sub_i32(REG(B11_8
), t0
, t1
);
832 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
833 tcg_gen_setcondi_i32(TCG_COND_GTU
, t1
, t0
, 0);
834 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
835 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, REG(B11_8
), t0
);
836 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
841 case 0x6007: /* not Rm,Rn */
842 tcg_gen_not_i32(REG(B11_8
), REG(B7_4
));
844 case 0x200b: /* or Rm,Rn */
845 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
847 case 0x400c: /* shad Rm,Rn */
849 int label1
= gen_new_label();
850 int label2
= gen_new_label();
851 int label3
= gen_new_label();
852 int label4
= gen_new_label();
854 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
855 /* Rm positive, shift to the left */
856 shift
= tcg_temp_new();
857 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
858 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
859 tcg_temp_free(shift
);
861 /* Rm negative, shift to the right */
862 gen_set_label(label1
);
863 shift
= tcg_temp_new();
864 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
865 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
866 tcg_gen_not_i32(shift
, REG(B7_4
));
867 tcg_gen_andi_i32(shift
, shift
, 0x1f);
868 tcg_gen_addi_i32(shift
, shift
, 1);
869 tcg_gen_sar_i32(REG(B11_8
), REG(B11_8
), shift
);
870 tcg_temp_free(shift
);
873 gen_set_label(label2
);
874 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B11_8
), 0, label3
);
875 tcg_gen_movi_i32(REG(B11_8
), 0);
877 gen_set_label(label3
);
878 tcg_gen_movi_i32(REG(B11_8
), 0xffffffff);
879 gen_set_label(label4
);
882 case 0x400d: /* shld Rm,Rn */
884 int label1
= gen_new_label();
885 int label2
= gen_new_label();
886 int label3
= gen_new_label();
888 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
889 /* Rm positive, shift to the left */
890 shift
= tcg_temp_new();
891 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
892 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
893 tcg_temp_free(shift
);
895 /* Rm negative, shift to the right */
896 gen_set_label(label1
);
897 shift
= tcg_temp_new();
898 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
899 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
900 tcg_gen_not_i32(shift
, REG(B7_4
));
901 tcg_gen_andi_i32(shift
, shift
, 0x1f);
902 tcg_gen_addi_i32(shift
, shift
, 1);
903 tcg_gen_shr_i32(REG(B11_8
), REG(B11_8
), shift
);
904 tcg_temp_free(shift
);
907 gen_set_label(label2
);
908 tcg_gen_movi_i32(REG(B11_8
), 0);
909 gen_set_label(label3
);
912 case 0x3008: /* sub Rm,Rn */
913 tcg_gen_sub_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
915 case 0x300a: /* subc Rm,Rn */
919 tcg_gen_andi_i32(t0
, cpu_sr
, SR_T
);
921 tcg_gen_sub_i32(t1
, REG(B11_8
), REG(B7_4
));
922 tcg_gen_sub_i32(t0
, t1
, t0
);
924 tcg_gen_setcond_i32(TCG_COND_LTU
, t2
, REG(B11_8
), t1
);
925 tcg_gen_setcond_i32(TCG_COND_LTU
, t1
, t1
, t0
);
926 tcg_gen_or_i32(t1
, t1
, t2
);
928 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
929 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
931 tcg_gen_mov_i32(REG(B11_8
), t0
);
935 case 0x300b: /* subv Rm,Rn */
939 tcg_gen_sub_i32(t0
, REG(B11_8
), REG(B7_4
));
941 tcg_gen_xor_i32(t1
, t0
, REG(B7_4
));
943 tcg_gen_xor_i32(t2
, REG(B11_8
), REG(B7_4
));
944 tcg_gen_and_i32(t1
, t1
, t2
);
946 tcg_gen_shri_i32(t1
, t1
, 31);
947 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
948 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
950 tcg_gen_mov_i32(REG(B11_8
), t0
);
954 case 0x2008: /* tst Rm,Rn */
956 TCGv val
= tcg_temp_new();
957 tcg_gen_and_i32(val
, REG(B7_4
), REG(B11_8
));
958 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
962 case 0x200a: /* xor Rm,Rn */
963 tcg_gen_xor_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
965 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
967 if (ctx
->flags
& FPSCR_SZ
) {
968 TCGv_i64 fp
= tcg_temp_new_i64();
969 gen_load_fpr64(fp
, XREG(B7_4
));
970 gen_store_fpr64(fp
, XREG(B11_8
));
971 tcg_temp_free_i64(fp
);
973 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
976 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
978 if (ctx
->flags
& FPSCR_SZ
) {
979 TCGv addr_hi
= tcg_temp_new();
981 tcg_gen_addi_i32(addr_hi
, REG(B11_8
), 4);
982 tcg_gen_qemu_st32(cpu_fregs
[fr
], REG(B11_8
), ctx
->memidx
);
983 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
984 tcg_temp_free(addr_hi
);
986 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], REG(B11_8
), ctx
->memidx
);
989 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
991 if (ctx
->flags
& FPSCR_SZ
) {
992 TCGv addr_hi
= tcg_temp_new();
993 int fr
= XREG(B11_8
);
994 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
995 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
996 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
997 tcg_temp_free(addr_hi
);
999 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1002 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1004 if (ctx
->flags
& FPSCR_SZ
) {
1005 TCGv addr_hi
= tcg_temp_new();
1006 int fr
= XREG(B11_8
);
1007 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1008 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
1009 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1010 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 8);
1011 tcg_temp_free(addr_hi
);
1013 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1014 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
1017 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1019 if (ctx
->flags
& FPSCR_SZ
) {
1020 TCGv addr
= tcg_temp_new_i32();
1021 int fr
= XREG(B7_4
);
1022 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1023 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1024 tcg_gen_subi_i32(addr
, addr
, 4);
1025 tcg_gen_qemu_st32(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1026 tcg_gen_mov_i32(REG(B11_8
), addr
);
1027 tcg_temp_free(addr
);
1030 addr
= tcg_temp_new_i32();
1031 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1032 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1033 tcg_gen_mov_i32(REG(B11_8
), addr
);
1034 tcg_temp_free(addr
);
1037 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1040 TCGv addr
= tcg_temp_new_i32();
1041 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
1042 if (ctx
->flags
& FPSCR_SZ
) {
1043 int fr
= XREG(B11_8
);
1044 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1045 tcg_gen_addi_i32(addr
, addr
, 4);
1046 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1048 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], addr
, ctx
->memidx
);
1050 tcg_temp_free(addr
);
1053 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1056 TCGv addr
= tcg_temp_new();
1057 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
1058 if (ctx
->flags
& FPSCR_SZ
) {
1059 int fr
= XREG(B7_4
);
1060 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1061 tcg_gen_addi_i32(addr
, addr
, 4);
1062 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1064 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1066 tcg_temp_free(addr
);
1069 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1070 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1071 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1072 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1073 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1074 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1077 if (ctx
->flags
& FPSCR_PR
) {
1080 if (ctx
->opcode
& 0x0110)
1081 break; /* illegal instruction */
1082 fp0
= tcg_temp_new_i64();
1083 fp1
= tcg_temp_new_i64();
1084 gen_load_fpr64(fp0
, DREG(B11_8
));
1085 gen_load_fpr64(fp1
, DREG(B7_4
));
1086 switch (ctx
->opcode
& 0xf00f) {
1087 case 0xf000: /* fadd Rm,Rn */
1088 gen_helper_fadd_DT(fp0
, cpu_env
, fp0
, fp1
);
1090 case 0xf001: /* fsub Rm,Rn */
1091 gen_helper_fsub_DT(fp0
, cpu_env
, fp0
, fp1
);
1093 case 0xf002: /* fmul Rm,Rn */
1094 gen_helper_fmul_DT(fp0
, cpu_env
, fp0
, fp1
);
1096 case 0xf003: /* fdiv Rm,Rn */
1097 gen_helper_fdiv_DT(fp0
, cpu_env
, fp0
, fp1
);
1099 case 0xf004: /* fcmp/eq Rm,Rn */
1100 gen_helper_fcmp_eq_DT(cpu_env
, fp0
, fp1
);
1102 case 0xf005: /* fcmp/gt Rm,Rn */
1103 gen_helper_fcmp_gt_DT(cpu_env
, fp0
, fp1
);
1106 gen_store_fpr64(fp0
, DREG(B11_8
));
1107 tcg_temp_free_i64(fp0
);
1108 tcg_temp_free_i64(fp1
);
1110 switch (ctx
->opcode
& 0xf00f) {
1111 case 0xf000: /* fadd Rm,Rn */
1112 gen_helper_fadd_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1113 cpu_fregs
[FREG(B11_8
)],
1114 cpu_fregs
[FREG(B7_4
)]);
1116 case 0xf001: /* fsub Rm,Rn */
1117 gen_helper_fsub_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1118 cpu_fregs
[FREG(B11_8
)],
1119 cpu_fregs
[FREG(B7_4
)]);
1121 case 0xf002: /* fmul Rm,Rn */
1122 gen_helper_fmul_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1123 cpu_fregs
[FREG(B11_8
)],
1124 cpu_fregs
[FREG(B7_4
)]);
1126 case 0xf003: /* fdiv Rm,Rn */
1127 gen_helper_fdiv_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1128 cpu_fregs
[FREG(B11_8
)],
1129 cpu_fregs
[FREG(B7_4
)]);
1131 case 0xf004: /* fcmp/eq Rm,Rn */
1132 gen_helper_fcmp_eq_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1133 cpu_fregs
[FREG(B7_4
)]);
1135 case 0xf005: /* fcmp/gt Rm,Rn */
1136 gen_helper_fcmp_gt_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1137 cpu_fregs
[FREG(B7_4
)]);
1143 case 0xf00e: /* fmac FR0,RM,Rn */
1146 if (ctx
->flags
& FPSCR_PR
) {
1147 break; /* illegal instruction */
1149 gen_helper_fmac_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1150 cpu_fregs
[FREG(0)], cpu_fregs
[FREG(B7_4
)],
1151 cpu_fregs
[FREG(B11_8
)]);
1157 switch (ctx
->opcode
& 0xff00) {
1158 case 0xc900: /* and #imm,R0 */
1159 tcg_gen_andi_i32(REG(0), REG(0), B7_0
);
1161 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1164 addr
= tcg_temp_new();
1165 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1166 val
= tcg_temp_new();
1167 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1168 tcg_gen_andi_i32(val
, val
, B7_0
);
1169 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1171 tcg_temp_free(addr
);
1174 case 0x8b00: /* bf label */
1175 CHECK_NOT_DELAY_SLOT
1176 gen_conditional_jump(ctx
, ctx
->pc
+ 2,
1177 ctx
->pc
+ 4 + B7_0s
* 2);
1178 ctx
->bstate
= BS_BRANCH
;
1180 case 0x8f00: /* bf/s label */
1181 CHECK_NOT_DELAY_SLOT
1182 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 0);
1183 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1185 case 0x8900: /* bt label */
1186 CHECK_NOT_DELAY_SLOT
1187 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2,
1189 ctx
->bstate
= BS_BRANCH
;
1191 case 0x8d00: /* bt/s label */
1192 CHECK_NOT_DELAY_SLOT
1193 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 1);
1194 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1196 case 0x8800: /* cmp/eq #imm,R0 */
1197 gen_cmp_imm(TCG_COND_EQ
, REG(0), B7_0s
);
1199 case 0xc400: /* mov.b @(disp,GBR),R0 */
1201 TCGv addr
= tcg_temp_new();
1202 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1203 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1204 tcg_temp_free(addr
);
1207 case 0xc500: /* mov.w @(disp,GBR),R0 */
1209 TCGv addr
= tcg_temp_new();
1210 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1211 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1212 tcg_temp_free(addr
);
1215 case 0xc600: /* mov.l @(disp,GBR),R0 */
1217 TCGv addr
= tcg_temp_new();
1218 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1219 tcg_gen_qemu_ld32s(REG(0), addr
, ctx
->memidx
);
1220 tcg_temp_free(addr
);
1223 case 0xc000: /* mov.b R0,@(disp,GBR) */
1225 TCGv addr
= tcg_temp_new();
1226 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1227 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1228 tcg_temp_free(addr
);
1231 case 0xc100: /* mov.w R0,@(disp,GBR) */
1233 TCGv addr
= tcg_temp_new();
1234 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1235 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1236 tcg_temp_free(addr
);
1239 case 0xc200: /* mov.l R0,@(disp,GBR) */
1241 TCGv addr
= tcg_temp_new();
1242 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1243 tcg_gen_qemu_st32(REG(0), addr
, ctx
->memidx
);
1244 tcg_temp_free(addr
);
1247 case 0x8000: /* mov.b R0,@(disp,Rn) */
1249 TCGv addr
= tcg_temp_new();
1250 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1251 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1252 tcg_temp_free(addr
);
1255 case 0x8100: /* mov.w R0,@(disp,Rn) */
1257 TCGv addr
= tcg_temp_new();
1258 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1259 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1260 tcg_temp_free(addr
);
1263 case 0x8400: /* mov.b @(disp,Rn),R0 */
1265 TCGv addr
= tcg_temp_new();
1266 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1267 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1268 tcg_temp_free(addr
);
1271 case 0x8500: /* mov.w @(disp,Rn),R0 */
1273 TCGv addr
= tcg_temp_new();
1274 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1275 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1276 tcg_temp_free(addr
);
1279 case 0xc700: /* mova @(disp,PC),R0 */
1280 tcg_gen_movi_i32(REG(0), ((ctx
->pc
& 0xfffffffc) + 4 + B7_0
* 4) & ~3);
1282 case 0xcb00: /* or #imm,R0 */
1283 tcg_gen_ori_i32(REG(0), REG(0), B7_0
);
1285 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1288 addr
= tcg_temp_new();
1289 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1290 val
= tcg_temp_new();
1291 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1292 tcg_gen_ori_i32(val
, val
, B7_0
);
1293 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1295 tcg_temp_free(addr
);
1298 case 0xc300: /* trapa #imm */
1301 CHECK_NOT_DELAY_SLOT
1302 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1303 imm
= tcg_const_i32(B7_0
);
1304 gen_helper_trapa(cpu_env
, imm
);
1306 ctx
->bstate
= BS_BRANCH
;
1309 case 0xc800: /* tst #imm,R0 */
1311 TCGv val
= tcg_temp_new();
1312 tcg_gen_andi_i32(val
, REG(0), B7_0
);
1313 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1317 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1319 TCGv val
= tcg_temp_new();
1320 tcg_gen_add_i32(val
, REG(0), cpu_gbr
);
1321 tcg_gen_qemu_ld8u(val
, val
, ctx
->memidx
);
1322 tcg_gen_andi_i32(val
, val
, B7_0
);
1323 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1327 case 0xca00: /* xor #imm,R0 */
1328 tcg_gen_xori_i32(REG(0), REG(0), B7_0
);
1330 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1333 addr
= tcg_temp_new();
1334 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1335 val
= tcg_temp_new();
1336 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1337 tcg_gen_xori_i32(val
, val
, B7_0
);
1338 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1340 tcg_temp_free(addr
);
1345 switch (ctx
->opcode
& 0xf08f) {
1346 case 0x408e: /* ldc Rm,Rn_BANK */
1348 tcg_gen_mov_i32(ALTREG(B6_4
), REG(B11_8
));
1350 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1352 tcg_gen_qemu_ld32s(ALTREG(B6_4
), REG(B11_8
), ctx
->memidx
);
1353 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1355 case 0x0082: /* stc Rm_BANK,Rn */
1357 tcg_gen_mov_i32(REG(B11_8
), ALTREG(B6_4
));
1359 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1362 TCGv addr
= tcg_temp_new();
1363 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1364 tcg_gen_qemu_st32(ALTREG(B6_4
), addr
, ctx
->memidx
);
1365 tcg_gen_mov_i32(REG(B11_8
), addr
);
1366 tcg_temp_free(addr
);
1371 switch (ctx
->opcode
& 0xf0ff) {
1372 case 0x0023: /* braf Rn */
1373 CHECK_NOT_DELAY_SLOT
1374 tcg_gen_addi_i32(cpu_delayed_pc
, REG(B11_8
), ctx
->pc
+ 4);
1375 ctx
->flags
|= DELAY_SLOT
;
1376 ctx
->delayed_pc
= (uint32_t) - 1;
1378 case 0x0003: /* bsrf Rn */
1379 CHECK_NOT_DELAY_SLOT
1380 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1381 tcg_gen_add_i32(cpu_delayed_pc
, REG(B11_8
), cpu_pr
);
1382 ctx
->flags
|= DELAY_SLOT
;
1383 ctx
->delayed_pc
= (uint32_t) - 1;
1385 case 0x4015: /* cmp/pl Rn */
1386 gen_cmp_imm(TCG_COND_GT
, REG(B11_8
), 0);
1388 case 0x4011: /* cmp/pz Rn */
1389 gen_cmp_imm(TCG_COND_GE
, REG(B11_8
), 0);
1391 case 0x4010: /* dt Rn */
1392 tcg_gen_subi_i32(REG(B11_8
), REG(B11_8
), 1);
1393 gen_cmp_imm(TCG_COND_EQ
, REG(B11_8
), 0);
1395 case 0x402b: /* jmp @Rn */
1396 CHECK_NOT_DELAY_SLOT
1397 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1398 ctx
->flags
|= DELAY_SLOT
;
1399 ctx
->delayed_pc
= (uint32_t) - 1;
1401 case 0x400b: /* jsr @Rn */
1402 CHECK_NOT_DELAY_SLOT
1403 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1404 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1405 ctx
->flags
|= DELAY_SLOT
;
1406 ctx
->delayed_pc
= (uint32_t) - 1;
1408 case 0x400e: /* ldc Rm,SR */
1410 tcg_gen_andi_i32(cpu_sr
, REG(B11_8
), 0x700083f3);
1411 ctx
->bstate
= BS_STOP
;
1413 case 0x4007: /* ldc.l @Rm+,SR */
1416 TCGv val
= tcg_temp_new();
1417 tcg_gen_qemu_ld32s(val
, REG(B11_8
), ctx
->memidx
);
1418 tcg_gen_andi_i32(cpu_sr
, val
, 0x700083f3);
1420 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1421 ctx
->bstate
= BS_STOP
;
1424 case 0x0002: /* stc SR,Rn */
1426 tcg_gen_mov_i32(REG(B11_8
), cpu_sr
);
1428 case 0x4003: /* stc SR,@-Rn */
1431 TCGv addr
= tcg_temp_new();
1432 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1433 tcg_gen_qemu_st32(cpu_sr
, addr
, ctx
->memidx
);
1434 tcg_gen_mov_i32(REG(B11_8
), addr
);
1435 tcg_temp_free(addr
);
1438 #define LD(reg,ldnum,ldpnum,prechk) \
1441 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1445 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1446 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1448 #define ST(reg,stnum,stpnum,prechk) \
1451 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1456 TCGv addr = tcg_temp_new(); \
1457 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1458 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1459 tcg_gen_mov_i32(REG(B11_8), addr); \
1460 tcg_temp_free(addr); \
1463 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1464 LD(reg,ldnum,ldpnum,prechk) \
1465 ST(reg,stnum,stpnum,prechk)
1466 LDST(gbr
, 0x401e, 0x4017, 0x0012, 0x4013, {})
1467 LDST(vbr
, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED
)
1468 LDST(ssr
, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED
)
1469 LDST(spc
, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED
)
1470 ST(sgr
, 0x003a, 0x4032, CHECK_PRIVILEGED
)
1471 LD(sgr
, 0x403a, 0x4036, CHECK_PRIVILEGED
if (!(ctx
->features
& SH_FEATURE_SH4A
)) break;)
1472 LDST(dbr
, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED
)
1473 LDST(mach
, 0x400a, 0x4006, 0x000a, 0x4002, {})
1474 LDST(macl
, 0x401a, 0x4016, 0x001a, 0x4012, {})
1475 LDST(pr
, 0x402a, 0x4026, 0x002a, 0x4022, {})
1476 LDST(fpul
, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED
})
1477 case 0x406a: /* lds Rm,FPSCR */
1479 gen_helper_ld_fpscr(cpu_env
, REG(B11_8
));
1480 ctx
->bstate
= BS_STOP
;
1482 case 0x4066: /* lds.l @Rm+,FPSCR */
1485 TCGv addr
= tcg_temp_new();
1486 tcg_gen_qemu_ld32s(addr
, REG(B11_8
), ctx
->memidx
);
1487 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1488 gen_helper_ld_fpscr(cpu_env
, addr
);
1489 tcg_temp_free(addr
);
1490 ctx
->bstate
= BS_STOP
;
1493 case 0x006a: /* sts FPSCR,Rn */
1495 tcg_gen_andi_i32(REG(B11_8
), cpu_fpscr
, 0x003fffff);
1497 case 0x4062: /* sts FPSCR,@-Rn */
1501 val
= tcg_temp_new();
1502 tcg_gen_andi_i32(val
, cpu_fpscr
, 0x003fffff);
1503 addr
= tcg_temp_new();
1504 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1505 tcg_gen_qemu_st32(val
, addr
, ctx
->memidx
);
1506 tcg_gen_mov_i32(REG(B11_8
), addr
);
1507 tcg_temp_free(addr
);
1511 case 0x00c3: /* movca.l R0,@Rm */
1513 TCGv val
= tcg_temp_new();
1514 tcg_gen_qemu_ld32u(val
, REG(B11_8
), ctx
->memidx
);
1515 gen_helper_movcal(cpu_env
, REG(B11_8
), val
);
1516 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1518 ctx
->has_movcal
= 1;
1521 /* MOVUA.L @Rm,R0 (Rm) -> R0
1522 Load non-boundary-aligned data */
1523 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1526 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1527 Load non-boundary-aligned data */
1528 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1529 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1531 case 0x0029: /* movt Rn */
1532 tcg_gen_andi_i32(REG(B11_8
), cpu_sr
, SR_T
);
1537 If (T == 1) R0 -> (Rn)
1540 if (ctx
->features
& SH_FEATURE_SH4A
) {
1541 int label
= gen_new_label();
1542 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
1543 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cpu_ldst
);
1544 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ldst
, 0, label
);
1545 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1546 gen_set_label(label
);
1547 tcg_gen_movi_i32(cpu_ldst
, 0);
1555 When interrupt/exception
1558 if (ctx
->features
& SH_FEATURE_SH4A
) {
1559 tcg_gen_movi_i32(cpu_ldst
, 0);
1560 tcg_gen_qemu_ld32s(REG(0), REG(B11_8
), ctx
->memidx
);
1561 tcg_gen_movi_i32(cpu_ldst
, 1);
1565 case 0x0093: /* ocbi @Rn */
1567 gen_helper_ocbi(cpu_env
, REG(B11_8
));
1570 case 0x00a3: /* ocbp @Rn */
1571 case 0x00b3: /* ocbwb @Rn */
1572 /* These instructions are supposed to do nothing in case of
1573 a cache miss. Given that we only partially emulate caches
1574 it is safe to simply ignore them. */
1576 case 0x0083: /* pref @Rn */
1578 case 0x00d3: /* prefi @Rn */
1579 if (ctx
->features
& SH_FEATURE_SH4A
)
1583 case 0x00e3: /* icbi @Rn */
1584 if (ctx
->features
& SH_FEATURE_SH4A
)
1588 case 0x00ab: /* synco */
1589 if (ctx
->features
& SH_FEATURE_SH4A
)
1593 case 0x4024: /* rotcl Rn */
1595 TCGv tmp
= tcg_temp_new();
1596 tcg_gen_mov_i32(tmp
, cpu_sr
);
1597 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1598 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1599 gen_copy_bit_i32(REG(B11_8
), 0, tmp
, 0);
1603 case 0x4025: /* rotcr Rn */
1605 TCGv tmp
= tcg_temp_new();
1606 tcg_gen_mov_i32(tmp
, cpu_sr
);
1607 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1608 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1609 gen_copy_bit_i32(REG(B11_8
), 31, tmp
, 0);
1613 case 0x4004: /* rotl Rn */
1614 tcg_gen_rotli_i32(REG(B11_8
), REG(B11_8
), 1);
1615 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1617 case 0x4005: /* rotr Rn */
1618 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1619 tcg_gen_rotri_i32(REG(B11_8
), REG(B11_8
), 1);
1621 case 0x4000: /* shll Rn */
1622 case 0x4020: /* shal Rn */
1623 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1624 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1626 case 0x4021: /* shar Rn */
1627 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1628 tcg_gen_sari_i32(REG(B11_8
), REG(B11_8
), 1);
1630 case 0x4001: /* shlr Rn */
1631 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1632 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1634 case 0x4008: /* shll2 Rn */
1635 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 2);
1637 case 0x4018: /* shll8 Rn */
1638 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 8);
1640 case 0x4028: /* shll16 Rn */
1641 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 16);
1643 case 0x4009: /* shlr2 Rn */
1644 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 2);
1646 case 0x4019: /* shlr8 Rn */
1647 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 8);
1649 case 0x4029: /* shlr16 Rn */
1650 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 16);
1652 case 0x401b: /* tas.b @Rn */
1655 addr
= tcg_temp_local_new();
1656 tcg_gen_mov_i32(addr
, REG(B11_8
));
1657 val
= tcg_temp_local_new();
1658 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1659 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1660 tcg_gen_ori_i32(val
, val
, 0x80);
1661 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1663 tcg_temp_free(addr
);
1666 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1668 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1670 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1672 tcg_gen_mov_i32(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1674 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1676 if (ctx
->flags
& FPSCR_PR
) {
1678 if (ctx
->opcode
& 0x0100)
1679 break; /* illegal instruction */
1680 fp
= tcg_temp_new_i64();
1681 gen_helper_float_DT(fp
, cpu_env
, cpu_fpul
);
1682 gen_store_fpr64(fp
, DREG(B11_8
));
1683 tcg_temp_free_i64(fp
);
1686 gen_helper_float_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
, cpu_fpul
);
1689 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1691 if (ctx
->flags
& FPSCR_PR
) {
1693 if (ctx
->opcode
& 0x0100)
1694 break; /* illegal instruction */
1695 fp
= tcg_temp_new_i64();
1696 gen_load_fpr64(fp
, DREG(B11_8
));
1697 gen_helper_ftrc_DT(cpu_fpul
, cpu_env
, fp
);
1698 tcg_temp_free_i64(fp
);
1701 gen_helper_ftrc_FT(cpu_fpul
, cpu_env
, cpu_fregs
[FREG(B11_8
)]);
1704 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1707 gen_helper_fneg_T(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1710 case 0xf05d: /* fabs FRn/DRn */
1712 if (ctx
->flags
& FPSCR_PR
) {
1713 if (ctx
->opcode
& 0x0100)
1714 break; /* illegal instruction */
1715 TCGv_i64 fp
= tcg_temp_new_i64();
1716 gen_load_fpr64(fp
, DREG(B11_8
));
1717 gen_helper_fabs_DT(fp
, fp
);
1718 gen_store_fpr64(fp
, DREG(B11_8
));
1719 tcg_temp_free_i64(fp
);
1721 gen_helper_fabs_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1724 case 0xf06d: /* fsqrt FRn */
1726 if (ctx
->flags
& FPSCR_PR
) {
1727 if (ctx
->opcode
& 0x0100)
1728 break; /* illegal instruction */
1729 TCGv_i64 fp
= tcg_temp_new_i64();
1730 gen_load_fpr64(fp
, DREG(B11_8
));
1731 gen_helper_fsqrt_DT(fp
, cpu_env
, fp
);
1732 gen_store_fpr64(fp
, DREG(B11_8
));
1733 tcg_temp_free_i64(fp
);
1735 gen_helper_fsqrt_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1736 cpu_fregs
[FREG(B11_8
)]);
1739 case 0xf07d: /* fsrra FRn */
1742 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1744 if (!(ctx
->flags
& FPSCR_PR
)) {
1745 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0);
1748 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1750 if (!(ctx
->flags
& FPSCR_PR
)) {
1751 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0x3f800000);
1754 case 0xf0ad: /* fcnvsd FPUL,DRn */
1757 TCGv_i64 fp
= tcg_temp_new_i64();
1758 gen_helper_fcnvsd_FT_DT(fp
, cpu_env
, cpu_fpul
);
1759 gen_store_fpr64(fp
, DREG(B11_8
));
1760 tcg_temp_free_i64(fp
);
1763 case 0xf0bd: /* fcnvds DRn,FPUL */
1766 TCGv_i64 fp
= tcg_temp_new_i64();
1767 gen_load_fpr64(fp
, DREG(B11_8
));
1768 gen_helper_fcnvds_DT_FT(cpu_fpul
, cpu_env
, fp
);
1769 tcg_temp_free_i64(fp
);
1772 case 0xf0ed: /* fipr FVm,FVn */
1774 if ((ctx
->flags
& FPSCR_PR
) == 0) {
1776 m
= tcg_const_i32((ctx
->opcode
>> 8) & 3);
1777 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1778 gen_helper_fipr(cpu_env
, m
, n
);
1784 case 0xf0fd: /* ftrv XMTRX,FVn */
1786 if ((ctx
->opcode
& 0x0300) == 0x0100 &&
1787 (ctx
->flags
& FPSCR_PR
) == 0) {
1789 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1790 gen_helper_ftrv(cpu_env
, n
);
1797 fprintf(stderr
, "unknown instruction 0x%04x at pc 0x%08x\n",
1798 ctx
->opcode
, ctx
->pc
);
1801 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1802 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1803 gen_helper_raise_slot_illegal_instruction(cpu_env
);
1805 gen_helper_raise_illegal_instruction(cpu_env
);
1807 ctx
->bstate
= BS_BRANCH
;
1810 static void decode_opc(DisasContext
* ctx
)
1812 uint32_t old_flags
= ctx
->flags
;
1814 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
1815 tcg_gen_debug_insn_start(ctx
->pc
);
1820 if (old_flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1821 if (ctx
->flags
& DELAY_SLOT_CLEARME
) {
1824 /* go out of the delay slot */
1825 uint32_t new_flags
= ctx
->flags
;
1826 new_flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
1827 gen_store_flags(new_flags
);
1830 ctx
->bstate
= BS_BRANCH
;
1831 if (old_flags
& DELAY_SLOT_CONDITIONAL
) {
1832 gen_delayed_conditional_jump(ctx
);
1833 } else if (old_flags
& DELAY_SLOT
) {
1839 /* go into a delay slot */
1840 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))
1841 gen_store_flags(ctx
->flags
);
1845 gen_intermediate_code_internal(SuperHCPU
*cpu
, TranslationBlock
*tb
,
1848 CPUState
*cs
= CPU(cpu
);
1849 CPUSH4State
*env
= &cpu
->env
;
1851 target_ulong pc_start
;
1852 static uint16_t *gen_opc_end
;
1859 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
1861 ctx
.flags
= (uint32_t)tb
->flags
;
1862 ctx
.bstate
= BS_NONE
;
1863 ctx
.memidx
= (ctx
.flags
& SR_MD
) == 0 ? 1 : 0;
1864 /* We don't know if the delayed pc came from a dynamic or static branch,
1865 so assume it is a dynamic branch. */
1866 ctx
.delayed_pc
= -1; /* use delayed pc from env pointer */
1868 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
1869 ctx
.features
= env
->features
;
1870 ctx
.has_movcal
= (ctx
.flags
& TB_FLAG_PENDING_MOVCA
);
1874 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
1876 max_insns
= CF_COUNT_MASK
;
1878 while (ctx
.bstate
== BS_NONE
&& tcg_ctx
.gen_opc_ptr
< gen_opc_end
) {
1879 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
1880 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1881 if (ctx
.pc
== bp
->pc
) {
1882 /* We have hit a breakpoint - make sure PC is up-to-date */
1883 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1884 gen_helper_debug(cpu_env
);
1885 ctx
.bstate
= BS_BRANCH
;
1891 i
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
1895 tcg_ctx
.gen_opc_instr_start
[ii
++] = 0;
1897 tcg_ctx
.gen_opc_pc
[ii
] = ctx
.pc
;
1898 gen_opc_hflags
[ii
] = ctx
.flags
;
1899 tcg_ctx
.gen_opc_instr_start
[ii
] = 1;
1900 tcg_ctx
.gen_opc_icount
[ii
] = num_insns
;
1902 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
1905 fprintf(stderr
, "Loading opcode at address 0x%08x\n", ctx
.pc
);
1908 ctx
.opcode
= cpu_lduw_code(env
, ctx
.pc
);
1912 if ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
1914 if (cs
->singlestep_enabled
) {
1917 if (num_insns
>= max_insns
)
1922 if (tb
->cflags
& CF_LAST_IO
)
1924 if (cs
->singlestep_enabled
) {
1925 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1926 gen_helper_debug(cpu_env
);
1928 switch (ctx
.bstate
) {
1930 /* gen_op_interrupt_restart(); */
1934 gen_store_flags(ctx
.flags
| DELAY_SLOT_CLEARME
);
1936 gen_goto_tb(&ctx
, 0, ctx
.pc
);
1939 /* gen_op_interrupt_restart(); */
1948 gen_tb_end(tb
, num_insns
);
1949 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
1951 i
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
1954 tcg_ctx
.gen_opc_instr_start
[ii
++] = 0;
1956 tb
->size
= ctx
.pc
- pc_start
;
1957 tb
->icount
= num_insns
;
1961 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
1962 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
1963 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 0);
1969 void gen_intermediate_code(CPUSH4State
* env
, struct TranslationBlock
*tb
)
1971 gen_intermediate_code_internal(sh_env_get_cpu(env
), tb
, false);
1974 void gen_intermediate_code_pc(CPUSH4State
* env
, struct TranslationBlock
*tb
)
1976 gen_intermediate_code_internal(sh_env_get_cpu(env
), tb
, true);
1979 void restore_state_to_opc(CPUSH4State
*env
, TranslationBlock
*tb
, int pc_pos
)
1981 env
->pc
= tcg_ctx
.gen_opc_pc
[pc_pos
];
1982 env
->flags
= gen_opc_hflags
[pc_pos
];