4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 //#define SH4_SINGLE_STEP
24 #include "disas/disas.h"
31 typedef struct DisasContext
{
32 struct TranslationBlock
*tb
;
39 int singlestep_enabled
;
/* Privilege test for the code being translated.  In user-only emulation
 * everything runs unprivileged; in softmmu mode the MD bit of the cached
 * SR decides.  NOTE(review): the #else/#endif were dropped by the
 * extraction and are restored here. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->flags & SR_MD))
#endif
/* Values for DisasContext.bstate: why/how translation of the TB ends.
 * NOTE(review): the enum braces were dropped by the extraction and are
 * restored here. */
enum {
    BS_NONE   = 0, /* We go out of the TB without reaching a branch or an
                      exception condition */
    BS_STOP   = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP   = 3, /* We reached an exception condition */
};
59 /* global register indexes */
60 static TCGv_ptr cpu_env
;
61 static TCGv cpu_gregs
[24];
62 static TCGv cpu_pc
, cpu_sr
, cpu_ssr
, cpu_spc
, cpu_gbr
;
63 static TCGv cpu_vbr
, cpu_sgr
, cpu_dbr
, cpu_mach
, cpu_macl
;
64 static TCGv cpu_pr
, cpu_fpscr
, cpu_fpul
, cpu_ldst
;
65 static TCGv cpu_fregs
[32];
67 /* internal register indexes */
68 static TCGv cpu_flags
, cpu_delayed_pc
;
70 static uint32_t gen_opc_hflags
[OPC_BUF_SIZE
];
72 #include "exec/gen-icount.h"
74 void sh4_translate_init(void)
77 static int done_init
= 0;
78 static const char * const gregnames
[24] = {
79 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
80 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
81 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
82 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
83 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
85 static const char * const fregnames
[32] = {
86 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
87 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
88 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
89 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
90 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
91 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
92 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
93 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
99 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
101 for (i
= 0; i
< 24; i
++)
102 cpu_gregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
103 offsetof(CPUSH4State
, gregs
[i
]),
106 cpu_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
107 offsetof(CPUSH4State
, pc
), "PC");
108 cpu_sr
= tcg_global_mem_new_i32(TCG_AREG0
,
109 offsetof(CPUSH4State
, sr
), "SR");
110 cpu_ssr
= tcg_global_mem_new_i32(TCG_AREG0
,
111 offsetof(CPUSH4State
, ssr
), "SSR");
112 cpu_spc
= tcg_global_mem_new_i32(TCG_AREG0
,
113 offsetof(CPUSH4State
, spc
), "SPC");
114 cpu_gbr
= tcg_global_mem_new_i32(TCG_AREG0
,
115 offsetof(CPUSH4State
, gbr
), "GBR");
116 cpu_vbr
= tcg_global_mem_new_i32(TCG_AREG0
,
117 offsetof(CPUSH4State
, vbr
), "VBR");
118 cpu_sgr
= tcg_global_mem_new_i32(TCG_AREG0
,
119 offsetof(CPUSH4State
, sgr
), "SGR");
120 cpu_dbr
= tcg_global_mem_new_i32(TCG_AREG0
,
121 offsetof(CPUSH4State
, dbr
), "DBR");
122 cpu_mach
= tcg_global_mem_new_i32(TCG_AREG0
,
123 offsetof(CPUSH4State
, mach
), "MACH");
124 cpu_macl
= tcg_global_mem_new_i32(TCG_AREG0
,
125 offsetof(CPUSH4State
, macl
), "MACL");
126 cpu_pr
= tcg_global_mem_new_i32(TCG_AREG0
,
127 offsetof(CPUSH4State
, pr
), "PR");
128 cpu_fpscr
= tcg_global_mem_new_i32(TCG_AREG0
,
129 offsetof(CPUSH4State
, fpscr
), "FPSCR");
130 cpu_fpul
= tcg_global_mem_new_i32(TCG_AREG0
,
131 offsetof(CPUSH4State
, fpul
), "FPUL");
133 cpu_flags
= tcg_global_mem_new_i32(TCG_AREG0
,
134 offsetof(CPUSH4State
, flags
), "_flags_");
135 cpu_delayed_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
136 offsetof(CPUSH4State
, delayed_pc
),
138 cpu_ldst
= tcg_global_mem_new_i32(TCG_AREG0
,
139 offsetof(CPUSH4State
, ldst
), "_ldst_");
141 for (i
= 0; i
< 32; i
++)
142 cpu_fregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
143 offsetof(CPUSH4State
, fregs
[i
]),
146 /* register helpers */
153 void superh_cpu_dump_state(CPUState
*cs
, FILE *f
,
154 fprintf_function cpu_fprintf
, int flags
)
156 SuperHCPU
*cpu
= SUPERH_CPU(cs
);
157 CPUSH4State
*env
= &cpu
->env
;
159 cpu_fprintf(f
, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
160 env
->pc
, env
->sr
, env
->pr
, env
->fpscr
);
161 cpu_fprintf(f
, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
162 env
->spc
, env
->ssr
, env
->gbr
, env
->vbr
);
163 cpu_fprintf(f
, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
164 env
->sgr
, env
->dbr
, env
->delayed_pc
, env
->fpul
);
165 for (i
= 0; i
< 24; i
+= 4) {
166 cpu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
167 i
, env
->gregs
[i
], i
+ 1, env
->gregs
[i
+ 1],
168 i
+ 2, env
->gregs
[i
+ 2], i
+ 3, env
->gregs
[i
+ 3]);
170 if (env
->flags
& DELAY_SLOT
) {
171 cpu_fprintf(f
, "in delay slot (delayed_pc=0x%08x)\n",
173 } else if (env
->flags
& DELAY_SLOT_CONDITIONAL
) {
174 cpu_fprintf(f
, "in conditional delay slot (delayed_pc=0x%08x)\n",
179 static void gen_goto_tb(DisasContext
* ctx
, int n
, target_ulong dest
)
181 TranslationBlock
*tb
;
184 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
185 !ctx
->singlestep_enabled
) {
186 /* Use a direct jump if in same page and singlestep not enabled */
188 tcg_gen_movi_i32(cpu_pc
, dest
);
189 tcg_gen_exit_tb((tcg_target_long
)tb
+ n
);
191 tcg_gen_movi_i32(cpu_pc
, dest
);
192 if (ctx
->singlestep_enabled
)
193 gen_helper_debug(cpu_env
);
198 static void gen_jump(DisasContext
* ctx
)
200 if (ctx
->delayed_pc
== (uint32_t) - 1) {
201 /* Target is not statically known, it comes necessarily from a
202 delayed jump as immediate jump are conditinal jumps */
203 tcg_gen_mov_i32(cpu_pc
, cpu_delayed_pc
);
204 if (ctx
->singlestep_enabled
)
205 gen_helper_debug(cpu_env
);
208 gen_goto_tb(ctx
, 0, ctx
->delayed_pc
);
212 static inline void gen_branch_slot(uint32_t delayed_pc
, int t
)
215 int label
= gen_new_label();
216 tcg_gen_movi_i32(cpu_delayed_pc
, delayed_pc
);
218 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
219 tcg_gen_brcondi_i32(t
? TCG_COND_EQ
:TCG_COND_NE
, sr
, 0, label
);
220 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
221 gen_set_label(label
);
224 /* Immediate conditional jump (bt or bf) */
225 static void gen_conditional_jump(DisasContext
* ctx
,
226 target_ulong ift
, target_ulong ifnott
)
231 l1
= gen_new_label();
233 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
234 tcg_gen_brcondi_i32(TCG_COND_NE
, sr
, 0, l1
);
235 gen_goto_tb(ctx
, 0, ifnott
);
237 gen_goto_tb(ctx
, 1, ift
);
240 /* Delayed conditional jump (bt or bf) */
241 static void gen_delayed_conditional_jump(DisasContext
* ctx
)
246 l1
= gen_new_label();
248 tcg_gen_andi_i32(ds
, cpu_flags
, DELAY_SLOT_TRUE
);
249 tcg_gen_brcondi_i32(TCG_COND_NE
, ds
, 0, l1
);
250 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
252 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, ~DELAY_SLOT_TRUE
);
256 static inline void gen_cmp(int cond
, TCGv t0
, TCGv t1
)
261 tcg_gen_setcond_i32(cond
, t
, t1
, t0
);
262 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
263 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
268 static inline void gen_cmp_imm(int cond
, TCGv t0
, int32_t imm
)
273 tcg_gen_setcondi_i32(cond
, t
, t0
, imm
);
274 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
275 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
280 static inline void gen_store_flags(uint32_t flags
)
282 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
283 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, flags
);
286 static inline void gen_copy_bit_i32(TCGv t0
, int p0
, TCGv t1
, int p1
)
288 TCGv tmp
= tcg_temp_new();
293 tcg_gen_andi_i32(tmp
, t1
, (1 << p1
));
294 tcg_gen_andi_i32(t0
, t0
, ~(1 << p0
));
296 tcg_gen_shri_i32(tmp
, tmp
, p1
- p0
);
298 tcg_gen_shli_i32(tmp
, tmp
, p0
- p1
);
299 tcg_gen_or_i32(t0
, t0
, tmp
);
304 static inline void gen_load_fpr64(TCGv_i64 t
, int reg
)
306 tcg_gen_concat_i32_i64(t
, cpu_fregs
[reg
+ 1], cpu_fregs
[reg
]);
309 static inline void gen_store_fpr64 (TCGv_i64 t
, int reg
)
311 TCGv_i32 tmp
= tcg_temp_new_i32();
312 tcg_gen_trunc_i64_i32(tmp
, t
);
313 tcg_gen_mov_i32(cpu_fregs
[reg
+ 1], tmp
);
314 tcg_gen_shri_i64(t
, t
, 32);
315 tcg_gen_trunc_i64_i32(tmp
, t
);
316 tcg_gen_mov_i32(cpu_fregs
[reg
], tmp
);
317 tcg_temp_free_i32(tmp
);
/* Opcode field extractors (ctx must be the enclosing DisasContext). */
#define B3_0 (ctx->opcode & 0xf)                        /* bits 3..0 */
#define B6_4 ((ctx->opcode >> 4) & 0x7)                 /* bits 6..4 */
#define B7_4 ((ctx->opcode >> 4) & 0xf)                 /* bits 7..4 (Rm) */
#define B7_0 (ctx->opcode & 0xff)                       /* bits 7..0, zero-extended */
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff)) /* bits 7..0, sign-extended */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))                                /* 12-bit signed disp */
#define B11_8 ((ctx->opcode >> 8) & 0xf)                /* bits 11..8 (Rn) */
#define B15_12 ((ctx->opcode >> 12) & 0xf)              /* bits 15..12 (major op) */

/* General register access: R0-R7 are banked on SR.MD/SR.RB. */
#define REG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) == (SR_MD | SR_RB) \
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* The opposite bank of REG(), used by ldc/stc Rm_BANK forms. */
#define ALTREG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) != (SR_MD | SR_RB)\
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register access: FPSCR.FR selects the bank; XHACK swaps the pair
 * numbering used by the double-precision XD registers. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Guard macros used at the top of instruction cases in _decode_opc().
 * Each raises the appropriate exception and returns from _decode_opc()
 * when its precondition is violated.
 * NOTE(review): braces, else branches, 'return;' statements and the
 * line-continuation backslashes were dropped by the extraction and are
 * restored here. */

/* Raise slot-illegal if this insn appears in a delay slot. */
#define CHECK_NOT_DELAY_SLOT                                      \
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {     \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
        gen_helper_raise_slot_illegal_instruction(cpu_env);       \
        ctx->bstate = BS_BRANCH;                                  \
        return;                                                   \
    }

/* Raise illegal (or slot-illegal in a delay slot) in user mode. */
#define CHECK_PRIVILEGED                                          \
    if (IS_USER(ctx)) {                                           \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
            gen_helper_raise_slot_illegal_instruction(cpu_env);   \
        } else {                                                  \
            gen_helper_raise_illegal_instruction(cpu_env);        \
        }                                                         \
        ctx->bstate = BS_BRANCH;                                  \
        return;                                                   \
    }

/* Raise FPU-disable (or slot variant) when SR.FD is set. */
#define CHECK_FPU_ENABLED                                         \
    if (ctx->flags & SR_FD) {                                     \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
            gen_helper_raise_slot_fpu_disable(cpu_env);           \
        } else {                                                  \
            gen_helper_raise_fpu_disable(cpu_env);                \
        }                                                         \
        ctx->bstate = BS_BRANCH;                                  \
        return;                                                   \
    }
374 static void _decode_opc(DisasContext
* ctx
)
376 /* This code tries to make movcal emulation sufficiently
377 accurate for Linux purposes. This instruction writes
378 memory, and prior to that, always allocates a cache line.
379 It is used in two contexts:
380 - in memcpy, where data is copied in blocks, the first write
381 to a block uses movca.l for performance.
382 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
383 to flush the cache. Here, the data written by movcal.l is never
384 written to memory, and the data written is just bogus.
386 To simulate this, we simulate movcal.l, we store the value to memory,
387 but we also remember the previous content. If we see ocbi, we check
388 if movcal.l for that address was done previously. If so, the write should
389 not have hit the memory, so we restore the previous content.
390 When we see an instruction that is neither movca.l
391 nor ocbi, the previous content is discarded.
393 To optimize, we only try to flush stores when we're at the start of
394 TB, or if we already saw movca.l in this TB and did not flush stores
398 int opcode
= ctx
->opcode
& 0xf0ff;
399 if (opcode
!= 0x0093 /* ocbi */
400 && opcode
!= 0x00c3 /* movca.l */)
402 gen_helper_discard_movcal_backup(cpu_env
);
408 fprintf(stderr
, "Translating opcode 0x%04x\n", ctx
->opcode
);
411 switch (ctx
->opcode
) {
412 case 0x0019: /* div0u */
413 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~(SR_M
| SR_Q
| SR_T
));
415 case 0x000b: /* rts */
417 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_pr
);
418 ctx
->flags
|= DELAY_SLOT
;
419 ctx
->delayed_pc
= (uint32_t) - 1;
421 case 0x0028: /* clrmac */
422 tcg_gen_movi_i32(cpu_mach
, 0);
423 tcg_gen_movi_i32(cpu_macl
, 0);
425 case 0x0048: /* clrs */
426 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_S
);
428 case 0x0008: /* clrt */
429 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
431 case 0x0038: /* ldtlb */
433 gen_helper_ldtlb(cpu_env
);
435 case 0x002b: /* rte */
438 tcg_gen_mov_i32(cpu_sr
, cpu_ssr
);
439 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_spc
);
440 ctx
->flags
|= DELAY_SLOT
;
441 ctx
->delayed_pc
= (uint32_t) - 1;
443 case 0x0058: /* sets */
444 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_S
);
446 case 0x0018: /* sett */
447 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_T
);
449 case 0xfbfd: /* frchg */
450 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_FR
);
451 ctx
->bstate
= BS_STOP
;
453 case 0xf3fd: /* fschg */
454 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_SZ
);
455 ctx
->bstate
= BS_STOP
;
457 case 0x0009: /* nop */
459 case 0x001b: /* sleep */
461 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
+ 2);
462 gen_helper_sleep(cpu_env
);
466 switch (ctx
->opcode
& 0xf000) {
467 case 0x1000: /* mov.l Rm,@(disp,Rn) */
469 TCGv addr
= tcg_temp_new();
470 tcg_gen_addi_i32(addr
, REG(B11_8
), B3_0
* 4);
471 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
475 case 0x5000: /* mov.l @(disp,Rm),Rn */
477 TCGv addr
= tcg_temp_new();
478 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 4);
479 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
483 case 0xe000: /* mov #imm,Rn */
484 tcg_gen_movi_i32(REG(B11_8
), B7_0s
);
486 case 0x9000: /* mov.w @(disp,PC),Rn */
488 TCGv addr
= tcg_const_i32(ctx
->pc
+ 4 + B7_0
* 2);
489 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
493 case 0xd000: /* mov.l @(disp,PC),Rn */
495 TCGv addr
= tcg_const_i32((ctx
->pc
+ 4 + B7_0
* 4) & ~3);
496 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
500 case 0x7000: /* add #imm,Rn */
501 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), B7_0s
);
503 case 0xa000: /* bra disp */
505 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
506 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
507 ctx
->flags
|= DELAY_SLOT
;
509 case 0xb000: /* bsr disp */
511 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
512 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
513 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
514 ctx
->flags
|= DELAY_SLOT
;
518 switch (ctx
->opcode
& 0xf00f) {
519 case 0x6003: /* mov Rm,Rn */
520 tcg_gen_mov_i32(REG(B11_8
), REG(B7_4
));
522 case 0x2000: /* mov.b Rm,@Rn */
523 tcg_gen_qemu_st8(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
525 case 0x2001: /* mov.w Rm,@Rn */
526 tcg_gen_qemu_st16(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
528 case 0x2002: /* mov.l Rm,@Rn */
529 tcg_gen_qemu_st32(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
531 case 0x6000: /* mov.b @Rm,Rn */
532 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
534 case 0x6001: /* mov.w @Rm,Rn */
535 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
537 case 0x6002: /* mov.l @Rm,Rn */
538 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
540 case 0x2004: /* mov.b Rm,@-Rn */
542 TCGv addr
= tcg_temp_new();
543 tcg_gen_subi_i32(addr
, REG(B11_8
), 1);
544 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
); /* might cause re-execution */
545 tcg_gen_mov_i32(REG(B11_8
), addr
); /* modify register status */
549 case 0x2005: /* mov.w Rm,@-Rn */
551 TCGv addr
= tcg_temp_new();
552 tcg_gen_subi_i32(addr
, REG(B11_8
), 2);
553 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
554 tcg_gen_mov_i32(REG(B11_8
), addr
);
558 case 0x2006: /* mov.l Rm,@-Rn */
560 TCGv addr
= tcg_temp_new();
561 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
562 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
563 tcg_gen_mov_i32(REG(B11_8
), addr
);
566 case 0x6004: /* mov.b @Rm+,Rn */
567 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
569 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 1);
571 case 0x6005: /* mov.w @Rm+,Rn */
572 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
574 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
576 case 0x6006: /* mov.l @Rm+,Rn */
577 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
579 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
581 case 0x0004: /* mov.b Rm,@(R0,Rn) */
583 TCGv addr
= tcg_temp_new();
584 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
585 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
);
589 case 0x0005: /* mov.w Rm,@(R0,Rn) */
591 TCGv addr
= tcg_temp_new();
592 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
593 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
597 case 0x0006: /* mov.l Rm,@(R0,Rn) */
599 TCGv addr
= tcg_temp_new();
600 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
601 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
605 case 0x000c: /* mov.b @(R0,Rm),Rn */
607 TCGv addr
= tcg_temp_new();
608 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
609 tcg_gen_qemu_ld8s(REG(B11_8
), addr
, ctx
->memidx
);
613 case 0x000d: /* mov.w @(R0,Rm),Rn */
615 TCGv addr
= tcg_temp_new();
616 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
617 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
621 case 0x000e: /* mov.l @(R0,Rm),Rn */
623 TCGv addr
= tcg_temp_new();
624 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
625 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
629 case 0x6008: /* swap.b Rm,Rn */
632 high
= tcg_temp_new();
633 tcg_gen_andi_i32(high
, REG(B7_4
), 0xffff0000);
634 low
= tcg_temp_new();
635 tcg_gen_ext16u_i32(low
, REG(B7_4
));
636 tcg_gen_bswap16_i32(low
, low
);
637 tcg_gen_or_i32(REG(B11_8
), high
, low
);
642 case 0x6009: /* swap.w Rm,Rn */
643 tcg_gen_rotli_i32(REG(B11_8
), REG(B7_4
), 16);
645 case 0x200d: /* xtrct Rm,Rn */
648 high
= tcg_temp_new();
649 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
650 low
= tcg_temp_new();
651 tcg_gen_shri_i32(low
, REG(B11_8
), 16);
652 tcg_gen_or_i32(REG(B11_8
), high
, low
);
657 case 0x300c: /* add Rm,Rn */
658 tcg_gen_add_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
660 case 0x300e: /* addc Rm,Rn */
664 tcg_gen_andi_i32(t0
, cpu_sr
, SR_T
);
666 tcg_gen_add_i32(t1
, REG(B7_4
), REG(B11_8
));
667 tcg_gen_add_i32(t0
, t0
, t1
);
669 tcg_gen_setcond_i32(TCG_COND_GTU
, t2
, REG(B11_8
), t1
);
670 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, t1
, t0
);
671 tcg_gen_or_i32(t1
, t1
, t2
);
673 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
674 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
676 tcg_gen_mov_i32(REG(B11_8
), t0
);
680 case 0x300f: /* addv Rm,Rn */
684 tcg_gen_add_i32(t0
, REG(B7_4
), REG(B11_8
));
686 tcg_gen_xor_i32(t1
, t0
, REG(B11_8
));
688 tcg_gen_xor_i32(t2
, REG(B7_4
), REG(B11_8
));
689 tcg_gen_andc_i32(t1
, t1
, t2
);
691 tcg_gen_shri_i32(t1
, t1
, 31);
692 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
693 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
695 tcg_gen_mov_i32(REG(B7_4
), t0
);
699 case 0x2009: /* and Rm,Rn */
700 tcg_gen_and_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
702 case 0x3000: /* cmp/eq Rm,Rn */
703 gen_cmp(TCG_COND_EQ
, REG(B7_4
), REG(B11_8
));
705 case 0x3003: /* cmp/ge Rm,Rn */
706 gen_cmp(TCG_COND_GE
, REG(B7_4
), REG(B11_8
));
708 case 0x3007: /* cmp/gt Rm,Rn */
709 gen_cmp(TCG_COND_GT
, REG(B7_4
), REG(B11_8
));
711 case 0x3006: /* cmp/hi Rm,Rn */
712 gen_cmp(TCG_COND_GTU
, REG(B7_4
), REG(B11_8
));
714 case 0x3002: /* cmp/hs Rm,Rn */
715 gen_cmp(TCG_COND_GEU
, REG(B7_4
), REG(B11_8
));
717 case 0x200c: /* cmp/str Rm,Rn */
719 TCGv cmp1
= tcg_temp_new();
720 TCGv cmp2
= tcg_temp_new();
721 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
722 tcg_gen_xor_i32(cmp1
, REG(B7_4
), REG(B11_8
));
723 tcg_gen_andi_i32(cmp2
, cmp1
, 0xff000000);
724 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
725 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
726 tcg_gen_andi_i32(cmp2
, cmp1
, 0x00ff0000);
727 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
728 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
729 tcg_gen_andi_i32(cmp2
, cmp1
, 0x0000ff00);
730 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
731 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
732 tcg_gen_andi_i32(cmp2
, cmp1
, 0x000000ff);
733 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
734 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
739 case 0x2007: /* div0s Rm,Rn */
741 gen_copy_bit_i32(cpu_sr
, 8, REG(B11_8
), 31); /* SR_Q */
742 gen_copy_bit_i32(cpu_sr
, 9, REG(B7_4
), 31); /* SR_M */
743 TCGv val
= tcg_temp_new();
744 tcg_gen_xor_i32(val
, REG(B7_4
), REG(B11_8
));
745 gen_copy_bit_i32(cpu_sr
, 0, val
, 31); /* SR_T */
749 case 0x3004: /* div1 Rm,Rn */
750 gen_helper_div1(REG(B11_8
), cpu_env
, REG(B7_4
), REG(B11_8
));
752 case 0x300d: /* dmuls.l Rm,Rn */
753 tcg_gen_muls2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
755 case 0x3005: /* dmulu.l Rm,Rn */
756 tcg_gen_mulu2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
758 case 0x600e: /* exts.b Rm,Rn */
759 tcg_gen_ext8s_i32(REG(B11_8
), REG(B7_4
));
761 case 0x600f: /* exts.w Rm,Rn */
762 tcg_gen_ext16s_i32(REG(B11_8
), REG(B7_4
));
764 case 0x600c: /* extu.b Rm,Rn */
765 tcg_gen_ext8u_i32(REG(B11_8
), REG(B7_4
));
767 case 0x600d: /* extu.w Rm,Rn */
768 tcg_gen_ext16u_i32(REG(B11_8
), REG(B7_4
));
770 case 0x000f: /* mac.l @Rm+,@Rn+ */
773 arg0
= tcg_temp_new();
774 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
775 arg1
= tcg_temp_new();
776 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
777 gen_helper_macl(cpu_env
, arg0
, arg1
);
780 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
781 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
784 case 0x400f: /* mac.w @Rm+,@Rn+ */
787 arg0
= tcg_temp_new();
788 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
789 arg1
= tcg_temp_new();
790 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
791 gen_helper_macw(cpu_env
, arg0
, arg1
);
794 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 2);
795 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
798 case 0x0007: /* mul.l Rm,Rn */
799 tcg_gen_mul_i32(cpu_macl
, REG(B7_4
), REG(B11_8
));
801 case 0x200f: /* muls.w Rm,Rn */
804 arg0
= tcg_temp_new();
805 tcg_gen_ext16s_i32(arg0
, REG(B7_4
));
806 arg1
= tcg_temp_new();
807 tcg_gen_ext16s_i32(arg1
, REG(B11_8
));
808 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
813 case 0x200e: /* mulu.w Rm,Rn */
816 arg0
= tcg_temp_new();
817 tcg_gen_ext16u_i32(arg0
, REG(B7_4
));
818 arg1
= tcg_temp_new();
819 tcg_gen_ext16u_i32(arg1
, REG(B11_8
));
820 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
825 case 0x600b: /* neg Rm,Rn */
826 tcg_gen_neg_i32(REG(B11_8
), REG(B7_4
));
828 case 0x600a: /* negc Rm,Rn */
832 tcg_gen_neg_i32(t0
, REG(B7_4
));
834 tcg_gen_andi_i32(t1
, cpu_sr
, SR_T
);
835 tcg_gen_sub_i32(REG(B11_8
), t0
, t1
);
836 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
837 tcg_gen_setcondi_i32(TCG_COND_GTU
, t1
, t0
, 0);
838 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
839 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, REG(B11_8
), t0
);
840 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
845 case 0x6007: /* not Rm,Rn */
846 tcg_gen_not_i32(REG(B11_8
), REG(B7_4
));
848 case 0x200b: /* or Rm,Rn */
849 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
851 case 0x400c: /* shad Rm,Rn */
853 int label1
= gen_new_label();
854 int label2
= gen_new_label();
855 int label3
= gen_new_label();
856 int label4
= gen_new_label();
858 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
859 /* Rm positive, shift to the left */
860 shift
= tcg_temp_new();
861 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
862 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
863 tcg_temp_free(shift
);
865 /* Rm negative, shift to the right */
866 gen_set_label(label1
);
867 shift
= tcg_temp_new();
868 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
869 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
870 tcg_gen_not_i32(shift
, REG(B7_4
));
871 tcg_gen_andi_i32(shift
, shift
, 0x1f);
872 tcg_gen_addi_i32(shift
, shift
, 1);
873 tcg_gen_sar_i32(REG(B11_8
), REG(B11_8
), shift
);
874 tcg_temp_free(shift
);
877 gen_set_label(label2
);
878 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B11_8
), 0, label3
);
879 tcg_gen_movi_i32(REG(B11_8
), 0);
881 gen_set_label(label3
);
882 tcg_gen_movi_i32(REG(B11_8
), 0xffffffff);
883 gen_set_label(label4
);
886 case 0x400d: /* shld Rm,Rn */
888 int label1
= gen_new_label();
889 int label2
= gen_new_label();
890 int label3
= gen_new_label();
892 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
893 /* Rm positive, shift to the left */
894 shift
= tcg_temp_new();
895 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
896 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
897 tcg_temp_free(shift
);
899 /* Rm negative, shift to the right */
900 gen_set_label(label1
);
901 shift
= tcg_temp_new();
902 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
903 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
904 tcg_gen_not_i32(shift
, REG(B7_4
));
905 tcg_gen_andi_i32(shift
, shift
, 0x1f);
906 tcg_gen_addi_i32(shift
, shift
, 1);
907 tcg_gen_shr_i32(REG(B11_8
), REG(B11_8
), shift
);
908 tcg_temp_free(shift
);
911 gen_set_label(label2
);
912 tcg_gen_movi_i32(REG(B11_8
), 0);
913 gen_set_label(label3
);
916 case 0x3008: /* sub Rm,Rn */
917 tcg_gen_sub_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
919 case 0x300a: /* subc Rm,Rn */
923 tcg_gen_andi_i32(t0
, cpu_sr
, SR_T
);
925 tcg_gen_sub_i32(t1
, REG(B11_8
), REG(B7_4
));
926 tcg_gen_sub_i32(t0
, t1
, t0
);
928 tcg_gen_setcond_i32(TCG_COND_LTU
, t2
, REG(B11_8
), t1
);
929 tcg_gen_setcond_i32(TCG_COND_LTU
, t1
, t1
, t0
);
930 tcg_gen_or_i32(t1
, t1
, t2
);
932 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
933 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
935 tcg_gen_mov_i32(REG(B11_8
), t0
);
939 case 0x300b: /* subv Rm,Rn */
943 tcg_gen_sub_i32(t0
, REG(B11_8
), REG(B7_4
));
945 tcg_gen_xor_i32(t1
, t0
, REG(B7_4
));
947 tcg_gen_xor_i32(t2
, REG(B11_8
), REG(B7_4
));
948 tcg_gen_and_i32(t1
, t1
, t2
);
950 tcg_gen_shri_i32(t1
, t1
, 31);
951 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
952 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
954 tcg_gen_mov_i32(REG(B11_8
), t0
);
958 case 0x2008: /* tst Rm,Rn */
960 TCGv val
= tcg_temp_new();
961 tcg_gen_and_i32(val
, REG(B7_4
), REG(B11_8
));
962 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
966 case 0x200a: /* xor Rm,Rn */
967 tcg_gen_xor_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
969 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
971 if (ctx
->flags
& FPSCR_SZ
) {
972 TCGv_i64 fp
= tcg_temp_new_i64();
973 gen_load_fpr64(fp
, XREG(B7_4
));
974 gen_store_fpr64(fp
, XREG(B11_8
));
975 tcg_temp_free_i64(fp
);
977 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
980 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
982 if (ctx
->flags
& FPSCR_SZ
) {
983 TCGv addr_hi
= tcg_temp_new();
985 tcg_gen_addi_i32(addr_hi
, REG(B11_8
), 4);
986 tcg_gen_qemu_st32(cpu_fregs
[fr
], REG(B11_8
), ctx
->memidx
);
987 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
988 tcg_temp_free(addr_hi
);
990 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], REG(B11_8
), ctx
->memidx
);
993 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
995 if (ctx
->flags
& FPSCR_SZ
) {
996 TCGv addr_hi
= tcg_temp_new();
997 int fr
= XREG(B11_8
);
998 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
999 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
1000 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1001 tcg_temp_free(addr_hi
);
1003 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1006 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1008 if (ctx
->flags
& FPSCR_SZ
) {
1009 TCGv addr_hi
= tcg_temp_new();
1010 int fr
= XREG(B11_8
);
1011 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1012 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
1013 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1014 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 8);
1015 tcg_temp_free(addr_hi
);
1017 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1018 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
1021 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1023 if (ctx
->flags
& FPSCR_SZ
) {
1024 TCGv addr
= tcg_temp_new_i32();
1025 int fr
= XREG(B7_4
);
1026 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1027 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1028 tcg_gen_subi_i32(addr
, addr
, 4);
1029 tcg_gen_qemu_st32(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1030 tcg_gen_mov_i32(REG(B11_8
), addr
);
1031 tcg_temp_free(addr
);
1034 addr
= tcg_temp_new_i32();
1035 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1036 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1037 tcg_gen_mov_i32(REG(B11_8
), addr
);
1038 tcg_temp_free(addr
);
1041 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1044 TCGv addr
= tcg_temp_new_i32();
1045 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
1046 if (ctx
->flags
& FPSCR_SZ
) {
1047 int fr
= XREG(B11_8
);
1048 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1049 tcg_gen_addi_i32(addr
, addr
, 4);
1050 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1052 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], addr
, ctx
->memidx
);
1054 tcg_temp_free(addr
);
1057 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1060 TCGv addr
= tcg_temp_new();
1061 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
1062 if (ctx
->flags
& FPSCR_SZ
) {
1063 int fr
= XREG(B7_4
);
1064 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1065 tcg_gen_addi_i32(addr
, addr
, 4);
1066 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1068 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1070 tcg_temp_free(addr
);
1073 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1074 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1075 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1076 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1077 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1078 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1081 if (ctx
->flags
& FPSCR_PR
) {
1084 if (ctx
->opcode
& 0x0110)
1085 break; /* illegal instruction */
1086 fp0
= tcg_temp_new_i64();
1087 fp1
= tcg_temp_new_i64();
1088 gen_load_fpr64(fp0
, DREG(B11_8
));
1089 gen_load_fpr64(fp1
, DREG(B7_4
));
1090 switch (ctx
->opcode
& 0xf00f) {
1091 case 0xf000: /* fadd Rm,Rn */
1092 gen_helper_fadd_DT(fp0
, cpu_env
, fp0
, fp1
);
1094 case 0xf001: /* fsub Rm,Rn */
1095 gen_helper_fsub_DT(fp0
, cpu_env
, fp0
, fp1
);
1097 case 0xf002: /* fmul Rm,Rn */
1098 gen_helper_fmul_DT(fp0
, cpu_env
, fp0
, fp1
);
1100 case 0xf003: /* fdiv Rm,Rn */
1101 gen_helper_fdiv_DT(fp0
, cpu_env
, fp0
, fp1
);
1103 case 0xf004: /* fcmp/eq Rm,Rn */
1104 gen_helper_fcmp_eq_DT(cpu_env
, fp0
, fp1
);
1106 case 0xf005: /* fcmp/gt Rm,Rn */
1107 gen_helper_fcmp_gt_DT(cpu_env
, fp0
, fp1
);
1110 gen_store_fpr64(fp0
, DREG(B11_8
));
1111 tcg_temp_free_i64(fp0
);
1112 tcg_temp_free_i64(fp1
);
1114 switch (ctx
->opcode
& 0xf00f) {
1115 case 0xf000: /* fadd Rm,Rn */
1116 gen_helper_fadd_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1117 cpu_fregs
[FREG(B11_8
)],
1118 cpu_fregs
[FREG(B7_4
)]);
1120 case 0xf001: /* fsub Rm,Rn */
1121 gen_helper_fsub_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1122 cpu_fregs
[FREG(B11_8
)],
1123 cpu_fregs
[FREG(B7_4
)]);
1125 case 0xf002: /* fmul Rm,Rn */
1126 gen_helper_fmul_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1127 cpu_fregs
[FREG(B11_8
)],
1128 cpu_fregs
[FREG(B7_4
)]);
1130 case 0xf003: /* fdiv Rm,Rn */
1131 gen_helper_fdiv_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1132 cpu_fregs
[FREG(B11_8
)],
1133 cpu_fregs
[FREG(B7_4
)]);
1135 case 0xf004: /* fcmp/eq Rm,Rn */
1136 gen_helper_fcmp_eq_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1137 cpu_fregs
[FREG(B7_4
)]);
1139 case 0xf005: /* fcmp/gt Rm,Rn */
1140 gen_helper_fcmp_gt_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1141 cpu_fregs
[FREG(B7_4
)]);
1147 case 0xf00e: /* fmac FR0,RM,Rn */
1150 if (ctx
->flags
& FPSCR_PR
) {
1151 break; /* illegal instruction */
1153 gen_helper_fmac_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1154 cpu_fregs
[FREG(0)], cpu_fregs
[FREG(B7_4
)],
1155 cpu_fregs
[FREG(B11_8
)]);
1161 switch (ctx
->opcode
& 0xff00) {
1162 case 0xc900: /* and #imm,R0 */
1163 tcg_gen_andi_i32(REG(0), REG(0), B7_0
);
1165 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1168 addr
= tcg_temp_new();
1169 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1170 val
= tcg_temp_new();
1171 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1172 tcg_gen_andi_i32(val
, val
, B7_0
);
1173 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1175 tcg_temp_free(addr
);
1178 case 0x8b00: /* bf label */
1179 CHECK_NOT_DELAY_SLOT
1180 gen_conditional_jump(ctx
, ctx
->pc
+ 2,
1181 ctx
->pc
+ 4 + B7_0s
* 2);
1182 ctx
->bstate
= BS_BRANCH
;
1184 case 0x8f00: /* bf/s label */
1185 CHECK_NOT_DELAY_SLOT
1186 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 0);
1187 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1189 case 0x8900: /* bt label */
1190 CHECK_NOT_DELAY_SLOT
1191 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2,
1193 ctx
->bstate
= BS_BRANCH
;
1195 case 0x8d00: /* bt/s label */
1196 CHECK_NOT_DELAY_SLOT
1197 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 1);
1198 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1200 case 0x8800: /* cmp/eq #imm,R0 */
1201 gen_cmp_imm(TCG_COND_EQ
, REG(0), B7_0s
);
1203 case 0xc400: /* mov.b @(disp,GBR),R0 */
1205 TCGv addr
= tcg_temp_new();
1206 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1207 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1208 tcg_temp_free(addr
);
1211 case 0xc500: /* mov.w @(disp,GBR),R0 */
1213 TCGv addr
= tcg_temp_new();
1214 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1215 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1216 tcg_temp_free(addr
);
1219 case 0xc600: /* mov.l @(disp,GBR),R0 */
1221 TCGv addr
= tcg_temp_new();
1222 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1223 tcg_gen_qemu_ld32s(REG(0), addr
, ctx
->memidx
);
1224 tcg_temp_free(addr
);
1227 case 0xc000: /* mov.b R0,@(disp,GBR) */
1229 TCGv addr
= tcg_temp_new();
1230 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1231 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1232 tcg_temp_free(addr
);
1235 case 0xc100: /* mov.w R0,@(disp,GBR) */
1237 TCGv addr
= tcg_temp_new();
1238 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1239 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1240 tcg_temp_free(addr
);
1243 case 0xc200: /* mov.l R0,@(disp,GBR) */
1245 TCGv addr
= tcg_temp_new();
1246 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1247 tcg_gen_qemu_st32(REG(0), addr
, ctx
->memidx
);
1248 tcg_temp_free(addr
);
1251 case 0x8000: /* mov.b R0,@(disp,Rn) */
1253 TCGv addr
= tcg_temp_new();
1254 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1255 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1256 tcg_temp_free(addr
);
1259 case 0x8100: /* mov.w R0,@(disp,Rn) */
1261 TCGv addr
= tcg_temp_new();
1262 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1263 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1264 tcg_temp_free(addr
);
1267 case 0x8400: /* mov.b @(disp,Rn),R0 */
1269 TCGv addr
= tcg_temp_new();
1270 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1271 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1272 tcg_temp_free(addr
);
1275 case 0x8500: /* mov.w @(disp,Rn),R0 */
1277 TCGv addr
= tcg_temp_new();
1278 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1279 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1280 tcg_temp_free(addr
);
1283 case 0xc700: /* mova @(disp,PC),R0 */
1284 tcg_gen_movi_i32(REG(0), ((ctx
->pc
& 0xfffffffc) + 4 + B7_0
* 4) & ~3);
1286 case 0xcb00: /* or #imm,R0 */
1287 tcg_gen_ori_i32(REG(0), REG(0), B7_0
);
1289 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1292 addr
= tcg_temp_new();
1293 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1294 val
= tcg_temp_new();
1295 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1296 tcg_gen_ori_i32(val
, val
, B7_0
);
1297 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1299 tcg_temp_free(addr
);
1302 case 0xc300: /* trapa #imm */
1305 CHECK_NOT_DELAY_SLOT
1306 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1307 imm
= tcg_const_i32(B7_0
);
1308 gen_helper_trapa(cpu_env
, imm
);
1310 ctx
->bstate
= BS_BRANCH
;
1313 case 0xc800: /* tst #imm,R0 */
1315 TCGv val
= tcg_temp_new();
1316 tcg_gen_andi_i32(val
, REG(0), B7_0
);
1317 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1321 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1323 TCGv val
= tcg_temp_new();
1324 tcg_gen_add_i32(val
, REG(0), cpu_gbr
);
1325 tcg_gen_qemu_ld8u(val
, val
, ctx
->memidx
);
1326 tcg_gen_andi_i32(val
, val
, B7_0
);
1327 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1331 case 0xca00: /* xor #imm,R0 */
1332 tcg_gen_xori_i32(REG(0), REG(0), B7_0
);
1334 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1337 addr
= tcg_temp_new();
1338 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1339 val
= tcg_temp_new();
1340 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1341 tcg_gen_xori_i32(val
, val
, B7_0
);
1342 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1344 tcg_temp_free(addr
);
1349 switch (ctx
->opcode
& 0xf08f) {
1350 case 0x408e: /* ldc Rm,Rn_BANK */
1352 tcg_gen_mov_i32(ALTREG(B6_4
), REG(B11_8
));
1354 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1356 tcg_gen_qemu_ld32s(ALTREG(B6_4
), REG(B11_8
), ctx
->memidx
);
1357 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1359 case 0x0082: /* stc Rm_BANK,Rn */
1361 tcg_gen_mov_i32(REG(B11_8
), ALTREG(B6_4
));
1363 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1366 TCGv addr
= tcg_temp_new();
1367 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1368 tcg_gen_qemu_st32(ALTREG(B6_4
), addr
, ctx
->memidx
);
1369 tcg_gen_mov_i32(REG(B11_8
), addr
);
1370 tcg_temp_free(addr
);
1375 switch (ctx
->opcode
& 0xf0ff) {
1376 case 0x0023: /* braf Rn */
1377 CHECK_NOT_DELAY_SLOT
1378 tcg_gen_addi_i32(cpu_delayed_pc
, REG(B11_8
), ctx
->pc
+ 4);
1379 ctx
->flags
|= DELAY_SLOT
;
1380 ctx
->delayed_pc
= (uint32_t) - 1;
1382 case 0x0003: /* bsrf Rn */
1383 CHECK_NOT_DELAY_SLOT
1384 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1385 tcg_gen_add_i32(cpu_delayed_pc
, REG(B11_8
), cpu_pr
);
1386 ctx
->flags
|= DELAY_SLOT
;
1387 ctx
->delayed_pc
= (uint32_t) - 1;
1389 case 0x4015: /* cmp/pl Rn */
1390 gen_cmp_imm(TCG_COND_GT
, REG(B11_8
), 0);
1392 case 0x4011: /* cmp/pz Rn */
1393 gen_cmp_imm(TCG_COND_GE
, REG(B11_8
), 0);
1395 case 0x4010: /* dt Rn */
1396 tcg_gen_subi_i32(REG(B11_8
), REG(B11_8
), 1);
1397 gen_cmp_imm(TCG_COND_EQ
, REG(B11_8
), 0);
1399 case 0x402b: /* jmp @Rn */
1400 CHECK_NOT_DELAY_SLOT
1401 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1402 ctx
->flags
|= DELAY_SLOT
;
1403 ctx
->delayed_pc
= (uint32_t) - 1;
1405 case 0x400b: /* jsr @Rn */
1406 CHECK_NOT_DELAY_SLOT
1407 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1408 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1409 ctx
->flags
|= DELAY_SLOT
;
1410 ctx
->delayed_pc
= (uint32_t) - 1;
1412 case 0x400e: /* ldc Rm,SR */
1414 tcg_gen_andi_i32(cpu_sr
, REG(B11_8
), 0x700083f3);
1415 ctx
->bstate
= BS_STOP
;
1417 case 0x4007: /* ldc.l @Rm+,SR */
1420 TCGv val
= tcg_temp_new();
1421 tcg_gen_qemu_ld32s(val
, REG(B11_8
), ctx
->memidx
);
1422 tcg_gen_andi_i32(cpu_sr
, val
, 0x700083f3);
1424 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1425 ctx
->bstate
= BS_STOP
;
1428 case 0x0002: /* stc SR,Rn */
1430 tcg_gen_mov_i32(REG(B11_8
), cpu_sr
);
1432 case 0x4003: /* stc SR,@-Rn */
1435 TCGv addr
= tcg_temp_new();
1436 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1437 tcg_gen_qemu_st32(cpu_sr
, addr
, ctx
->memidx
);
1438 tcg_gen_mov_i32(REG(B11_8
), addr
);
1439 tcg_temp_free(addr
);
1442 #define LD(reg,ldnum,ldpnum,prechk) \
1445 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1449 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1450 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1452 #define ST(reg,stnum,stpnum,prechk) \
1455 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1460 TCGv addr = tcg_temp_new(); \
1461 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1462 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1463 tcg_gen_mov_i32(REG(B11_8), addr); \
1464 tcg_temp_free(addr); \
1467 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1468 LD(reg,ldnum,ldpnum,prechk) \
1469 ST(reg,stnum,stpnum,prechk)
1470 LDST(gbr
, 0x401e, 0x4017, 0x0012, 0x4013, {})
1471 LDST(vbr
, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED
)
1472 LDST(ssr
, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED
)
1473 LDST(spc
, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED
)
1474 ST(sgr
, 0x003a, 0x4032, CHECK_PRIVILEGED
)
1475 LD(sgr
, 0x403a, 0x4036, CHECK_PRIVILEGED
if (!(ctx
->features
& SH_FEATURE_SH4A
)) break;)
1476 LDST(dbr
, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED
)
1477 LDST(mach
, 0x400a, 0x4006, 0x000a, 0x4002, {})
1478 LDST(macl
, 0x401a, 0x4016, 0x001a, 0x4012, {})
1479 LDST(pr
, 0x402a, 0x4026, 0x002a, 0x4022, {})
1480 LDST(fpul
, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED
})
1481 case 0x406a: /* lds Rm,FPSCR */
1483 gen_helper_ld_fpscr(cpu_env
, REG(B11_8
));
1484 ctx
->bstate
= BS_STOP
;
1486 case 0x4066: /* lds.l @Rm+,FPSCR */
1489 TCGv addr
= tcg_temp_new();
1490 tcg_gen_qemu_ld32s(addr
, REG(B11_8
), ctx
->memidx
);
1491 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1492 gen_helper_ld_fpscr(cpu_env
, addr
);
1493 tcg_temp_free(addr
);
1494 ctx
->bstate
= BS_STOP
;
1497 case 0x006a: /* sts FPSCR,Rn */
1499 tcg_gen_andi_i32(REG(B11_8
), cpu_fpscr
, 0x003fffff);
1501 case 0x4062: /* sts FPSCR,@-Rn */
1505 val
= tcg_temp_new();
1506 tcg_gen_andi_i32(val
, cpu_fpscr
, 0x003fffff);
1507 addr
= tcg_temp_new();
1508 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1509 tcg_gen_qemu_st32(val
, addr
, ctx
->memidx
);
1510 tcg_gen_mov_i32(REG(B11_8
), addr
);
1511 tcg_temp_free(addr
);
1515 case 0x00c3: /* movca.l R0,@Rm */
1517 TCGv val
= tcg_temp_new();
1518 tcg_gen_qemu_ld32u(val
, REG(B11_8
), ctx
->memidx
);
1519 gen_helper_movcal(cpu_env
, REG(B11_8
), val
);
1520 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1522 ctx
->has_movcal
= 1;
1525 /* MOVUA.L @Rm,R0 (Rm) -> R0
1526 Load non-boundary-aligned data */
1527 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1530 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1531 Load non-boundary-aligned data */
1532 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1533 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1535 case 0x0029: /* movt Rn */
1536 tcg_gen_andi_i32(REG(B11_8
), cpu_sr
, SR_T
);
1541 If (T == 1) R0 -> (Rn)
1544 if (ctx
->features
& SH_FEATURE_SH4A
) {
1545 int label
= gen_new_label();
1546 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
1547 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cpu_ldst
);
1548 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ldst
, 0, label
);
1549 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1550 gen_set_label(label
);
1551 tcg_gen_movi_i32(cpu_ldst
, 0);
1559 When interrupt/exception
1562 if (ctx
->features
& SH_FEATURE_SH4A
) {
1563 tcg_gen_movi_i32(cpu_ldst
, 0);
1564 tcg_gen_qemu_ld32s(REG(0), REG(B11_8
), ctx
->memidx
);
1565 tcg_gen_movi_i32(cpu_ldst
, 1);
1569 case 0x0093: /* ocbi @Rn */
1571 gen_helper_ocbi(cpu_env
, REG(B11_8
));
1574 case 0x00a3: /* ocbp @Rn */
1575 case 0x00b3: /* ocbwb @Rn */
1576 /* These instructions are supposed to do nothing in case of
1577 a cache miss. Given that we only partially emulate caches
1578 it is safe to simply ignore them. */
1580 case 0x0083: /* pref @Rn */
1582 case 0x00d3: /* prefi @Rn */
1583 if (ctx
->features
& SH_FEATURE_SH4A
)
1587 case 0x00e3: /* icbi @Rn */
1588 if (ctx
->features
& SH_FEATURE_SH4A
)
1592 case 0x00ab: /* synco */
1593 if (ctx
->features
& SH_FEATURE_SH4A
)
1597 case 0x4024: /* rotcl Rn */
1599 TCGv tmp
= tcg_temp_new();
1600 tcg_gen_mov_i32(tmp
, cpu_sr
);
1601 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1602 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1603 gen_copy_bit_i32(REG(B11_8
), 0, tmp
, 0);
1607 case 0x4025: /* rotcr Rn */
1609 TCGv tmp
= tcg_temp_new();
1610 tcg_gen_mov_i32(tmp
, cpu_sr
);
1611 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1612 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1613 gen_copy_bit_i32(REG(B11_8
), 31, tmp
, 0);
1617 case 0x4004: /* rotl Rn */
1618 tcg_gen_rotli_i32(REG(B11_8
), REG(B11_8
), 1);
1619 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1621 case 0x4005: /* rotr Rn */
1622 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1623 tcg_gen_rotri_i32(REG(B11_8
), REG(B11_8
), 1);
1625 case 0x4000: /* shll Rn */
1626 case 0x4020: /* shal Rn */
1627 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1628 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1630 case 0x4021: /* shar Rn */
1631 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1632 tcg_gen_sari_i32(REG(B11_8
), REG(B11_8
), 1);
1634 case 0x4001: /* shlr Rn */
1635 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1636 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1638 case 0x4008: /* shll2 Rn */
1639 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 2);
1641 case 0x4018: /* shll8 Rn */
1642 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 8);
1644 case 0x4028: /* shll16 Rn */
1645 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 16);
1647 case 0x4009: /* shlr2 Rn */
1648 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 2);
1650 case 0x4019: /* shlr8 Rn */
1651 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 8);
1653 case 0x4029: /* shlr16 Rn */
1654 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 16);
1656 case 0x401b: /* tas.b @Rn */
1659 addr
= tcg_temp_local_new();
1660 tcg_gen_mov_i32(addr
, REG(B11_8
));
1661 val
= tcg_temp_local_new();
1662 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1663 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1664 tcg_gen_ori_i32(val
, val
, 0x80);
1665 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1667 tcg_temp_free(addr
);
1670 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1672 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1674 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1676 tcg_gen_mov_i32(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1678 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1680 if (ctx
->flags
& FPSCR_PR
) {
1682 if (ctx
->opcode
& 0x0100)
1683 break; /* illegal instruction */
1684 fp
= tcg_temp_new_i64();
1685 gen_helper_float_DT(fp
, cpu_env
, cpu_fpul
);
1686 gen_store_fpr64(fp
, DREG(B11_8
));
1687 tcg_temp_free_i64(fp
);
1690 gen_helper_float_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
, cpu_fpul
);
1693 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1695 if (ctx
->flags
& FPSCR_PR
) {
1697 if (ctx
->opcode
& 0x0100)
1698 break; /* illegal instruction */
1699 fp
= tcg_temp_new_i64();
1700 gen_load_fpr64(fp
, DREG(B11_8
));
1701 gen_helper_ftrc_DT(cpu_fpul
, cpu_env
, fp
);
1702 tcg_temp_free_i64(fp
);
1705 gen_helper_ftrc_FT(cpu_fpul
, cpu_env
, cpu_fregs
[FREG(B11_8
)]);
1708 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1711 gen_helper_fneg_T(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1714 case 0xf05d: /* fabs FRn/DRn */
1716 if (ctx
->flags
& FPSCR_PR
) {
1717 if (ctx
->opcode
& 0x0100)
1718 break; /* illegal instruction */
1719 TCGv_i64 fp
= tcg_temp_new_i64();
1720 gen_load_fpr64(fp
, DREG(B11_8
));
1721 gen_helper_fabs_DT(fp
, fp
);
1722 gen_store_fpr64(fp
, DREG(B11_8
));
1723 tcg_temp_free_i64(fp
);
1725 gen_helper_fabs_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1728 case 0xf06d: /* fsqrt FRn */
1730 if (ctx
->flags
& FPSCR_PR
) {
1731 if (ctx
->opcode
& 0x0100)
1732 break; /* illegal instruction */
1733 TCGv_i64 fp
= tcg_temp_new_i64();
1734 gen_load_fpr64(fp
, DREG(B11_8
));
1735 gen_helper_fsqrt_DT(fp
, cpu_env
, fp
);
1736 gen_store_fpr64(fp
, DREG(B11_8
));
1737 tcg_temp_free_i64(fp
);
1739 gen_helper_fsqrt_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1740 cpu_fregs
[FREG(B11_8
)]);
1743 case 0xf07d: /* fsrra FRn */
1746 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1748 if (!(ctx
->flags
& FPSCR_PR
)) {
1749 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0);
1752 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1754 if (!(ctx
->flags
& FPSCR_PR
)) {
1755 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0x3f800000);
1758 case 0xf0ad: /* fcnvsd FPUL,DRn */
1761 TCGv_i64 fp
= tcg_temp_new_i64();
1762 gen_helper_fcnvsd_FT_DT(fp
, cpu_env
, cpu_fpul
);
1763 gen_store_fpr64(fp
, DREG(B11_8
));
1764 tcg_temp_free_i64(fp
);
1767 case 0xf0bd: /* fcnvds DRn,FPUL */
1770 TCGv_i64 fp
= tcg_temp_new_i64();
1771 gen_load_fpr64(fp
, DREG(B11_8
));
1772 gen_helper_fcnvds_DT_FT(cpu_fpul
, cpu_env
, fp
);
1773 tcg_temp_free_i64(fp
);
1776 case 0xf0ed: /* fipr FVm,FVn */
1778 if ((ctx
->flags
& FPSCR_PR
) == 0) {
1780 m
= tcg_const_i32((ctx
->opcode
>> 8) & 3);
1781 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1782 gen_helper_fipr(cpu_env
, m
, n
);
1788 case 0xf0fd: /* ftrv XMTRX,FVn */
1790 if ((ctx
->opcode
& 0x0300) == 0x0100 &&
1791 (ctx
->flags
& FPSCR_PR
) == 0) {
1793 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1794 gen_helper_ftrv(cpu_env
, n
);
1801 fprintf(stderr
, "unknown instruction 0x%04x at pc 0x%08x\n",
1802 ctx
->opcode
, ctx
->pc
);
1805 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1806 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1807 gen_helper_raise_slot_illegal_instruction(cpu_env
);
1809 gen_helper_raise_illegal_instruction(cpu_env
);
1811 ctx
->bstate
= BS_BRANCH
;
1814 static void decode_opc(DisasContext
* ctx
)
1816 uint32_t old_flags
= ctx
->flags
;
1818 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
1819 tcg_gen_debug_insn_start(ctx
->pc
);
1824 if (old_flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1825 if (ctx
->flags
& DELAY_SLOT_CLEARME
) {
1828 /* go out of the delay slot */
1829 uint32_t new_flags
= ctx
->flags
;
1830 new_flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
1831 gen_store_flags(new_flags
);
1834 ctx
->bstate
= BS_BRANCH
;
1835 if (old_flags
& DELAY_SLOT_CONDITIONAL
) {
1836 gen_delayed_conditional_jump(ctx
);
1837 } else if (old_flags
& DELAY_SLOT
) {
1843 /* go into a delay slot */
1844 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))
1845 gen_store_flags(ctx
->flags
);
1849 gen_intermediate_code_internal(SuperHCPU
*cpu
, TranslationBlock
*tb
,
1852 CPUSH4State
*env
= &cpu
->env
;
1854 target_ulong pc_start
;
1855 static uint16_t *gen_opc_end
;
1862 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
1864 ctx
.flags
= (uint32_t)tb
->flags
;
1865 ctx
.bstate
= BS_NONE
;
1866 ctx
.memidx
= (ctx
.flags
& SR_MD
) == 0 ? 1 : 0;
1867 /* We don't know if the delayed pc came from a dynamic or static branch,
1868 so assume it is a dynamic branch. */
1869 ctx
.delayed_pc
= -1; /* use delayed pc from env pointer */
1871 ctx
.singlestep_enabled
= env
->singlestep_enabled
;
1872 ctx
.features
= env
->features
;
1873 ctx
.has_movcal
= (ctx
.flags
& TB_FLAG_PENDING_MOVCA
);
1877 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
1879 max_insns
= CF_COUNT_MASK
;
1881 while (ctx
.bstate
== BS_NONE
&& tcg_ctx
.gen_opc_ptr
< gen_opc_end
) {
1882 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
1883 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1884 if (ctx
.pc
== bp
->pc
) {
1885 /* We have hit a breakpoint - make sure PC is up-to-date */
1886 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1887 gen_helper_debug(cpu_env
);
1888 ctx
.bstate
= BS_BRANCH
;
1894 i
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
1898 tcg_ctx
.gen_opc_instr_start
[ii
++] = 0;
1900 tcg_ctx
.gen_opc_pc
[ii
] = ctx
.pc
;
1901 gen_opc_hflags
[ii
] = ctx
.flags
;
1902 tcg_ctx
.gen_opc_instr_start
[ii
] = 1;
1903 tcg_ctx
.gen_opc_icount
[ii
] = num_insns
;
1905 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
1908 fprintf(stderr
, "Loading opcode at address 0x%08x\n", ctx
.pc
);
1911 ctx
.opcode
= cpu_lduw_code(env
, ctx
.pc
);
1915 if ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
1917 if (env
->singlestep_enabled
)
1919 if (num_insns
>= max_insns
)
1924 if (tb
->cflags
& CF_LAST_IO
)
1926 if (env
->singlestep_enabled
) {
1927 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1928 gen_helper_debug(cpu_env
);
1930 switch (ctx
.bstate
) {
1932 /* gen_op_interrupt_restart(); */
1936 gen_store_flags(ctx
.flags
| DELAY_SLOT_CLEARME
);
1938 gen_goto_tb(&ctx
, 0, ctx
.pc
);
1941 /* gen_op_interrupt_restart(); */
1950 gen_tb_end(tb
, num_insns
);
1951 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
1953 i
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
1956 tcg_ctx
.gen_opc_instr_start
[ii
++] = 0;
1958 tb
->size
= ctx
.pc
- pc_start
;
1959 tb
->icount
= num_insns
;
1963 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
1964 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
1965 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 0);
1971 void gen_intermediate_code(CPUSH4State
* env
, struct TranslationBlock
*tb
)
1973 gen_intermediate_code_internal(sh_env_get_cpu(env
), tb
, false);
1976 void gen_intermediate_code_pc(CPUSH4State
* env
, struct TranslationBlock
*tb
)
1978 gen_intermediate_code_internal(sh_env_get_cpu(env
), tb
, true);
1981 void restore_state_to_opc(CPUSH4State
*env
, TranslationBlock
*tb
, int pc_pos
)
1983 env
->pc
= tcg_ctx
.gen_opc_pc
[pc_pos
];
1984 env
->flags
= gen_opc_hflags
[pc_pos
];