4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 //#define SH4_SINGLE_STEP
24 #include "disas/disas.h"
31 typedef struct DisasContext
{
32 struct TranslationBlock
*tb
;
39 int singlestep_enabled
;
44 #if defined(CONFIG_USER_ONLY)
45 #define IS_USER(ctx) 1
47 #define IS_USER(ctx) (!(ctx->flags & SR_MD))
51 BS_NONE
= 0, /* We go out of the TB without reaching a branch or an
54 BS_STOP
= 1, /* We want to stop translation for any reason */
55 BS_BRANCH
= 2, /* We reached a branch condition */
56 BS_EXCP
= 3, /* We reached an exception condition */
/* global register indexes */
/* TCG globals mapping the architectural SH-4 register file; created once
   by sh4_translate_init() below. */
static TCGv_ptr cpu_env;
static TCGv cpu_gregs[24];    /* R0-R7 bank0, R8-R15, R0-R7 bank1 */
static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];    /* FPR0-15 of both FPSCR.FR banks */

/* internal register indexes */
/* Non-architectural helpers: delay-slot flags and the pending branch
   target used by the delayed-branch machinery. */
static TCGv cpu_flags, cpu_delayed_pc;

static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
72 #include "gen-icount.h"
74 static void sh4_translate_init(void)
77 static int done_init
= 0;
78 static const char * const gregnames
[24] = {
79 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
80 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
81 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
82 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
83 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
85 static const char * const fregnames
[32] = {
86 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
87 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
88 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
89 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
90 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
91 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
92 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
93 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
99 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
101 for (i
= 0; i
< 24; i
++)
102 cpu_gregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
103 offsetof(CPUSH4State
, gregs
[i
]),
106 cpu_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
107 offsetof(CPUSH4State
, pc
), "PC");
108 cpu_sr
= tcg_global_mem_new_i32(TCG_AREG0
,
109 offsetof(CPUSH4State
, sr
), "SR");
110 cpu_ssr
= tcg_global_mem_new_i32(TCG_AREG0
,
111 offsetof(CPUSH4State
, ssr
), "SSR");
112 cpu_spc
= tcg_global_mem_new_i32(TCG_AREG0
,
113 offsetof(CPUSH4State
, spc
), "SPC");
114 cpu_gbr
= tcg_global_mem_new_i32(TCG_AREG0
,
115 offsetof(CPUSH4State
, gbr
), "GBR");
116 cpu_vbr
= tcg_global_mem_new_i32(TCG_AREG0
,
117 offsetof(CPUSH4State
, vbr
), "VBR");
118 cpu_sgr
= tcg_global_mem_new_i32(TCG_AREG0
,
119 offsetof(CPUSH4State
, sgr
), "SGR");
120 cpu_dbr
= tcg_global_mem_new_i32(TCG_AREG0
,
121 offsetof(CPUSH4State
, dbr
), "DBR");
122 cpu_mach
= tcg_global_mem_new_i32(TCG_AREG0
,
123 offsetof(CPUSH4State
, mach
), "MACH");
124 cpu_macl
= tcg_global_mem_new_i32(TCG_AREG0
,
125 offsetof(CPUSH4State
, macl
), "MACL");
126 cpu_pr
= tcg_global_mem_new_i32(TCG_AREG0
,
127 offsetof(CPUSH4State
, pr
), "PR");
128 cpu_fpscr
= tcg_global_mem_new_i32(TCG_AREG0
,
129 offsetof(CPUSH4State
, fpscr
), "FPSCR");
130 cpu_fpul
= tcg_global_mem_new_i32(TCG_AREG0
,
131 offsetof(CPUSH4State
, fpul
), "FPUL");
133 cpu_flags
= tcg_global_mem_new_i32(TCG_AREG0
,
134 offsetof(CPUSH4State
, flags
), "_flags_");
135 cpu_delayed_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
136 offsetof(CPUSH4State
, delayed_pc
),
138 cpu_ldst
= tcg_global_mem_new_i32(TCG_AREG0
,
139 offsetof(CPUSH4State
, ldst
), "_ldst_");
141 for (i
= 0; i
< 32; i
++)
142 cpu_fregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
143 offsetof(CPUSH4State
, fregs
[i
]),
146 /* register helpers */
153 void cpu_dump_state(CPUSH4State
* env
, FILE * f
,
154 int (*cpu_fprintf
) (FILE * f
, const char *fmt
, ...),
158 cpu_fprintf(f
, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
159 env
->pc
, env
->sr
, env
->pr
, env
->fpscr
);
160 cpu_fprintf(f
, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
161 env
->spc
, env
->ssr
, env
->gbr
, env
->vbr
);
162 cpu_fprintf(f
, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
163 env
->sgr
, env
->dbr
, env
->delayed_pc
, env
->fpul
);
164 for (i
= 0; i
< 24; i
+= 4) {
165 cpu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
166 i
, env
->gregs
[i
], i
+ 1, env
->gregs
[i
+ 1],
167 i
+ 2, env
->gregs
[i
+ 2], i
+ 3, env
->gregs
[i
+ 3]);
169 if (env
->flags
& DELAY_SLOT
) {
170 cpu_fprintf(f
, "in delay slot (delayed_pc=0x%08x)\n",
172 } else if (env
->flags
& DELAY_SLOT_CONDITIONAL
) {
173 cpu_fprintf(f
, "in conditional delay slot (delayed_pc=0x%08x)\n",
/* Table of supported SH-4 CPU models.
   NOTE(review): this extraction is missing the per-entry braces and the
   .name/.pvr/.prr fields of each entry (only .id/.cvr/.features survive);
   the hex version-register values must be recovered from the repository
   copy — do not guess them. */
static sh4_def_t sh4_defs[] = {
.id = SH_CPU_SH7750R,
.features = SH_FEATURE_BCR3_AND_BCR4,
.id = SH_CPU_SH7751R,
.cvr = 0x00110000, /* Neutered caches, should be 0x20480000 */
.features = SH_FEATURE_BCR3_AND_BCR4,
.features = SH_FEATURE_SH4A,
212 static const sh4_def_t
*cpu_sh4_find_by_name(const char *name
)
216 if (strcasecmp(name
, "any") == 0)
219 for (i
= 0; i
< ARRAY_SIZE(sh4_defs
); i
++)
220 if (strcasecmp(name
, sh4_defs
[i
].name
) == 0)
226 void sh4_cpu_list(FILE *f
, fprintf_function cpu_fprintf
)
230 for (i
= 0; i
< ARRAY_SIZE(sh4_defs
); i
++)
231 (*cpu_fprintf
)(f
, "%s\n", sh4_defs
[i
].name
);
234 static void cpu_register(CPUSH4State
*env
, const sh4_def_t
*def
)
242 SuperHCPU
*cpu_sh4_init(const char *cpu_model
)
246 const sh4_def_t
*def
;
248 def
= cpu_sh4_find_by_name(cpu_model
);
251 cpu
= SUPERH_CPU(object_new(TYPE_SUPERH_CPU
));
253 env
->features
= def
->features
;
254 sh4_translate_init();
255 env
->cpu_model_str
= cpu_model
;
257 cpu_register(env
, def
);
262 static void gen_goto_tb(DisasContext
* ctx
, int n
, target_ulong dest
)
264 TranslationBlock
*tb
;
267 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
268 !ctx
->singlestep_enabled
) {
269 /* Use a direct jump if in same page and singlestep not enabled */
271 tcg_gen_movi_i32(cpu_pc
, dest
);
272 tcg_gen_exit_tb((tcg_target_long
)tb
+ n
);
274 tcg_gen_movi_i32(cpu_pc
, dest
);
275 if (ctx
->singlestep_enabled
)
276 gen_helper_debug(cpu_env
);
281 static void gen_jump(DisasContext
* ctx
)
283 if (ctx
->delayed_pc
== (uint32_t) - 1) {
284 /* Target is not statically known, it comes necessarily from a
285 delayed jump as immediate jump are conditinal jumps */
286 tcg_gen_mov_i32(cpu_pc
, cpu_delayed_pc
);
287 if (ctx
->singlestep_enabled
)
288 gen_helper_debug(cpu_env
);
291 gen_goto_tb(ctx
, 0, ctx
->delayed_pc
);
295 static inline void gen_branch_slot(uint32_t delayed_pc
, int t
)
298 int label
= gen_new_label();
299 tcg_gen_movi_i32(cpu_delayed_pc
, delayed_pc
);
301 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
302 tcg_gen_brcondi_i32(t
? TCG_COND_EQ
:TCG_COND_NE
, sr
, 0, label
);
303 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
304 gen_set_label(label
);
307 /* Immediate conditional jump (bt or bf) */
308 static void gen_conditional_jump(DisasContext
* ctx
,
309 target_ulong ift
, target_ulong ifnott
)
314 l1
= gen_new_label();
316 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
317 tcg_gen_brcondi_i32(TCG_COND_NE
, sr
, 0, l1
);
318 gen_goto_tb(ctx
, 0, ifnott
);
320 gen_goto_tb(ctx
, 1, ift
);
323 /* Delayed conditional jump (bt or bf) */
324 static void gen_delayed_conditional_jump(DisasContext
* ctx
)
329 l1
= gen_new_label();
331 tcg_gen_andi_i32(ds
, cpu_flags
, DELAY_SLOT_TRUE
);
332 tcg_gen_brcondi_i32(TCG_COND_NE
, ds
, 0, l1
);
333 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
335 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, ~DELAY_SLOT_TRUE
);
339 static inline void gen_cmp(int cond
, TCGv t0
, TCGv t1
)
344 tcg_gen_setcond_i32(cond
, t
, t1
, t0
);
345 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
346 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
351 static inline void gen_cmp_imm(int cond
, TCGv t0
, int32_t imm
)
356 tcg_gen_setcondi_i32(cond
, t
, t0
, imm
);
357 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
358 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
363 static inline void gen_store_flags(uint32_t flags
)
365 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
366 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, flags
);
369 static inline void gen_copy_bit_i32(TCGv t0
, int p0
, TCGv t1
, int p1
)
371 TCGv tmp
= tcg_temp_new();
376 tcg_gen_andi_i32(tmp
, t1
, (1 << p1
));
377 tcg_gen_andi_i32(t0
, t0
, ~(1 << p0
));
379 tcg_gen_shri_i32(tmp
, tmp
, p1
- p0
);
381 tcg_gen_shli_i32(tmp
, tmp
, p0
- p1
);
382 tcg_gen_or_i32(t0
, t0
, tmp
);
387 static inline void gen_load_fpr64(TCGv_i64 t
, int reg
)
389 tcg_gen_concat_i32_i64(t
, cpu_fregs
[reg
+ 1], cpu_fregs
[reg
]);
392 static inline void gen_store_fpr64 (TCGv_i64 t
, int reg
)
394 TCGv_i32 tmp
= tcg_temp_new_i32();
395 tcg_gen_trunc_i64_i32(tmp
, t
);
396 tcg_gen_mov_i32(cpu_fregs
[reg
+ 1], tmp
);
397 tcg_gen_shri_i64(t
, t
, 32);
398 tcg_gen_trunc_i64_i32(tmp
, t
);
399 tcg_gen_mov_i32(cpu_fregs
[reg
], tmp
);
400 tcg_temp_free_i32(tmp
);
/* Opcode field extractors: B<hi>_<lo> selects bits hi..lo of the 16-bit
   opcode; the "s" suffix marks a sign-extended immediate. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* REG: R0-R7 resolve to bank1 in privileged mode with SR.RB set,
   bank0 otherwise; ALTREG selects the opposite bank (used by the
   ldc/stc Rm_BANK instructions). */
#define REG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) == (SR_MD | SR_RB) \
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define ALTREG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) != (SR_MD | SR_RB)\
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register selection honouring the FPSCR.FR bank swap; XREG applies
   the XD pair renumbering first. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */

/* Guard macros: raise the appropriate exception and abort decoding of
   the current instruction when its context requirements are violated.
   NOTE(review): the brace/else/return continuation lines were dropped
   by the extraction; restored. */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))     \
  {                                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                      \
      gen_helper_raise_slot_illegal_instruction(cpu_env);     \
      ctx->bstate = BS_BRANCH;                                \
      return;                                                 \
  }

#define CHECK_PRIVILEGED                                        \
  if (IS_USER(ctx)) {                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
         gen_helper_raise_slot_illegal_instruction(cpu_env);    \
      } else {                                                  \
         gen_helper_raise_illegal_instruction(cpu_env);         \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }

#define CHECK_FPU_ENABLED                                       \
  if (ctx->flags & SR_FD) {                                     \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
         gen_helper_raise_slot_fpu_disable(cpu_env);            \
      } else {                                                  \
         gen_helper_raise_fpu_disable(cpu_env);                 \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }
457 static void _decode_opc(DisasContext
* ctx
)
459 /* This code tries to make movcal emulation sufficiently
460 accurate for Linux purposes. This instruction writes
461 memory, and prior to that, always allocates a cache line.
462 It is used in two contexts:
463 - in memcpy, where data is copied in blocks, the first write
464 of to a block uses movca.l for performance.
465 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
466 to flush the cache. Here, the data written by movcal.l is never
467 written to memory, and the data written is just bogus.
469 To simulate this, we simulate movcal.l, we store the value to memory,
470 but we also remember the previous content. If we see ocbi, we check
471 if movcal.l for that address was done previously. If so, the write should
472 not have hit the memory, so we restore the previous content.
473 When we see an instruction that is neither movca.l
474 nor ocbi, the previous content is discarded.
476 To optimize, we only try to flush stores when we're at the start of
477 TB, or if we already saw movca.l in this TB and did not flush stores
481 int opcode
= ctx
->opcode
& 0xf0ff;
482 if (opcode
!= 0x0093 /* ocbi */
483 && opcode
!= 0x00c3 /* movca.l */)
485 gen_helper_discard_movcal_backup(cpu_env
);
491 fprintf(stderr
, "Translating opcode 0x%04x\n", ctx
->opcode
);
494 switch (ctx
->opcode
) {
495 case 0x0019: /* div0u */
496 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~(SR_M
| SR_Q
| SR_T
));
498 case 0x000b: /* rts */
500 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_pr
);
501 ctx
->flags
|= DELAY_SLOT
;
502 ctx
->delayed_pc
= (uint32_t) - 1;
504 case 0x0028: /* clrmac */
505 tcg_gen_movi_i32(cpu_mach
, 0);
506 tcg_gen_movi_i32(cpu_macl
, 0);
508 case 0x0048: /* clrs */
509 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_S
);
511 case 0x0008: /* clrt */
512 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
514 case 0x0038: /* ldtlb */
516 gen_helper_ldtlb(cpu_env
);
518 case 0x002b: /* rte */
521 tcg_gen_mov_i32(cpu_sr
, cpu_ssr
);
522 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_spc
);
523 ctx
->flags
|= DELAY_SLOT
;
524 ctx
->delayed_pc
= (uint32_t) - 1;
526 case 0x0058: /* sets */
527 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_S
);
529 case 0x0018: /* sett */
530 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_T
);
532 case 0xfbfd: /* frchg */
533 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_FR
);
534 ctx
->bstate
= BS_STOP
;
536 case 0xf3fd: /* fschg */
537 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_SZ
);
538 ctx
->bstate
= BS_STOP
;
540 case 0x0009: /* nop */
542 case 0x001b: /* sleep */
544 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
+ 2);
545 gen_helper_sleep(cpu_env
);
549 switch (ctx
->opcode
& 0xf000) {
550 case 0x1000: /* mov.l Rm,@(disp,Rn) */
552 TCGv addr
= tcg_temp_new();
553 tcg_gen_addi_i32(addr
, REG(B11_8
), B3_0
* 4);
554 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
558 case 0x5000: /* mov.l @(disp,Rm),Rn */
560 TCGv addr
= tcg_temp_new();
561 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 4);
562 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
566 case 0xe000: /* mov #imm,Rn */
567 tcg_gen_movi_i32(REG(B11_8
), B7_0s
);
569 case 0x9000: /* mov.w @(disp,PC),Rn */
571 TCGv addr
= tcg_const_i32(ctx
->pc
+ 4 + B7_0
* 2);
572 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
576 case 0xd000: /* mov.l @(disp,PC),Rn */
578 TCGv addr
= tcg_const_i32((ctx
->pc
+ 4 + B7_0
* 4) & ~3);
579 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
583 case 0x7000: /* add #imm,Rn */
584 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), B7_0s
);
586 case 0xa000: /* bra disp */
588 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
589 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
590 ctx
->flags
|= DELAY_SLOT
;
592 case 0xb000: /* bsr disp */
594 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
595 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
596 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
597 ctx
->flags
|= DELAY_SLOT
;
601 switch (ctx
->opcode
& 0xf00f) {
602 case 0x6003: /* mov Rm,Rn */
603 tcg_gen_mov_i32(REG(B11_8
), REG(B7_4
));
605 case 0x2000: /* mov.b Rm,@Rn */
606 tcg_gen_qemu_st8(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
608 case 0x2001: /* mov.w Rm,@Rn */
609 tcg_gen_qemu_st16(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
611 case 0x2002: /* mov.l Rm,@Rn */
612 tcg_gen_qemu_st32(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
614 case 0x6000: /* mov.b @Rm,Rn */
615 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
617 case 0x6001: /* mov.w @Rm,Rn */
618 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
620 case 0x6002: /* mov.l @Rm,Rn */
621 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
623 case 0x2004: /* mov.b Rm,@-Rn */
625 TCGv addr
= tcg_temp_new();
626 tcg_gen_subi_i32(addr
, REG(B11_8
), 1);
627 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
); /* might cause re-execution */
628 tcg_gen_mov_i32(REG(B11_8
), addr
); /* modify register status */
632 case 0x2005: /* mov.w Rm,@-Rn */
634 TCGv addr
= tcg_temp_new();
635 tcg_gen_subi_i32(addr
, REG(B11_8
), 2);
636 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
637 tcg_gen_mov_i32(REG(B11_8
), addr
);
641 case 0x2006: /* mov.l Rm,@-Rn */
643 TCGv addr
= tcg_temp_new();
644 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
645 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
646 tcg_gen_mov_i32(REG(B11_8
), addr
);
649 case 0x6004: /* mov.b @Rm+,Rn */
650 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
652 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 1);
654 case 0x6005: /* mov.w @Rm+,Rn */
655 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
657 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
659 case 0x6006: /* mov.l @Rm+,Rn */
660 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
662 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
664 case 0x0004: /* mov.b Rm,@(R0,Rn) */
666 TCGv addr
= tcg_temp_new();
667 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
668 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
);
672 case 0x0005: /* mov.w Rm,@(R0,Rn) */
674 TCGv addr
= tcg_temp_new();
675 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
676 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
680 case 0x0006: /* mov.l Rm,@(R0,Rn) */
682 TCGv addr
= tcg_temp_new();
683 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
684 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
688 case 0x000c: /* mov.b @(R0,Rm),Rn */
690 TCGv addr
= tcg_temp_new();
691 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
692 tcg_gen_qemu_ld8s(REG(B11_8
), addr
, ctx
->memidx
);
696 case 0x000d: /* mov.w @(R0,Rm),Rn */
698 TCGv addr
= tcg_temp_new();
699 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
700 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
704 case 0x000e: /* mov.l @(R0,Rm),Rn */
706 TCGv addr
= tcg_temp_new();
707 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
708 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
712 case 0x6008: /* swap.b Rm,Rn */
715 high
= tcg_temp_new();
716 tcg_gen_andi_i32(high
, REG(B7_4
), 0xffff0000);
717 low
= tcg_temp_new();
718 tcg_gen_ext16u_i32(low
, REG(B7_4
));
719 tcg_gen_bswap16_i32(low
, low
);
720 tcg_gen_or_i32(REG(B11_8
), high
, low
);
725 case 0x6009: /* swap.w Rm,Rn */
726 tcg_gen_rotli_i32(REG(B11_8
), REG(B7_4
), 16);
728 case 0x200d: /* xtrct Rm,Rn */
731 high
= tcg_temp_new();
732 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
733 low
= tcg_temp_new();
734 tcg_gen_shri_i32(low
, REG(B11_8
), 16);
735 tcg_gen_or_i32(REG(B11_8
), high
, low
);
740 case 0x300c: /* add Rm,Rn */
741 tcg_gen_add_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
743 case 0x300e: /* addc Rm,Rn */
747 tcg_gen_andi_i32(t0
, cpu_sr
, SR_T
);
749 tcg_gen_add_i32(t1
, REG(B7_4
), REG(B11_8
));
750 tcg_gen_add_i32(t0
, t0
, t1
);
752 tcg_gen_setcond_i32(TCG_COND_GTU
, t2
, REG(B11_8
), t1
);
753 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, t1
, t0
);
754 tcg_gen_or_i32(t1
, t1
, t2
);
756 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
757 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
759 tcg_gen_mov_i32(REG(B11_8
), t0
);
763 case 0x300f: /* addv Rm,Rn */
767 tcg_gen_add_i32(t0
, REG(B7_4
), REG(B11_8
));
769 tcg_gen_xor_i32(t1
, t0
, REG(B11_8
));
771 tcg_gen_xor_i32(t2
, REG(B7_4
), REG(B11_8
));
772 tcg_gen_andc_i32(t1
, t1
, t2
);
774 tcg_gen_shri_i32(t1
, t1
, 31);
775 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
776 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
778 tcg_gen_mov_i32(REG(B7_4
), t0
);
782 case 0x2009: /* and Rm,Rn */
783 tcg_gen_and_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
785 case 0x3000: /* cmp/eq Rm,Rn */
786 gen_cmp(TCG_COND_EQ
, REG(B7_4
), REG(B11_8
));
788 case 0x3003: /* cmp/ge Rm,Rn */
789 gen_cmp(TCG_COND_GE
, REG(B7_4
), REG(B11_8
));
791 case 0x3007: /* cmp/gt Rm,Rn */
792 gen_cmp(TCG_COND_GT
, REG(B7_4
), REG(B11_8
));
794 case 0x3006: /* cmp/hi Rm,Rn */
795 gen_cmp(TCG_COND_GTU
, REG(B7_4
), REG(B11_8
));
797 case 0x3002: /* cmp/hs Rm,Rn */
798 gen_cmp(TCG_COND_GEU
, REG(B7_4
), REG(B11_8
));
800 case 0x200c: /* cmp/str Rm,Rn */
802 TCGv cmp1
= tcg_temp_new();
803 TCGv cmp2
= tcg_temp_new();
804 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
805 tcg_gen_xor_i32(cmp1
, REG(B7_4
), REG(B11_8
));
806 tcg_gen_andi_i32(cmp2
, cmp1
, 0xff000000);
807 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
808 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
809 tcg_gen_andi_i32(cmp2
, cmp1
, 0x00ff0000);
810 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
811 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
812 tcg_gen_andi_i32(cmp2
, cmp1
, 0x0000ff00);
813 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
814 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
815 tcg_gen_andi_i32(cmp2
, cmp1
, 0x000000ff);
816 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
817 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
822 case 0x2007: /* div0s Rm,Rn */
824 gen_copy_bit_i32(cpu_sr
, 8, REG(B11_8
), 31); /* SR_Q */
825 gen_copy_bit_i32(cpu_sr
, 9, REG(B7_4
), 31); /* SR_M */
826 TCGv val
= tcg_temp_new();
827 tcg_gen_xor_i32(val
, REG(B7_4
), REG(B11_8
));
828 gen_copy_bit_i32(cpu_sr
, 0, val
, 31); /* SR_T */
832 case 0x3004: /* div1 Rm,Rn */
833 gen_helper_div1(REG(B11_8
), cpu_env
, REG(B7_4
), REG(B11_8
));
835 case 0x300d: /* dmuls.l Rm,Rn */
837 TCGv_i64 tmp1
= tcg_temp_new_i64();
838 TCGv_i64 tmp2
= tcg_temp_new_i64();
840 tcg_gen_ext_i32_i64(tmp1
, REG(B7_4
));
841 tcg_gen_ext_i32_i64(tmp2
, REG(B11_8
));
842 tcg_gen_mul_i64(tmp1
, tmp1
, tmp2
);
843 tcg_gen_trunc_i64_i32(cpu_macl
, tmp1
);
844 tcg_gen_shri_i64(tmp1
, tmp1
, 32);
845 tcg_gen_trunc_i64_i32(cpu_mach
, tmp1
);
847 tcg_temp_free_i64(tmp2
);
848 tcg_temp_free_i64(tmp1
);
851 case 0x3005: /* dmulu.l Rm,Rn */
853 TCGv_i64 tmp1
= tcg_temp_new_i64();
854 TCGv_i64 tmp2
= tcg_temp_new_i64();
856 tcg_gen_extu_i32_i64(tmp1
, REG(B7_4
));
857 tcg_gen_extu_i32_i64(tmp2
, REG(B11_8
));
858 tcg_gen_mul_i64(tmp1
, tmp1
, tmp2
);
859 tcg_gen_trunc_i64_i32(cpu_macl
, tmp1
);
860 tcg_gen_shri_i64(tmp1
, tmp1
, 32);
861 tcg_gen_trunc_i64_i32(cpu_mach
, tmp1
);
863 tcg_temp_free_i64(tmp2
);
864 tcg_temp_free_i64(tmp1
);
867 case 0x600e: /* exts.b Rm,Rn */
868 tcg_gen_ext8s_i32(REG(B11_8
), REG(B7_4
));
870 case 0x600f: /* exts.w Rm,Rn */
871 tcg_gen_ext16s_i32(REG(B11_8
), REG(B7_4
));
873 case 0x600c: /* extu.b Rm,Rn */
874 tcg_gen_ext8u_i32(REG(B11_8
), REG(B7_4
));
876 case 0x600d: /* extu.w Rm,Rn */
877 tcg_gen_ext16u_i32(REG(B11_8
), REG(B7_4
));
879 case 0x000f: /* mac.l @Rm+,@Rn+ */
882 arg0
= tcg_temp_new();
883 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
884 arg1
= tcg_temp_new();
885 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
886 gen_helper_macl(cpu_env
, arg0
, arg1
);
889 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
890 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
893 case 0x400f: /* mac.w @Rm+,@Rn+ */
896 arg0
= tcg_temp_new();
897 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
898 arg1
= tcg_temp_new();
899 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
900 gen_helper_macw(cpu_env
, arg0
, arg1
);
903 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 2);
904 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
907 case 0x0007: /* mul.l Rm,Rn */
908 tcg_gen_mul_i32(cpu_macl
, REG(B7_4
), REG(B11_8
));
910 case 0x200f: /* muls.w Rm,Rn */
913 arg0
= tcg_temp_new();
914 tcg_gen_ext16s_i32(arg0
, REG(B7_4
));
915 arg1
= tcg_temp_new();
916 tcg_gen_ext16s_i32(arg1
, REG(B11_8
));
917 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
922 case 0x200e: /* mulu.w Rm,Rn */
925 arg0
= tcg_temp_new();
926 tcg_gen_ext16u_i32(arg0
, REG(B7_4
));
927 arg1
= tcg_temp_new();
928 tcg_gen_ext16u_i32(arg1
, REG(B11_8
));
929 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
934 case 0x600b: /* neg Rm,Rn */
935 tcg_gen_neg_i32(REG(B11_8
), REG(B7_4
));
937 case 0x600a: /* negc Rm,Rn */
941 tcg_gen_neg_i32(t0
, REG(B7_4
));
943 tcg_gen_andi_i32(t1
, cpu_sr
, SR_T
);
944 tcg_gen_sub_i32(REG(B11_8
), t0
, t1
);
945 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
946 tcg_gen_setcondi_i32(TCG_COND_GTU
, t1
, t0
, 0);
947 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
948 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, REG(B11_8
), t0
);
949 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
954 case 0x6007: /* not Rm,Rn */
955 tcg_gen_not_i32(REG(B11_8
), REG(B7_4
));
957 case 0x200b: /* or Rm,Rn */
958 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
960 case 0x400c: /* shad Rm,Rn */
962 int label1
= gen_new_label();
963 int label2
= gen_new_label();
964 int label3
= gen_new_label();
965 int label4
= gen_new_label();
967 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
968 /* Rm positive, shift to the left */
969 shift
= tcg_temp_new();
970 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
971 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
972 tcg_temp_free(shift
);
974 /* Rm negative, shift to the right */
975 gen_set_label(label1
);
976 shift
= tcg_temp_new();
977 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
978 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
979 tcg_gen_not_i32(shift
, REG(B7_4
));
980 tcg_gen_andi_i32(shift
, shift
, 0x1f);
981 tcg_gen_addi_i32(shift
, shift
, 1);
982 tcg_gen_sar_i32(REG(B11_8
), REG(B11_8
), shift
);
983 tcg_temp_free(shift
);
986 gen_set_label(label2
);
987 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B11_8
), 0, label3
);
988 tcg_gen_movi_i32(REG(B11_8
), 0);
990 gen_set_label(label3
);
991 tcg_gen_movi_i32(REG(B11_8
), 0xffffffff);
992 gen_set_label(label4
);
995 case 0x400d: /* shld Rm,Rn */
997 int label1
= gen_new_label();
998 int label2
= gen_new_label();
999 int label3
= gen_new_label();
1001 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
1002 /* Rm positive, shift to the left */
1003 shift
= tcg_temp_new();
1004 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
1005 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
1006 tcg_temp_free(shift
);
1008 /* Rm negative, shift to the right */
1009 gen_set_label(label1
);
1010 shift
= tcg_temp_new();
1011 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
1012 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
1013 tcg_gen_not_i32(shift
, REG(B7_4
));
1014 tcg_gen_andi_i32(shift
, shift
, 0x1f);
1015 tcg_gen_addi_i32(shift
, shift
, 1);
1016 tcg_gen_shr_i32(REG(B11_8
), REG(B11_8
), shift
);
1017 tcg_temp_free(shift
);
1020 gen_set_label(label2
);
1021 tcg_gen_movi_i32(REG(B11_8
), 0);
1022 gen_set_label(label3
);
1025 case 0x3008: /* sub Rm,Rn */
1026 tcg_gen_sub_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
1028 case 0x300a: /* subc Rm,Rn */
1031 t0
= tcg_temp_new();
1032 tcg_gen_andi_i32(t0
, cpu_sr
, SR_T
);
1033 t1
= tcg_temp_new();
1034 tcg_gen_sub_i32(t1
, REG(B11_8
), REG(B7_4
));
1035 tcg_gen_sub_i32(t0
, t1
, t0
);
1036 t2
= tcg_temp_new();
1037 tcg_gen_setcond_i32(TCG_COND_LTU
, t2
, REG(B11_8
), t1
);
1038 tcg_gen_setcond_i32(TCG_COND_LTU
, t1
, t1
, t0
);
1039 tcg_gen_or_i32(t1
, t1
, t2
);
1041 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
1042 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
1044 tcg_gen_mov_i32(REG(B11_8
), t0
);
1048 case 0x300b: /* subv Rm,Rn */
1051 t0
= tcg_temp_new();
1052 tcg_gen_sub_i32(t0
, REG(B11_8
), REG(B7_4
));
1053 t1
= tcg_temp_new();
1054 tcg_gen_xor_i32(t1
, t0
, REG(B7_4
));
1055 t2
= tcg_temp_new();
1056 tcg_gen_xor_i32(t2
, REG(B11_8
), REG(B7_4
));
1057 tcg_gen_and_i32(t1
, t1
, t2
);
1059 tcg_gen_shri_i32(t1
, t1
, 31);
1060 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
1061 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
1063 tcg_gen_mov_i32(REG(B11_8
), t0
);
1067 case 0x2008: /* tst Rm,Rn */
1069 TCGv val
= tcg_temp_new();
1070 tcg_gen_and_i32(val
, REG(B7_4
), REG(B11_8
));
1071 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1075 case 0x200a: /* xor Rm,Rn */
1076 tcg_gen_xor_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
1078 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1080 if (ctx
->flags
& FPSCR_SZ
) {
1081 TCGv_i64 fp
= tcg_temp_new_i64();
1082 gen_load_fpr64(fp
, XREG(B7_4
));
1083 gen_store_fpr64(fp
, XREG(B11_8
));
1084 tcg_temp_free_i64(fp
);
1086 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1089 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1091 if (ctx
->flags
& FPSCR_SZ
) {
1092 TCGv addr_hi
= tcg_temp_new();
1093 int fr
= XREG(B7_4
);
1094 tcg_gen_addi_i32(addr_hi
, REG(B11_8
), 4);
1095 tcg_gen_qemu_st32(cpu_fregs
[fr
], REG(B11_8
), ctx
->memidx
);
1096 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1097 tcg_temp_free(addr_hi
);
1099 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], REG(B11_8
), ctx
->memidx
);
1102 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1104 if (ctx
->flags
& FPSCR_SZ
) {
1105 TCGv addr_hi
= tcg_temp_new();
1106 int fr
= XREG(B11_8
);
1107 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1108 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
1109 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1110 tcg_temp_free(addr_hi
);
1112 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1115 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1117 if (ctx
->flags
& FPSCR_SZ
) {
1118 TCGv addr_hi
= tcg_temp_new();
1119 int fr
= XREG(B11_8
);
1120 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1121 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
1122 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1123 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 8);
1124 tcg_temp_free(addr_hi
);
1126 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1127 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
1130 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1132 if (ctx
->flags
& FPSCR_SZ
) {
1133 TCGv addr
= tcg_temp_new_i32();
1134 int fr
= XREG(B7_4
);
1135 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1136 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1137 tcg_gen_subi_i32(addr
, addr
, 4);
1138 tcg_gen_qemu_st32(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1139 tcg_gen_mov_i32(REG(B11_8
), addr
);
1140 tcg_temp_free(addr
);
1143 addr
= tcg_temp_new_i32();
1144 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1145 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1146 tcg_gen_mov_i32(REG(B11_8
), addr
);
1147 tcg_temp_free(addr
);
1150 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1153 TCGv addr
= tcg_temp_new_i32();
1154 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
1155 if (ctx
->flags
& FPSCR_SZ
) {
1156 int fr
= XREG(B11_8
);
1157 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1158 tcg_gen_addi_i32(addr
, addr
, 4);
1159 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1161 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], addr
, ctx
->memidx
);
1163 tcg_temp_free(addr
);
1166 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1169 TCGv addr
= tcg_temp_new();
1170 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
1171 if (ctx
->flags
& FPSCR_SZ
) {
1172 int fr
= XREG(B7_4
);
1173 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1174 tcg_gen_addi_i32(addr
, addr
, 4);
1175 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1177 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1179 tcg_temp_free(addr
);
1182 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1183 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1184 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1185 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1186 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1187 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1190 if (ctx
->flags
& FPSCR_PR
) {
1193 if (ctx
->opcode
& 0x0110)
1194 break; /* illegal instruction */
1195 fp0
= tcg_temp_new_i64();
1196 fp1
= tcg_temp_new_i64();
1197 gen_load_fpr64(fp0
, DREG(B11_8
));
1198 gen_load_fpr64(fp1
, DREG(B7_4
));
1199 switch (ctx
->opcode
& 0xf00f) {
1200 case 0xf000: /* fadd Rm,Rn */
1201 gen_helper_fadd_DT(fp0
, cpu_env
, fp0
, fp1
);
1203 case 0xf001: /* fsub Rm,Rn */
1204 gen_helper_fsub_DT(fp0
, cpu_env
, fp0
, fp1
);
1206 case 0xf002: /* fmul Rm,Rn */
1207 gen_helper_fmul_DT(fp0
, cpu_env
, fp0
, fp1
);
1209 case 0xf003: /* fdiv Rm,Rn */
1210 gen_helper_fdiv_DT(fp0
, cpu_env
, fp0
, fp1
);
1212 case 0xf004: /* fcmp/eq Rm,Rn */
1213 gen_helper_fcmp_eq_DT(cpu_env
, fp0
, fp1
);
1215 case 0xf005: /* fcmp/gt Rm,Rn */
1216 gen_helper_fcmp_gt_DT(cpu_env
, fp0
, fp1
);
1219 gen_store_fpr64(fp0
, DREG(B11_8
));
1220 tcg_temp_free_i64(fp0
);
1221 tcg_temp_free_i64(fp1
);
1223 switch (ctx
->opcode
& 0xf00f) {
1224 case 0xf000: /* fadd Rm,Rn */
1225 gen_helper_fadd_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1226 cpu_fregs
[FREG(B11_8
)],
1227 cpu_fregs
[FREG(B7_4
)]);
1229 case 0xf001: /* fsub Rm,Rn */
1230 gen_helper_fsub_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1231 cpu_fregs
[FREG(B11_8
)],
1232 cpu_fregs
[FREG(B7_4
)]);
1234 case 0xf002: /* fmul Rm,Rn */
1235 gen_helper_fmul_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1236 cpu_fregs
[FREG(B11_8
)],
1237 cpu_fregs
[FREG(B7_4
)]);
1239 case 0xf003: /* fdiv Rm,Rn */
1240 gen_helper_fdiv_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1241 cpu_fregs
[FREG(B11_8
)],
1242 cpu_fregs
[FREG(B7_4
)]);
1244 case 0xf004: /* fcmp/eq Rm,Rn */
1245 gen_helper_fcmp_eq_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1246 cpu_fregs
[FREG(B7_4
)]);
1248 case 0xf005: /* fcmp/gt Rm,Rn */
1249 gen_helper_fcmp_gt_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1250 cpu_fregs
[FREG(B7_4
)]);
1256 case 0xf00e: /* fmac FR0,RM,Rn */
1259 if (ctx
->flags
& FPSCR_PR
) {
1260 break; /* illegal instruction */
1262 gen_helper_fmac_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1263 cpu_fregs
[FREG(0)], cpu_fregs
[FREG(B7_4
)],
1264 cpu_fregs
[FREG(B11_8
)]);
1270 switch (ctx
->opcode
& 0xff00) {
1271 case 0xc900: /* and #imm,R0 */
1272 tcg_gen_andi_i32(REG(0), REG(0), B7_0
);
1274 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1277 addr
= tcg_temp_new();
1278 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1279 val
= tcg_temp_new();
1280 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1281 tcg_gen_andi_i32(val
, val
, B7_0
);
1282 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1284 tcg_temp_free(addr
);
1287 case 0x8b00: /* bf label */
1288 CHECK_NOT_DELAY_SLOT
1289 gen_conditional_jump(ctx
, ctx
->pc
+ 2,
1290 ctx
->pc
+ 4 + B7_0s
* 2);
1291 ctx
->bstate
= BS_BRANCH
;
1293 case 0x8f00: /* bf/s label */
1294 CHECK_NOT_DELAY_SLOT
1295 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 0);
1296 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1298 case 0x8900: /* bt label */
1299 CHECK_NOT_DELAY_SLOT
1300 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2,
1302 ctx
->bstate
= BS_BRANCH
;
1304 case 0x8d00: /* bt/s label */
1305 CHECK_NOT_DELAY_SLOT
1306 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 1);
1307 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1309 case 0x8800: /* cmp/eq #imm,R0 */
1310 gen_cmp_imm(TCG_COND_EQ
, REG(0), B7_0s
);
1312 case 0xc400: /* mov.b @(disp,GBR),R0 */
1314 TCGv addr
= tcg_temp_new();
1315 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1316 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1317 tcg_temp_free(addr
);
1320 case 0xc500: /* mov.w @(disp,GBR),R0 */
1322 TCGv addr
= tcg_temp_new();
1323 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1324 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1325 tcg_temp_free(addr
);
1328 case 0xc600: /* mov.l @(disp,GBR),R0 */
1330 TCGv addr
= tcg_temp_new();
1331 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1332 tcg_gen_qemu_ld32s(REG(0), addr
, ctx
->memidx
);
1333 tcg_temp_free(addr
);
1336 case 0xc000: /* mov.b R0,@(disp,GBR) */
1338 TCGv addr
= tcg_temp_new();
1339 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1340 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1341 tcg_temp_free(addr
);
1344 case 0xc100: /* mov.w R0,@(disp,GBR) */
1346 TCGv addr
= tcg_temp_new();
1347 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1348 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1349 tcg_temp_free(addr
);
1352 case 0xc200: /* mov.l R0,@(disp,GBR) */
1354 TCGv addr
= tcg_temp_new();
1355 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1356 tcg_gen_qemu_st32(REG(0), addr
, ctx
->memidx
);
1357 tcg_temp_free(addr
);
1360 case 0x8000: /* mov.b R0,@(disp,Rn) */
1362 TCGv addr
= tcg_temp_new();
1363 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1364 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1365 tcg_temp_free(addr
);
1368 case 0x8100: /* mov.w R0,@(disp,Rn) */
1370 TCGv addr
= tcg_temp_new();
1371 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1372 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1373 tcg_temp_free(addr
);
1376 case 0x8400: /* mov.b @(disp,Rn),R0 */
1378 TCGv addr
= tcg_temp_new();
1379 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1380 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1381 tcg_temp_free(addr
);
1384 case 0x8500: /* mov.w @(disp,Rn),R0 */
1386 TCGv addr
= tcg_temp_new();
1387 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1388 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1389 tcg_temp_free(addr
);
1392 case 0xc700: /* mova @(disp,PC),R0 */
1393 tcg_gen_movi_i32(REG(0), ((ctx
->pc
& 0xfffffffc) + 4 + B7_0
* 4) & ~3);
1395 case 0xcb00: /* or #imm,R0 */
1396 tcg_gen_ori_i32(REG(0), REG(0), B7_0
);
1398 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1401 addr
= tcg_temp_new();
1402 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1403 val
= tcg_temp_new();
1404 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1405 tcg_gen_ori_i32(val
, val
, B7_0
);
1406 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1408 tcg_temp_free(addr
);
1411 case 0xc300: /* trapa #imm */
1414 CHECK_NOT_DELAY_SLOT
1415 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1416 imm
= tcg_const_i32(B7_0
);
1417 gen_helper_trapa(cpu_env
, imm
);
1419 ctx
->bstate
= BS_BRANCH
;
1422 case 0xc800: /* tst #imm,R0 */
1424 TCGv val
= tcg_temp_new();
1425 tcg_gen_andi_i32(val
, REG(0), B7_0
);
1426 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1430 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1432 TCGv val
= tcg_temp_new();
1433 tcg_gen_add_i32(val
, REG(0), cpu_gbr
);
1434 tcg_gen_qemu_ld8u(val
, val
, ctx
->memidx
);
1435 tcg_gen_andi_i32(val
, val
, B7_0
);
1436 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1440 case 0xca00: /* xor #imm,R0 */
1441 tcg_gen_xori_i32(REG(0), REG(0), B7_0
);
1443 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1446 addr
= tcg_temp_new();
1447 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1448 val
= tcg_temp_new();
1449 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1450 tcg_gen_xori_i32(val
, val
, B7_0
);
1451 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1453 tcg_temp_free(addr
);
1458 switch (ctx
->opcode
& 0xf08f) {
1459 case 0x408e: /* ldc Rm,Rn_BANK */
1461 tcg_gen_mov_i32(ALTREG(B6_4
), REG(B11_8
));
1463 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1465 tcg_gen_qemu_ld32s(ALTREG(B6_4
), REG(B11_8
), ctx
->memidx
);
1466 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1468 case 0x0082: /* stc Rm_BANK,Rn */
1470 tcg_gen_mov_i32(REG(B11_8
), ALTREG(B6_4
));
1472 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1475 TCGv addr
= tcg_temp_new();
1476 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1477 tcg_gen_qemu_st32(ALTREG(B6_4
), addr
, ctx
->memidx
);
1478 tcg_gen_mov_i32(REG(B11_8
), addr
);
1479 tcg_temp_free(addr
);
1484 switch (ctx
->opcode
& 0xf0ff) {
1485 case 0x0023: /* braf Rn */
1486 CHECK_NOT_DELAY_SLOT
1487 tcg_gen_addi_i32(cpu_delayed_pc
, REG(B11_8
), ctx
->pc
+ 4);
1488 ctx
->flags
|= DELAY_SLOT
;
1489 ctx
->delayed_pc
= (uint32_t) - 1;
1491 case 0x0003: /* bsrf Rn */
1492 CHECK_NOT_DELAY_SLOT
1493 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1494 tcg_gen_add_i32(cpu_delayed_pc
, REG(B11_8
), cpu_pr
);
1495 ctx
->flags
|= DELAY_SLOT
;
1496 ctx
->delayed_pc
= (uint32_t) - 1;
1498 case 0x4015: /* cmp/pl Rn */
1499 gen_cmp_imm(TCG_COND_GT
, REG(B11_8
), 0);
1501 case 0x4011: /* cmp/pz Rn */
1502 gen_cmp_imm(TCG_COND_GE
, REG(B11_8
), 0);
1504 case 0x4010: /* dt Rn */
1505 tcg_gen_subi_i32(REG(B11_8
), REG(B11_8
), 1);
1506 gen_cmp_imm(TCG_COND_EQ
, REG(B11_8
), 0);
1508 case 0x402b: /* jmp @Rn */
1509 CHECK_NOT_DELAY_SLOT
1510 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1511 ctx
->flags
|= DELAY_SLOT
;
1512 ctx
->delayed_pc
= (uint32_t) - 1;
1514 case 0x400b: /* jsr @Rn */
1515 CHECK_NOT_DELAY_SLOT
1516 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1517 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1518 ctx
->flags
|= DELAY_SLOT
;
1519 ctx
->delayed_pc
= (uint32_t) - 1;
1521 case 0x400e: /* ldc Rm,SR */
1523 tcg_gen_andi_i32(cpu_sr
, REG(B11_8
), 0x700083f3);
1524 ctx
->bstate
= BS_STOP
;
1526 case 0x4007: /* ldc.l @Rm+,SR */
1529 TCGv val
= tcg_temp_new();
1530 tcg_gen_qemu_ld32s(val
, REG(B11_8
), ctx
->memidx
);
1531 tcg_gen_andi_i32(cpu_sr
, val
, 0x700083f3);
1533 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1534 ctx
->bstate
= BS_STOP
;
1537 case 0x0002: /* stc SR,Rn */
1539 tcg_gen_mov_i32(REG(B11_8
), cpu_sr
);
1541 case 0x4003: /* stc SR,@-Rn */
1544 TCGv addr
= tcg_temp_new();
1545 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1546 tcg_gen_qemu_st32(cpu_sr
, addr
, ctx
->memidx
);
1547 tcg_gen_mov_i32(REG(B11_8
), addr
);
1548 tcg_temp_free(addr
);
1551 #define LD(reg,ldnum,ldpnum,prechk) \
1554 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1558 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1559 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1561 #define ST(reg,stnum,stpnum,prechk) \
1564 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1569 TCGv addr = tcg_temp_new(); \
1570 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1571 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1572 tcg_gen_mov_i32(REG(B11_8), addr); \
1573 tcg_temp_free(addr); \
1576 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1577 LD(reg,ldnum,ldpnum,prechk) \
1578 ST(reg,stnum,stpnum,prechk)
1579 LDST(gbr
, 0x401e, 0x4017, 0x0012, 0x4013, {})
1580 LDST(vbr
, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED
)
1581 LDST(ssr
, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED
)
1582 LDST(spc
, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED
)
1583 ST(sgr
, 0x003a, 0x4032, CHECK_PRIVILEGED
)
1584 LD(sgr
, 0x403a, 0x4036, CHECK_PRIVILEGED
if (!(ctx
->features
& SH_FEATURE_SH4A
)) break;)
1585 LDST(dbr
, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED
)
1586 LDST(mach
, 0x400a, 0x4006, 0x000a, 0x4002, {})
1587 LDST(macl
, 0x401a, 0x4016, 0x001a, 0x4012, {})
1588 LDST(pr
, 0x402a, 0x4026, 0x002a, 0x4022, {})
1589 LDST(fpul
, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED
})
1590 case 0x406a: /* lds Rm,FPSCR */
1592 gen_helper_ld_fpscr(cpu_env
, REG(B11_8
));
1593 ctx
->bstate
= BS_STOP
;
1595 case 0x4066: /* lds.l @Rm+,FPSCR */
1598 TCGv addr
= tcg_temp_new();
1599 tcg_gen_qemu_ld32s(addr
, REG(B11_8
), ctx
->memidx
);
1600 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1601 gen_helper_ld_fpscr(cpu_env
, addr
);
1602 tcg_temp_free(addr
);
1603 ctx
->bstate
= BS_STOP
;
1606 case 0x006a: /* sts FPSCR,Rn */
1608 tcg_gen_andi_i32(REG(B11_8
), cpu_fpscr
, 0x003fffff);
1610 case 0x4062: /* sts FPSCR,@-Rn */
1614 val
= tcg_temp_new();
1615 tcg_gen_andi_i32(val
, cpu_fpscr
, 0x003fffff);
1616 addr
= tcg_temp_new();
1617 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1618 tcg_gen_qemu_st32(val
, addr
, ctx
->memidx
);
1619 tcg_gen_mov_i32(REG(B11_8
), addr
);
1620 tcg_temp_free(addr
);
1624 case 0x00c3: /* movca.l R0,@Rm */
1626 TCGv val
= tcg_temp_new();
1627 tcg_gen_qemu_ld32u(val
, REG(B11_8
), ctx
->memidx
);
1628 gen_helper_movcal(cpu_env
, REG(B11_8
), val
);
1629 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1631 ctx
->has_movcal
= 1;
1634 /* MOVUA.L @Rm,R0 (Rm) -> R0
1635 Load non-boundary-aligned data */
1636 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1639 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1640 Load non-boundary-aligned data */
1641 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1642 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1644 case 0x0029: /* movt Rn */
1645 tcg_gen_andi_i32(REG(B11_8
), cpu_sr
, SR_T
);
1650 If (T == 1) R0 -> (Rn)
1653 if (ctx
->features
& SH_FEATURE_SH4A
) {
1654 int label
= gen_new_label();
1655 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
1656 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cpu_ldst
);
1657 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ldst
, 0, label
);
1658 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1659 gen_set_label(label
);
1660 tcg_gen_movi_i32(cpu_ldst
, 0);
1668 When interrupt/exception
1671 if (ctx
->features
& SH_FEATURE_SH4A
) {
1672 tcg_gen_movi_i32(cpu_ldst
, 0);
1673 tcg_gen_qemu_ld32s(REG(0), REG(B11_8
), ctx
->memidx
);
1674 tcg_gen_movi_i32(cpu_ldst
, 1);
1678 case 0x0093: /* ocbi @Rn */
1680 gen_helper_ocbi(cpu_env
, REG(B11_8
));
1683 case 0x00a3: /* ocbp @Rn */
1684 case 0x00b3: /* ocbwb @Rn */
1685 /* These instructions are supposed to do nothing in case of
1686 a cache miss. Given that we only partially emulate caches
1687 it is safe to simply ignore them. */
1689 case 0x0083: /* pref @Rn */
1691 case 0x00d3: /* prefi @Rn */
1692 if (ctx
->features
& SH_FEATURE_SH4A
)
1696 case 0x00e3: /* icbi @Rn */
1697 if (ctx
->features
& SH_FEATURE_SH4A
)
1701 case 0x00ab: /* synco */
1702 if (ctx
->features
& SH_FEATURE_SH4A
)
1706 case 0x4024: /* rotcl Rn */
1708 TCGv tmp
= tcg_temp_new();
1709 tcg_gen_mov_i32(tmp
, cpu_sr
);
1710 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1711 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1712 gen_copy_bit_i32(REG(B11_8
), 0, tmp
, 0);
1716 case 0x4025: /* rotcr Rn */
1718 TCGv tmp
= tcg_temp_new();
1719 tcg_gen_mov_i32(tmp
, cpu_sr
);
1720 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1721 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1722 gen_copy_bit_i32(REG(B11_8
), 31, tmp
, 0);
1726 case 0x4004: /* rotl Rn */
1727 tcg_gen_rotli_i32(REG(B11_8
), REG(B11_8
), 1);
1728 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1730 case 0x4005: /* rotr Rn */
1731 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1732 tcg_gen_rotri_i32(REG(B11_8
), REG(B11_8
), 1);
1734 case 0x4000: /* shll Rn */
1735 case 0x4020: /* shal Rn */
1736 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1737 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1739 case 0x4021: /* shar Rn */
1740 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1741 tcg_gen_sari_i32(REG(B11_8
), REG(B11_8
), 1);
1743 case 0x4001: /* shlr Rn */
1744 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1745 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1747 case 0x4008: /* shll2 Rn */
1748 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 2);
1750 case 0x4018: /* shll8 Rn */
1751 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 8);
1753 case 0x4028: /* shll16 Rn */
1754 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 16);
1756 case 0x4009: /* shlr2 Rn */
1757 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 2);
1759 case 0x4019: /* shlr8 Rn */
1760 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 8);
1762 case 0x4029: /* shlr16 Rn */
1763 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 16);
1765 case 0x401b: /* tas.b @Rn */
1768 addr
= tcg_temp_local_new();
1769 tcg_gen_mov_i32(addr
, REG(B11_8
));
1770 val
= tcg_temp_local_new();
1771 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1772 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1773 tcg_gen_ori_i32(val
, val
, 0x80);
1774 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1776 tcg_temp_free(addr
);
1779 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1781 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1783 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1785 tcg_gen_mov_i32(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1787 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1789 if (ctx
->flags
& FPSCR_PR
) {
1791 if (ctx
->opcode
& 0x0100)
1792 break; /* illegal instruction */
1793 fp
= tcg_temp_new_i64();
1794 gen_helper_float_DT(fp
, cpu_env
, cpu_fpul
);
1795 gen_store_fpr64(fp
, DREG(B11_8
));
1796 tcg_temp_free_i64(fp
);
1799 gen_helper_float_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
, cpu_fpul
);
1802 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1804 if (ctx
->flags
& FPSCR_PR
) {
1806 if (ctx
->opcode
& 0x0100)
1807 break; /* illegal instruction */
1808 fp
= tcg_temp_new_i64();
1809 gen_load_fpr64(fp
, DREG(B11_8
));
1810 gen_helper_ftrc_DT(cpu_fpul
, cpu_env
, fp
);
1811 tcg_temp_free_i64(fp
);
1814 gen_helper_ftrc_FT(cpu_fpul
, cpu_env
, cpu_fregs
[FREG(B11_8
)]);
1817 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1820 gen_helper_fneg_T(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1823 case 0xf05d: /* fabs FRn/DRn */
1825 if (ctx
->flags
& FPSCR_PR
) {
1826 if (ctx
->opcode
& 0x0100)
1827 break; /* illegal instruction */
1828 TCGv_i64 fp
= tcg_temp_new_i64();
1829 gen_load_fpr64(fp
, DREG(B11_8
));
1830 gen_helper_fabs_DT(fp
, fp
);
1831 gen_store_fpr64(fp
, DREG(B11_8
));
1832 tcg_temp_free_i64(fp
);
1834 gen_helper_fabs_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1837 case 0xf06d: /* fsqrt FRn */
1839 if (ctx
->flags
& FPSCR_PR
) {
1840 if (ctx
->opcode
& 0x0100)
1841 break; /* illegal instruction */
1842 TCGv_i64 fp
= tcg_temp_new_i64();
1843 gen_load_fpr64(fp
, DREG(B11_8
));
1844 gen_helper_fsqrt_DT(fp
, cpu_env
, fp
);
1845 gen_store_fpr64(fp
, DREG(B11_8
));
1846 tcg_temp_free_i64(fp
);
1848 gen_helper_fsqrt_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1849 cpu_fregs
[FREG(B11_8
)]);
1852 case 0xf07d: /* fsrra FRn */
1855 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1857 if (!(ctx
->flags
& FPSCR_PR
)) {
1858 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0);
1861 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1863 if (!(ctx
->flags
& FPSCR_PR
)) {
1864 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0x3f800000);
1867 case 0xf0ad: /* fcnvsd FPUL,DRn */
1870 TCGv_i64 fp
= tcg_temp_new_i64();
1871 gen_helper_fcnvsd_FT_DT(fp
, cpu_env
, cpu_fpul
);
1872 gen_store_fpr64(fp
, DREG(B11_8
));
1873 tcg_temp_free_i64(fp
);
1876 case 0xf0bd: /* fcnvds DRn,FPUL */
1879 TCGv_i64 fp
= tcg_temp_new_i64();
1880 gen_load_fpr64(fp
, DREG(B11_8
));
1881 gen_helper_fcnvds_DT_FT(cpu_fpul
, cpu_env
, fp
);
1882 tcg_temp_free_i64(fp
);
1885 case 0xf0ed: /* fipr FVm,FVn */
1887 if ((ctx
->flags
& FPSCR_PR
) == 0) {
1889 m
= tcg_const_i32((ctx
->opcode
>> 8) & 3);
1890 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1891 gen_helper_fipr(cpu_env
, m
, n
);
1897 case 0xf0fd: /* ftrv XMTRX,FVn */
1899 if ((ctx
->opcode
& 0x0300) == 0x0100 &&
1900 (ctx
->flags
& FPSCR_PR
) == 0) {
1902 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1903 gen_helper_ftrv(cpu_env
, n
);
1910 fprintf(stderr
, "unknown instruction 0x%04x at pc 0x%08x\n",
1911 ctx
->opcode
, ctx
->pc
);
1914 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1915 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1916 gen_helper_raise_slot_illegal_instruction(cpu_env
);
1918 gen_helper_raise_illegal_instruction(cpu_env
);
1920 ctx
->bstate
= BS_BRANCH
;
1923 static void decode_opc(DisasContext
* ctx
)
1925 uint32_t old_flags
= ctx
->flags
;
1927 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
1928 tcg_gen_debug_insn_start(ctx
->pc
);
1933 if (old_flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1934 if (ctx
->flags
& DELAY_SLOT_CLEARME
) {
1937 /* go out of the delay slot */
1938 uint32_t new_flags
= ctx
->flags
;
1939 new_flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
1940 gen_store_flags(new_flags
);
1943 ctx
->bstate
= BS_BRANCH
;
1944 if (old_flags
& DELAY_SLOT_CONDITIONAL
) {
1945 gen_delayed_conditional_jump(ctx
);
1946 } else if (old_flags
& DELAY_SLOT
) {
1952 /* go into a delay slot */
1953 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))
1954 gen_store_flags(ctx
->flags
);
/*
 * Core translator loop: turn the guest code of one TranslationBlock into
 * TCG ops, one SH4 instruction (2 bytes) per iteration.
 * NOTE(review): this extraction has dropped many source lines of this
 * function (its storage-class/return-type line, the third parameter,
 * local declarations, break statements, #if/#ifdef guards and braces).
 * Comments below describe only the fragments that are visible; do not
 * edit the code here without consulting the complete file.
 */
1958 gen_intermediate_code_internal(CPUSH4State
* env
, TranslationBlock
* tb
,
/* Locals (other declarations appear to have been dropped by the extraction). */
1962 target_ulong pc_start
;
1963 static uint16_t *gen_opc_end
;
/* End of the TCG opcode buffer; translation stops when the write pointer reaches it. */
1970 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
/* Seed the disassembly context from the TB's compile-time flags. */
1972 ctx
.flags
= (uint32_t)tb
->flags
;
1973 ctx
.bstate
= BS_NONE
;
/* MMU index: 1 when SR.MD is clear (user mode), 0 otherwise. */
1974 ctx
.memidx
= (ctx
.flags
& SR_MD
) == 0 ? 1 : 0;
1975 /* We don't know if the delayed pc came from a dynamic or static branch,
1976 so assume it is a dynamic branch. */
1977 ctx
.delayed_pc
= -1; /* use delayed pc from env pointer */
1979 ctx
.singlestep_enabled
= env
->singlestep_enabled
;
1980 ctx
.features
= env
->features
;
1981 ctx
.has_movcal
= (ctx
.flags
& TB_FLAG_PENDING_MOVCA
);
/* Instruction budget from the icount machinery; 0 means "no limit". */
1985 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
1987 max_insns
= CF_COUNT_MASK
;
/* Main loop: translate until a branch/stop condition or the op buffer fills. */
1989 while (ctx
.bstate
== BS_NONE
&& tcg_ctx
.gen_opc_ptr
< gen_opc_end
) {
/* Debugger breakpoints: stop the TB at a breakpointed pc. */
1990 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
1991 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1992 if (ctx
.pc
== bp
->pc
) {
1993 /* We have hit a breakpoint - make sure PC is up-to-date */
1994 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1995 gen_helper_debug(cpu_env
);
1996 ctx
.bstate
= BS_BRANCH
;
/* Per-op bookkeeping for the search_pc pass: record pc, flags and icount
   for each generated op (the enclosing condition was dropped by the
   extraction — presumably if (search_pc); confirm in the full file). */
2002 i
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
2006 tcg_ctx
.gen_opc_instr_start
[ii
++] = 0;
2008 tcg_ctx
.gen_opc_pc
[ii
] = ctx
.pc
;
2009 gen_opc_hflags
[ii
] = ctx
.flags
;
2010 tcg_ctx
.gen_opc_instr_start
[ii
] = 1;
2011 tcg_ctx
.gen_opc_icount
[ii
] = num_insns
;
/* Last insn of an icount TB that may do I/O: body (gen_io_start?) is
   missing from this extraction — TODO confirm. */
2013 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
/* Debug trace; in the full source this looks like it sits under a
   disabled (#if 0 style) guard — the guard lines are not visible here. */
2016 fprintf(stderr
, "Loading opcode at address 0x%08x\n", ctx
.pc
);
/* Fetch the 16-bit opcode and (in dropped lines) decode it / advance pc. */
2019 ctx
.opcode
= cpu_lduw_code(env
, ctx
.pc
);
/* Loop-exit conditions: page boundary, singlestep, insn budget.
   The break statements themselves were dropped by the extraction. */
2023 if ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
2025 if (env
->singlestep_enabled
)
2027 if (num_insns
>= max_insns
)
2032 if (tb
->cflags
& CF_LAST_IO
)
/* Epilogue: on singlestep, raise a debug exception at the current pc;
   otherwise dispatch on the final block state. */
2034 if (env
->singlestep_enabled
) {
2035 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
2036 gen_helper_debug(cpu_env
);
2038 switch (ctx
.bstate
) {
2040 /* gen_op_interrupt_restart(); */
/* Falling out of the TB without a branch: persist flags and chain to
   the next TB at ctx.pc. */
2044 gen_store_flags(ctx
.flags
| DELAY_SLOT_CLEARME
);
2046 gen_goto_tb(&ctx
, 0, ctx
.pc
);
2049 /* gen_op_interrupt_restart(); */
/* Close the icount region and terminate the opcode stream. */
2058 gen_icount_end(tb
, num_insns
);
2059 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
/* search_pc pass: zero-pad the remaining instr_start slots. */
2061 i
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
2064 tcg_ctx
.gen_opc_instr_start
[ii
++] = 0;
/* Normal pass: record the TB's guest size and instruction count. */
2066 tb
->size
= ctx
.pc
- pc_start
;
2067 tb
->icount
= num_insns
;
/* Optional in-asm disassembly logging. */
2071 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
2072 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2073 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 0);
2079 void gen_intermediate_code(CPUSH4State
* env
, struct TranslationBlock
*tb
)
2081 gen_intermediate_code_internal(env
, tb
, 0);
2084 void gen_intermediate_code_pc(CPUSH4State
* env
, struct TranslationBlock
*tb
)
2086 gen_intermediate_code_internal(env
, tb
, 1);
2089 void restore_state_to_opc(CPUSH4State
*env
, TranslationBlock
*tb
, int pc_pos
)
2091 env
->pc
= tcg_ctx
.gen_opc_pc
[pc_pos
];
2092 env
->flags
= gen_opc_hflags
[pc_pos
];