4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "qemu/osdep.h"
24 #include "disas/disas.h"
25 #include "exec/exec-all.h"
27 #include "exec/cpu_ldst.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
32 #include "trace-tcg.h"
36 typedef struct DisasContext
{
37 struct TranslationBlock
*tb
;
40 uint32_t tbflags
; /* should stay unmodified during the TB translation */
41 uint32_t envflags
; /* should stay in sync with env->flags using TCG ops */
47 int singlestep_enabled
;
52 #if defined(CONFIG_USER_ONLY)
53 #define IS_USER(ctx) 1
55 #define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
59 BS_NONE
= 0, /* We go out of the TB without reaching a branch or an
62 BS_STOP
= 1, /* We want to stop translation for any reason */
63 BS_BRANCH
= 2, /* We reached a branch condition */
64 BS_EXCP
= 3, /* We reached an exception condition */
67 /* global register indexes */
68 static TCGv_env cpu_env
;
69 static TCGv cpu_gregs
[32];
70 static TCGv cpu_sr
, cpu_sr_m
, cpu_sr_q
, cpu_sr_t
;
71 static TCGv cpu_pc
, cpu_ssr
, cpu_spc
, cpu_gbr
;
72 static TCGv cpu_vbr
, cpu_sgr
, cpu_dbr
, cpu_mach
, cpu_macl
;
73 static TCGv cpu_pr
, cpu_fpscr
, cpu_fpul
, cpu_ldst
;
74 static TCGv cpu_fregs
[32];
76 /* internal register indexes */
77 static TCGv cpu_flags
, cpu_delayed_pc
, cpu_delayed_cond
;
79 #include "exec/gen-icount.h"
81 void sh4_translate_init(void)
84 static int done_init
= 0;
85 static const char * const gregnames
[24] = {
86 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
87 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
88 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
89 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
90 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
92 static const char * const fregnames
[32] = {
93 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
94 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
95 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
96 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
97 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
98 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
99 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
100 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
107 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
108 tcg_ctx
.tcg_env
= cpu_env
;
110 for (i
= 0; i
< 24; i
++) {
111 cpu_gregs
[i
] = tcg_global_mem_new_i32(cpu_env
,
112 offsetof(CPUSH4State
, gregs
[i
]),
115 memcpy(cpu_gregs
+ 24, cpu_gregs
+ 8, 8 * sizeof(TCGv
));
117 cpu_pc
= tcg_global_mem_new_i32(cpu_env
,
118 offsetof(CPUSH4State
, pc
), "PC");
119 cpu_sr
= tcg_global_mem_new_i32(cpu_env
,
120 offsetof(CPUSH4State
, sr
), "SR");
121 cpu_sr_m
= tcg_global_mem_new_i32(cpu_env
,
122 offsetof(CPUSH4State
, sr_m
), "SR_M");
123 cpu_sr_q
= tcg_global_mem_new_i32(cpu_env
,
124 offsetof(CPUSH4State
, sr_q
), "SR_Q");
125 cpu_sr_t
= tcg_global_mem_new_i32(cpu_env
,
126 offsetof(CPUSH4State
, sr_t
), "SR_T");
127 cpu_ssr
= tcg_global_mem_new_i32(cpu_env
,
128 offsetof(CPUSH4State
, ssr
), "SSR");
129 cpu_spc
= tcg_global_mem_new_i32(cpu_env
,
130 offsetof(CPUSH4State
, spc
), "SPC");
131 cpu_gbr
= tcg_global_mem_new_i32(cpu_env
,
132 offsetof(CPUSH4State
, gbr
), "GBR");
133 cpu_vbr
= tcg_global_mem_new_i32(cpu_env
,
134 offsetof(CPUSH4State
, vbr
), "VBR");
135 cpu_sgr
= tcg_global_mem_new_i32(cpu_env
,
136 offsetof(CPUSH4State
, sgr
), "SGR");
137 cpu_dbr
= tcg_global_mem_new_i32(cpu_env
,
138 offsetof(CPUSH4State
, dbr
), "DBR");
139 cpu_mach
= tcg_global_mem_new_i32(cpu_env
,
140 offsetof(CPUSH4State
, mach
), "MACH");
141 cpu_macl
= tcg_global_mem_new_i32(cpu_env
,
142 offsetof(CPUSH4State
, macl
), "MACL");
143 cpu_pr
= tcg_global_mem_new_i32(cpu_env
,
144 offsetof(CPUSH4State
, pr
), "PR");
145 cpu_fpscr
= tcg_global_mem_new_i32(cpu_env
,
146 offsetof(CPUSH4State
, fpscr
), "FPSCR");
147 cpu_fpul
= tcg_global_mem_new_i32(cpu_env
,
148 offsetof(CPUSH4State
, fpul
), "FPUL");
150 cpu_flags
= tcg_global_mem_new_i32(cpu_env
,
151 offsetof(CPUSH4State
, flags
), "_flags_");
152 cpu_delayed_pc
= tcg_global_mem_new_i32(cpu_env
,
153 offsetof(CPUSH4State
, delayed_pc
),
155 cpu_delayed_cond
= tcg_global_mem_new_i32(cpu_env
,
156 offsetof(CPUSH4State
,
159 cpu_ldst
= tcg_global_mem_new_i32(cpu_env
,
160 offsetof(CPUSH4State
, ldst
), "_ldst_");
162 for (i
= 0; i
< 32; i
++)
163 cpu_fregs
[i
] = tcg_global_mem_new_i32(cpu_env
,
164 offsetof(CPUSH4State
, fregs
[i
]),
170 void superh_cpu_dump_state(CPUState
*cs
, FILE *f
,
171 fprintf_function cpu_fprintf
, int flags
)
173 SuperHCPU
*cpu
= SUPERH_CPU(cs
);
174 CPUSH4State
*env
= &cpu
->env
;
176 cpu_fprintf(f
, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
177 env
->pc
, cpu_read_sr(env
), env
->pr
, env
->fpscr
);
178 cpu_fprintf(f
, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
179 env
->spc
, env
->ssr
, env
->gbr
, env
->vbr
);
180 cpu_fprintf(f
, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
181 env
->sgr
, env
->dbr
, env
->delayed_pc
, env
->fpul
);
182 for (i
= 0; i
< 24; i
+= 4) {
183 cpu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
184 i
, env
->gregs
[i
], i
+ 1, env
->gregs
[i
+ 1],
185 i
+ 2, env
->gregs
[i
+ 2], i
+ 3, env
->gregs
[i
+ 3]);
187 if (env
->flags
& DELAY_SLOT
) {
188 cpu_fprintf(f
, "in delay slot (delayed_pc=0x%08x)\n",
190 } else if (env
->flags
& DELAY_SLOT_CONDITIONAL
) {
191 cpu_fprintf(f
, "in conditional delay slot (delayed_pc=0x%08x)\n",
193 } else if (env
->flags
& DELAY_SLOT_RTE
) {
194 cpu_fprintf(f
, "in rte delay slot (delayed_pc=0x%08x)\n",
199 static void gen_read_sr(TCGv dst
)
201 TCGv t0
= tcg_temp_new();
202 tcg_gen_shli_i32(t0
, cpu_sr_q
, SR_Q
);
203 tcg_gen_or_i32(dst
, dst
, t0
);
204 tcg_gen_shli_i32(t0
, cpu_sr_m
, SR_M
);
205 tcg_gen_or_i32(dst
, dst
, t0
);
206 tcg_gen_shli_i32(t0
, cpu_sr_t
, SR_T
);
207 tcg_gen_or_i32(dst
, cpu_sr
, t0
);
208 tcg_temp_free_i32(t0
);
211 static void gen_write_sr(TCGv src
)
213 tcg_gen_andi_i32(cpu_sr
, src
,
214 ~((1u << SR_Q
) | (1u << SR_M
) | (1u << SR_T
)));
215 tcg_gen_extract_i32(cpu_sr_q
, src
, SR_Q
, 1);
216 tcg_gen_extract_i32(cpu_sr_m
, src
, SR_M
, 1);
217 tcg_gen_extract_i32(cpu_sr_t
, src
, SR_T
, 1);
220 static inline void gen_save_cpu_state(DisasContext
*ctx
, bool save_pc
)
223 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
225 if (ctx
->delayed_pc
!= (uint32_t) -1) {
226 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
228 if ((ctx
->tbflags
& TB_FLAG_ENVFLAGS_MASK
) != ctx
->envflags
) {
229 tcg_gen_movi_i32(cpu_flags
, ctx
->envflags
);
233 static inline bool use_exit_tb(DisasContext
*ctx
)
235 return (ctx
->tbflags
& GUSA_EXCLUSIVE
) != 0;
238 static inline bool use_goto_tb(DisasContext
*ctx
, target_ulong dest
)
240 /* Use a direct jump if in same page and singlestep not enabled */
241 if (unlikely(ctx
->singlestep_enabled
|| use_exit_tb(ctx
))) {
244 #ifndef CONFIG_USER_ONLY
245 return (ctx
->tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
);
251 static void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
253 if (use_goto_tb(ctx
, dest
)) {
255 tcg_gen_movi_i32(cpu_pc
, dest
);
256 tcg_gen_exit_tb((uintptr_t)ctx
->tb
+ n
);
258 tcg_gen_movi_i32(cpu_pc
, dest
);
259 if (ctx
->singlestep_enabled
) {
260 gen_helper_debug(cpu_env
);
261 } else if (use_exit_tb(ctx
)) {
264 tcg_gen_lookup_and_goto_ptr(cpu_pc
);
269 static void gen_jump(DisasContext
* ctx
)
271 if (ctx
->delayed_pc
== -1) {
272 /* Target is not statically known, it comes necessarily from a
273 delayed jump as immediate jump are conditinal jumps */
274 tcg_gen_mov_i32(cpu_pc
, cpu_delayed_pc
);
275 tcg_gen_discard_i32(cpu_delayed_pc
);
276 if (ctx
->singlestep_enabled
) {
277 gen_helper_debug(cpu_env
);
278 } else if (use_exit_tb(ctx
)) {
281 tcg_gen_lookup_and_goto_ptr(cpu_pc
);
284 gen_goto_tb(ctx
, 0, ctx
->delayed_pc
);
288 /* Immediate conditional jump (bt or bf) */
289 static void gen_conditional_jump(DisasContext
*ctx
, target_ulong dest
,
292 TCGLabel
*l1
= gen_new_label();
293 TCGCond cond_not_taken
= jump_if_true
? TCG_COND_EQ
: TCG_COND_NE
;
295 if (ctx
->tbflags
& GUSA_EXCLUSIVE
) {
296 /* When in an exclusive region, we must continue to the end.
297 Therefore, exit the region on a taken branch, but otherwise
298 fall through to the next instruction. */
299 tcg_gen_brcondi_i32(cond_not_taken
, cpu_sr_t
, 0, l1
);
300 tcg_gen_movi_i32(cpu_flags
, ctx
->envflags
& ~GUSA_MASK
);
301 /* Note that this won't actually use a goto_tb opcode because we
302 disallow it in use_goto_tb, but it handles exit + singlestep. */
303 gen_goto_tb(ctx
, 0, dest
);
308 gen_save_cpu_state(ctx
, false);
309 tcg_gen_brcondi_i32(cond_not_taken
, cpu_sr_t
, 0, l1
);
310 gen_goto_tb(ctx
, 0, dest
);
312 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
313 ctx
->bstate
= BS_BRANCH
;
316 /* Delayed conditional jump (bt or bf) */
317 static void gen_delayed_conditional_jump(DisasContext
* ctx
)
319 TCGLabel
*l1
= gen_new_label();
320 TCGv ds
= tcg_temp_new();
322 tcg_gen_mov_i32(ds
, cpu_delayed_cond
);
323 tcg_gen_discard_i32(cpu_delayed_cond
);
325 if (ctx
->tbflags
& GUSA_EXCLUSIVE
) {
326 /* When in an exclusive region, we must continue to the end.
327 Therefore, exit the region on a taken branch, but otherwise
328 fall through to the next instruction. */
329 tcg_gen_brcondi_i32(TCG_COND_EQ
, ds
, 0, l1
);
331 /* Leave the gUSA region. */
332 tcg_gen_movi_i32(cpu_flags
, ctx
->envflags
& ~GUSA_MASK
);
339 tcg_gen_brcondi_i32(TCG_COND_NE
, ds
, 0, l1
);
340 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
345 static inline void gen_load_fpr64(DisasContext
*ctx
, TCGv_i64 t
, int reg
)
347 /* We have already signaled illegal instruction for odd Dr. */
348 tcg_debug_assert((reg
& 1) == 0);
350 tcg_gen_concat_i32_i64(t
, cpu_fregs
[reg
+ 1], cpu_fregs
[reg
]);
353 static inline void gen_store_fpr64(DisasContext
*ctx
, TCGv_i64 t
, int reg
)
355 /* We have already signaled illegal instruction for odd Dr. */
356 tcg_debug_assert((reg
& 1) == 0);
358 tcg_gen_extr_i64_i32(cpu_fregs
[reg
+ 1], cpu_fregs
[reg
], t
);
/* Instruction-field extraction helpers; all read ctx->opcode.
   Bn_m selects bits n..m; the "s" variants sign-extend. */
#define B3_0   (ctx->opcode & 0xf)
#define B6_4   ((ctx->opcode >> 4) & 0x7)
#define B7_4   ((ctx->opcode >> 4) & 0xf)
#define B7_0   (ctx->opcode & 0xff)
#define B7_0s  ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
                (ctx->opcode & 0xfff))
#define B11_8  ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* Register accessors: XOR with the current bank selector. */
#define REG(x)    cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
#define FREG(x)   cpu_fregs[(x) ^ ctx->fbank]

/* Remap an XD/DR register number into the flat cpu_fregs index. */
#define XHACK(x) ((((x) & 1) << 4) | ((x) & 0xe))
377 #define CHECK_NOT_DELAY_SLOT \
378 if (ctx->envflags & DELAY_SLOT_MASK) { \
379 goto do_illegal_slot; \
382 #define CHECK_PRIVILEGED \
383 if (IS_USER(ctx)) { \
387 #define CHECK_FPU_ENABLED \
388 if (ctx->tbflags & (1u << SR_FD)) { \
389 goto do_fpu_disabled; \
392 #define CHECK_FPSCR_PR_0 \
393 if (ctx->tbflags & FPSCR_PR) { \
397 #define CHECK_FPSCR_PR_1 \
398 if (!(ctx->tbflags & FPSCR_PR)) { \
403 if (!(ctx->features & SH_FEATURE_SH4A)) { \
407 static void _decode_opc(DisasContext
* ctx
)
409 /* This code tries to make movcal emulation sufficiently
410 accurate for Linux purposes. This instruction writes
411 memory, and prior to that, always allocates a cache line.
412 It is used in two contexts:
413 - in memcpy, where data is copied in blocks, the first write
414 of to a block uses movca.l for performance.
415 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
416 to flush the cache. Here, the data written by movcal.l is never
417 written to memory, and the data written is just bogus.
419 To simulate this, we simulate movcal.l, we store the value to memory,
420 but we also remember the previous content. If we see ocbi, we check
421 if movcal.l for that address was done previously. If so, the write should
422 not have hit the memory, so we restore the previous content.
423 When we see an instruction that is neither movca.l
424 nor ocbi, the previous content is discarded.
426 To optimize, we only try to flush stores when we're at the start of
427 TB, or if we already saw movca.l in this TB and did not flush stores
431 int opcode
= ctx
->opcode
& 0xf0ff;
432 if (opcode
!= 0x0093 /* ocbi */
433 && opcode
!= 0x00c3 /* movca.l */)
435 gen_helper_discard_movcal_backup(cpu_env
);
441 fprintf(stderr
, "Translating opcode 0x%04x\n", ctx
->opcode
);
444 switch (ctx
->opcode
) {
445 case 0x0019: /* div0u */
446 tcg_gen_movi_i32(cpu_sr_m
, 0);
447 tcg_gen_movi_i32(cpu_sr_q
, 0);
448 tcg_gen_movi_i32(cpu_sr_t
, 0);
450 case 0x000b: /* rts */
452 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_pr
);
453 ctx
->envflags
|= DELAY_SLOT
;
454 ctx
->delayed_pc
= (uint32_t) - 1;
456 case 0x0028: /* clrmac */
457 tcg_gen_movi_i32(cpu_mach
, 0);
458 tcg_gen_movi_i32(cpu_macl
, 0);
460 case 0x0048: /* clrs */
461 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~(1u << SR_S
));
463 case 0x0008: /* clrt */
464 tcg_gen_movi_i32(cpu_sr_t
, 0);
466 case 0x0038: /* ldtlb */
468 gen_helper_ldtlb(cpu_env
);
470 case 0x002b: /* rte */
473 gen_write_sr(cpu_ssr
);
474 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_spc
);
475 ctx
->envflags
|= DELAY_SLOT_RTE
;
476 ctx
->delayed_pc
= (uint32_t) - 1;
477 ctx
->bstate
= BS_STOP
;
479 case 0x0058: /* sets */
480 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, (1u << SR_S
));
482 case 0x0018: /* sett */
483 tcg_gen_movi_i32(cpu_sr_t
, 1);
485 case 0xfbfd: /* frchg */
487 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_FR
);
488 ctx
->bstate
= BS_STOP
;
490 case 0xf3fd: /* fschg */
492 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_SZ
);
493 ctx
->bstate
= BS_STOP
;
495 case 0xf7fd: /* fpchg */
497 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_PR
);
498 ctx
->bstate
= BS_STOP
;
500 case 0x0009: /* nop */
502 case 0x001b: /* sleep */
504 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
+ 2);
505 gen_helper_sleep(cpu_env
);
509 switch (ctx
->opcode
& 0xf000) {
510 case 0x1000: /* mov.l Rm,@(disp,Rn) */
512 TCGv addr
= tcg_temp_new();
513 tcg_gen_addi_i32(addr
, REG(B11_8
), B3_0
* 4);
514 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
518 case 0x5000: /* mov.l @(disp,Rm),Rn */
520 TCGv addr
= tcg_temp_new();
521 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 4);
522 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
526 case 0xe000: /* mov #imm,Rn */
527 #ifdef CONFIG_USER_ONLY
528 /* Detect the start of a gUSA region. If so, update envflags
529 and end the TB. This will allow us to see the end of the
530 region (stored in R0) in the next TB. */
531 if (B11_8
== 15 && B7_0s
< 0 && parallel_cpus
) {
532 ctx
->envflags
= deposit32(ctx
->envflags
, GUSA_SHIFT
, 8, B7_0s
);
533 ctx
->bstate
= BS_STOP
;
536 tcg_gen_movi_i32(REG(B11_8
), B7_0s
);
538 case 0x9000: /* mov.w @(disp,PC),Rn */
540 TCGv addr
= tcg_const_i32(ctx
->pc
+ 4 + B7_0
* 2);
541 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESW
);
545 case 0xd000: /* mov.l @(disp,PC),Rn */
547 TCGv addr
= tcg_const_i32((ctx
->pc
+ 4 + B7_0
* 4) & ~3);
548 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
552 case 0x7000: /* add #imm,Rn */
553 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), B7_0s
);
555 case 0xa000: /* bra disp */
557 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
558 ctx
->envflags
|= DELAY_SLOT
;
560 case 0xb000: /* bsr disp */
562 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
563 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
564 ctx
->envflags
|= DELAY_SLOT
;
568 switch (ctx
->opcode
& 0xf00f) {
569 case 0x6003: /* mov Rm,Rn */
570 tcg_gen_mov_i32(REG(B11_8
), REG(B7_4
));
572 case 0x2000: /* mov.b Rm,@Rn */
573 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_UB
);
575 case 0x2001: /* mov.w Rm,@Rn */
576 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_TEUW
);
578 case 0x2002: /* mov.l Rm,@Rn */
579 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
581 case 0x6000: /* mov.b @Rm,Rn */
582 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_SB
);
584 case 0x6001: /* mov.w @Rm,Rn */
585 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESW
);
587 case 0x6002: /* mov.l @Rm,Rn */
588 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESL
);
590 case 0x2004: /* mov.b Rm,@-Rn */
592 TCGv addr
= tcg_temp_new();
593 tcg_gen_subi_i32(addr
, REG(B11_8
), 1);
594 /* might cause re-execution */
595 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_UB
);
596 tcg_gen_mov_i32(REG(B11_8
), addr
); /* modify register status */
600 case 0x2005: /* mov.w Rm,@-Rn */
602 TCGv addr
= tcg_temp_new();
603 tcg_gen_subi_i32(addr
, REG(B11_8
), 2);
604 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUW
);
605 tcg_gen_mov_i32(REG(B11_8
), addr
);
609 case 0x2006: /* mov.l Rm,@-Rn */
611 TCGv addr
= tcg_temp_new();
612 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
613 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
614 tcg_gen_mov_i32(REG(B11_8
), addr
);
617 case 0x6004: /* mov.b @Rm+,Rn */
618 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_SB
);
620 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 1);
622 case 0x6005: /* mov.w @Rm+,Rn */
623 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESW
);
625 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
627 case 0x6006: /* mov.l @Rm+,Rn */
628 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESL
);
630 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
632 case 0x0004: /* mov.b Rm,@(R0,Rn) */
634 TCGv addr
= tcg_temp_new();
635 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
636 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_UB
);
640 case 0x0005: /* mov.w Rm,@(R0,Rn) */
642 TCGv addr
= tcg_temp_new();
643 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
644 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUW
);
648 case 0x0006: /* mov.l Rm,@(R0,Rn) */
650 TCGv addr
= tcg_temp_new();
651 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
652 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
656 case 0x000c: /* mov.b @(R0,Rm),Rn */
658 TCGv addr
= tcg_temp_new();
659 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
660 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_SB
);
664 case 0x000d: /* mov.w @(R0,Rm),Rn */
666 TCGv addr
= tcg_temp_new();
667 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
668 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESW
);
672 case 0x000e: /* mov.l @(R0,Rm),Rn */
674 TCGv addr
= tcg_temp_new();
675 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
676 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
680 case 0x6008: /* swap.b Rm,Rn */
682 TCGv low
= tcg_temp_new();;
683 tcg_gen_ext16u_i32(low
, REG(B7_4
));
684 tcg_gen_bswap16_i32(low
, low
);
685 tcg_gen_deposit_i32(REG(B11_8
), REG(B7_4
), low
, 0, 16);
689 case 0x6009: /* swap.w Rm,Rn */
690 tcg_gen_rotli_i32(REG(B11_8
), REG(B7_4
), 16);
692 case 0x200d: /* xtrct Rm,Rn */
695 high
= tcg_temp_new();
696 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
697 low
= tcg_temp_new();
698 tcg_gen_shri_i32(low
, REG(B11_8
), 16);
699 tcg_gen_or_i32(REG(B11_8
), high
, low
);
704 case 0x300c: /* add Rm,Rn */
705 tcg_gen_add_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
707 case 0x300e: /* addc Rm,Rn */
710 t0
= tcg_const_tl(0);
712 tcg_gen_add2_i32(t1
, cpu_sr_t
, cpu_sr_t
, t0
, REG(B7_4
), t0
);
713 tcg_gen_add2_i32(REG(B11_8
), cpu_sr_t
,
714 REG(B11_8
), t0
, t1
, cpu_sr_t
);
719 case 0x300f: /* addv Rm,Rn */
723 tcg_gen_add_i32(t0
, REG(B7_4
), REG(B11_8
));
725 tcg_gen_xor_i32(t1
, t0
, REG(B11_8
));
727 tcg_gen_xor_i32(t2
, REG(B7_4
), REG(B11_8
));
728 tcg_gen_andc_i32(cpu_sr_t
, t1
, t2
);
730 tcg_gen_shri_i32(cpu_sr_t
, cpu_sr_t
, 31);
732 tcg_gen_mov_i32(REG(B7_4
), t0
);
736 case 0x2009: /* and Rm,Rn */
737 tcg_gen_and_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
739 case 0x3000: /* cmp/eq Rm,Rn */
740 tcg_gen_setcond_i32(TCG_COND_EQ
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
742 case 0x3003: /* cmp/ge Rm,Rn */
743 tcg_gen_setcond_i32(TCG_COND_GE
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
745 case 0x3007: /* cmp/gt Rm,Rn */
746 tcg_gen_setcond_i32(TCG_COND_GT
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
748 case 0x3006: /* cmp/hi Rm,Rn */
749 tcg_gen_setcond_i32(TCG_COND_GTU
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
751 case 0x3002: /* cmp/hs Rm,Rn */
752 tcg_gen_setcond_i32(TCG_COND_GEU
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
754 case 0x200c: /* cmp/str Rm,Rn */
756 TCGv cmp1
= tcg_temp_new();
757 TCGv cmp2
= tcg_temp_new();
758 tcg_gen_xor_i32(cmp2
, REG(B7_4
), REG(B11_8
));
759 tcg_gen_subi_i32(cmp1
, cmp2
, 0x01010101);
760 tcg_gen_andc_i32(cmp1
, cmp1
, cmp2
);
761 tcg_gen_andi_i32(cmp1
, cmp1
, 0x80808080);
762 tcg_gen_setcondi_i32(TCG_COND_NE
, cpu_sr_t
, cmp1
, 0);
767 case 0x2007: /* div0s Rm,Rn */
768 tcg_gen_shri_i32(cpu_sr_q
, REG(B11_8
), 31); /* SR_Q */
769 tcg_gen_shri_i32(cpu_sr_m
, REG(B7_4
), 31); /* SR_M */
770 tcg_gen_xor_i32(cpu_sr_t
, cpu_sr_q
, cpu_sr_m
); /* SR_T */
772 case 0x3004: /* div1 Rm,Rn */
774 TCGv t0
= tcg_temp_new();
775 TCGv t1
= tcg_temp_new();
776 TCGv t2
= tcg_temp_new();
777 TCGv zero
= tcg_const_i32(0);
779 /* shift left arg1, saving the bit being pushed out and inserting
781 tcg_gen_shri_i32(t0
, REG(B11_8
), 31);
782 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
783 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), cpu_sr_t
);
785 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
786 using 64-bit temps, we compute arg0's high part from q ^ m, so
787 that it is 0x00000000 when adding the value or 0xffffffff when
789 tcg_gen_xor_i32(t1
, cpu_sr_q
, cpu_sr_m
);
790 tcg_gen_subi_i32(t1
, t1
, 1);
791 tcg_gen_neg_i32(t2
, REG(B7_4
));
792 tcg_gen_movcond_i32(TCG_COND_EQ
, t2
, t1
, zero
, REG(B7_4
), t2
);
793 tcg_gen_add2_i32(REG(B11_8
), t1
, REG(B11_8
), zero
, t2
, t1
);
795 /* compute T and Q depending on carry */
796 tcg_gen_andi_i32(t1
, t1
, 1);
797 tcg_gen_xor_i32(t1
, t1
, t0
);
798 tcg_gen_xori_i32(cpu_sr_t
, t1
, 1);
799 tcg_gen_xor_i32(cpu_sr_q
, cpu_sr_m
, t1
);
807 case 0x300d: /* dmuls.l Rm,Rn */
808 tcg_gen_muls2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
810 case 0x3005: /* dmulu.l Rm,Rn */
811 tcg_gen_mulu2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
813 case 0x600e: /* exts.b Rm,Rn */
814 tcg_gen_ext8s_i32(REG(B11_8
), REG(B7_4
));
816 case 0x600f: /* exts.w Rm,Rn */
817 tcg_gen_ext16s_i32(REG(B11_8
), REG(B7_4
));
819 case 0x600c: /* extu.b Rm,Rn */
820 tcg_gen_ext8u_i32(REG(B11_8
), REG(B7_4
));
822 case 0x600d: /* extu.w Rm,Rn */
823 tcg_gen_ext16u_i32(REG(B11_8
), REG(B7_4
));
825 case 0x000f: /* mac.l @Rm+,@Rn+ */
828 arg0
= tcg_temp_new();
829 tcg_gen_qemu_ld_i32(arg0
, REG(B7_4
), ctx
->memidx
, MO_TESL
);
830 arg1
= tcg_temp_new();
831 tcg_gen_qemu_ld_i32(arg1
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
832 gen_helper_macl(cpu_env
, arg0
, arg1
);
835 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
836 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
839 case 0x400f: /* mac.w @Rm+,@Rn+ */
842 arg0
= tcg_temp_new();
843 tcg_gen_qemu_ld_i32(arg0
, REG(B7_4
), ctx
->memidx
, MO_TESL
);
844 arg1
= tcg_temp_new();
845 tcg_gen_qemu_ld_i32(arg1
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
846 gen_helper_macw(cpu_env
, arg0
, arg1
);
849 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 2);
850 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
853 case 0x0007: /* mul.l Rm,Rn */
854 tcg_gen_mul_i32(cpu_macl
, REG(B7_4
), REG(B11_8
));
856 case 0x200f: /* muls.w Rm,Rn */
859 arg0
= tcg_temp_new();
860 tcg_gen_ext16s_i32(arg0
, REG(B7_4
));
861 arg1
= tcg_temp_new();
862 tcg_gen_ext16s_i32(arg1
, REG(B11_8
));
863 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
868 case 0x200e: /* mulu.w Rm,Rn */
871 arg0
= tcg_temp_new();
872 tcg_gen_ext16u_i32(arg0
, REG(B7_4
));
873 arg1
= tcg_temp_new();
874 tcg_gen_ext16u_i32(arg1
, REG(B11_8
));
875 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
880 case 0x600b: /* neg Rm,Rn */
881 tcg_gen_neg_i32(REG(B11_8
), REG(B7_4
));
883 case 0x600a: /* negc Rm,Rn */
885 TCGv t0
= tcg_const_i32(0);
886 tcg_gen_add2_i32(REG(B11_8
), cpu_sr_t
,
887 REG(B7_4
), t0
, cpu_sr_t
, t0
);
888 tcg_gen_sub2_i32(REG(B11_8
), cpu_sr_t
,
889 t0
, t0
, REG(B11_8
), cpu_sr_t
);
890 tcg_gen_andi_i32(cpu_sr_t
, cpu_sr_t
, 1);
894 case 0x6007: /* not Rm,Rn */
895 tcg_gen_not_i32(REG(B11_8
), REG(B7_4
));
897 case 0x200b: /* or Rm,Rn */
898 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
900 case 0x400c: /* shad Rm,Rn */
902 TCGv t0
= tcg_temp_new();
903 TCGv t1
= tcg_temp_new();
904 TCGv t2
= tcg_temp_new();
906 tcg_gen_andi_i32(t0
, REG(B7_4
), 0x1f);
908 /* positive case: shift to the left */
909 tcg_gen_shl_i32(t1
, REG(B11_8
), t0
);
911 /* negative case: shift to the right in two steps to
912 correctly handle the -32 case */
913 tcg_gen_xori_i32(t0
, t0
, 0x1f);
914 tcg_gen_sar_i32(t2
, REG(B11_8
), t0
);
915 tcg_gen_sari_i32(t2
, t2
, 1);
917 /* select between the two cases */
918 tcg_gen_movi_i32(t0
, 0);
919 tcg_gen_movcond_i32(TCG_COND_GE
, REG(B11_8
), REG(B7_4
), t0
, t1
, t2
);
926 case 0x400d: /* shld Rm,Rn */
928 TCGv t0
= tcg_temp_new();
929 TCGv t1
= tcg_temp_new();
930 TCGv t2
= tcg_temp_new();
932 tcg_gen_andi_i32(t0
, REG(B7_4
), 0x1f);
934 /* positive case: shift to the left */
935 tcg_gen_shl_i32(t1
, REG(B11_8
), t0
);
937 /* negative case: shift to the right in two steps to
938 correctly handle the -32 case */
939 tcg_gen_xori_i32(t0
, t0
, 0x1f);
940 tcg_gen_shr_i32(t2
, REG(B11_8
), t0
);
941 tcg_gen_shri_i32(t2
, t2
, 1);
943 /* select between the two cases */
944 tcg_gen_movi_i32(t0
, 0);
945 tcg_gen_movcond_i32(TCG_COND_GE
, REG(B11_8
), REG(B7_4
), t0
, t1
, t2
);
952 case 0x3008: /* sub Rm,Rn */
953 tcg_gen_sub_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
955 case 0x300a: /* subc Rm,Rn */
958 t0
= tcg_const_tl(0);
960 tcg_gen_add2_i32(t1
, cpu_sr_t
, cpu_sr_t
, t0
, REG(B7_4
), t0
);
961 tcg_gen_sub2_i32(REG(B11_8
), cpu_sr_t
,
962 REG(B11_8
), t0
, t1
, cpu_sr_t
);
963 tcg_gen_andi_i32(cpu_sr_t
, cpu_sr_t
, 1);
968 case 0x300b: /* subv Rm,Rn */
972 tcg_gen_sub_i32(t0
, REG(B11_8
), REG(B7_4
));
974 tcg_gen_xor_i32(t1
, t0
, REG(B7_4
));
976 tcg_gen_xor_i32(t2
, REG(B11_8
), REG(B7_4
));
977 tcg_gen_and_i32(t1
, t1
, t2
);
979 tcg_gen_shri_i32(cpu_sr_t
, t1
, 31);
981 tcg_gen_mov_i32(REG(B11_8
), t0
);
985 case 0x2008: /* tst Rm,Rn */
987 TCGv val
= tcg_temp_new();
988 tcg_gen_and_i32(val
, REG(B7_4
), REG(B11_8
));
989 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
993 case 0x200a: /* xor Rm,Rn */
994 tcg_gen_xor_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
996 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
998 if (ctx
->tbflags
& FPSCR_SZ
) {
999 int xsrc
= XHACK(B7_4
);
1000 int xdst
= XHACK(B11_8
);
1001 tcg_gen_mov_i32(FREG(xdst
), FREG(xsrc
));
1002 tcg_gen_mov_i32(FREG(xdst
+ 1), FREG(xsrc
+ 1));
1004 tcg_gen_mov_i32(FREG(B11_8
), FREG(B7_4
));
1007 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1009 if (ctx
->tbflags
& FPSCR_SZ
) {
1010 TCGv_i64 fp
= tcg_temp_new_i64();
1011 gen_load_fpr64(ctx
, fp
, XHACK(B7_4
));
1012 tcg_gen_qemu_st_i64(fp
, REG(B11_8
), ctx
->memidx
, MO_TEQ
);
1013 tcg_temp_free_i64(fp
);
1015 tcg_gen_qemu_st_i32(FREG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1018 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1020 if (ctx
->tbflags
& FPSCR_SZ
) {
1021 TCGv_i64 fp
= tcg_temp_new_i64();
1022 tcg_gen_qemu_ld_i64(fp
, REG(B7_4
), ctx
->memidx
, MO_TEQ
);
1023 gen_store_fpr64(ctx
, fp
, XHACK(B11_8
));
1024 tcg_temp_free_i64(fp
);
1026 tcg_gen_qemu_ld_i32(FREG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TEUL
);
1029 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1031 if (ctx
->tbflags
& FPSCR_SZ
) {
1032 TCGv_i64 fp
= tcg_temp_new_i64();
1033 tcg_gen_qemu_ld_i64(fp
, REG(B7_4
), ctx
->memidx
, MO_TEQ
);
1034 gen_store_fpr64(ctx
, fp
, XHACK(B11_8
));
1035 tcg_temp_free_i64(fp
);
1036 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 8);
1038 tcg_gen_qemu_ld_i32(FREG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TEUL
);
1039 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
1042 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1045 TCGv addr
= tcg_temp_new_i32();
1046 if (ctx
->tbflags
& FPSCR_SZ
) {
1047 TCGv_i64 fp
= tcg_temp_new_i64();
1048 gen_load_fpr64(ctx
, fp
, XHACK(B7_4
));
1049 tcg_gen_subi_i32(addr
, REG(B11_8
), 8);
1050 tcg_gen_qemu_st_i64(fp
, addr
, ctx
->memidx
, MO_TEQ
);
1051 tcg_temp_free_i64(fp
);
1053 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1054 tcg_gen_qemu_st_i32(FREG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
1056 tcg_gen_mov_i32(REG(B11_8
), addr
);
1057 tcg_temp_free(addr
);
1060 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1063 TCGv addr
= tcg_temp_new_i32();
1064 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
1065 if (ctx
->tbflags
& FPSCR_SZ
) {
1066 TCGv_i64 fp
= tcg_temp_new_i64();
1067 tcg_gen_qemu_ld_i64(fp
, addr
, ctx
->memidx
, MO_TEQ
);
1068 gen_store_fpr64(ctx
, fp
, XHACK(B11_8
));
1069 tcg_temp_free_i64(fp
);
1071 tcg_gen_qemu_ld_i32(FREG(B11_8
), addr
, ctx
->memidx
, MO_TEUL
);
1073 tcg_temp_free(addr
);
1076 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1079 TCGv addr
= tcg_temp_new();
1080 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
1081 if (ctx
->tbflags
& FPSCR_SZ
) {
1082 TCGv_i64 fp
= tcg_temp_new_i64();
1083 gen_load_fpr64(ctx
, fp
, XHACK(B7_4
));
1084 tcg_gen_qemu_st_i64(fp
, addr
, ctx
->memidx
, MO_TEQ
);
1085 tcg_temp_free_i64(fp
);
1087 tcg_gen_qemu_st_i32(FREG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
1089 tcg_temp_free(addr
);
1092 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1093 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1094 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1095 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1096 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1097 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1100 if (ctx
->tbflags
& FPSCR_PR
) {
1103 if (ctx
->opcode
& 0x0110) {
1106 fp0
= tcg_temp_new_i64();
1107 fp1
= tcg_temp_new_i64();
1108 gen_load_fpr64(ctx
, fp0
, B11_8
);
1109 gen_load_fpr64(ctx
, fp1
, B7_4
);
1110 switch (ctx
->opcode
& 0xf00f) {
1111 case 0xf000: /* fadd Rm,Rn */
1112 gen_helper_fadd_DT(fp0
, cpu_env
, fp0
, fp1
);
1114 case 0xf001: /* fsub Rm,Rn */
1115 gen_helper_fsub_DT(fp0
, cpu_env
, fp0
, fp1
);
1117 case 0xf002: /* fmul Rm,Rn */
1118 gen_helper_fmul_DT(fp0
, cpu_env
, fp0
, fp1
);
1120 case 0xf003: /* fdiv Rm,Rn */
1121 gen_helper_fdiv_DT(fp0
, cpu_env
, fp0
, fp1
);
1123 case 0xf004: /* fcmp/eq Rm,Rn */
1124 gen_helper_fcmp_eq_DT(cpu_sr_t
, cpu_env
, fp0
, fp1
);
1126 case 0xf005: /* fcmp/gt Rm,Rn */
1127 gen_helper_fcmp_gt_DT(cpu_sr_t
, cpu_env
, fp0
, fp1
);
1130 gen_store_fpr64(ctx
, fp0
, B11_8
);
1131 tcg_temp_free_i64(fp0
);
1132 tcg_temp_free_i64(fp1
);
1134 switch (ctx
->opcode
& 0xf00f) {
1135 case 0xf000: /* fadd Rm,Rn */
1136 gen_helper_fadd_FT(FREG(B11_8
), cpu_env
,
1137 FREG(B11_8
), FREG(B7_4
));
1139 case 0xf001: /* fsub Rm,Rn */
1140 gen_helper_fsub_FT(FREG(B11_8
), cpu_env
,
1141 FREG(B11_8
), FREG(B7_4
));
1143 case 0xf002: /* fmul Rm,Rn */
1144 gen_helper_fmul_FT(FREG(B11_8
), cpu_env
,
1145 FREG(B11_8
), FREG(B7_4
));
1147 case 0xf003: /* fdiv Rm,Rn */
1148 gen_helper_fdiv_FT(FREG(B11_8
), cpu_env
,
1149 FREG(B11_8
), FREG(B7_4
));
1151 case 0xf004: /* fcmp/eq Rm,Rn */
1152 gen_helper_fcmp_eq_FT(cpu_sr_t
, cpu_env
,
1153 FREG(B11_8
), FREG(B7_4
));
1155 case 0xf005: /* fcmp/gt Rm,Rn */
1156 gen_helper_fcmp_gt_FT(cpu_sr_t
, cpu_env
,
1157 FREG(B11_8
), FREG(B7_4
));
1163 case 0xf00e: /* fmac FR0,RM,Rn */
1166 gen_helper_fmac_FT(FREG(B11_8
), cpu_env
,
1167 FREG(0), FREG(B7_4
), FREG(B11_8
));
1171 switch (ctx
->opcode
& 0xff00) {
1172 case 0xc900: /* and #imm,R0 */
1173 tcg_gen_andi_i32(REG(0), REG(0), B7_0
);
1175 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1178 addr
= tcg_temp_new();
1179 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1180 val
= tcg_temp_new();
1181 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1182 tcg_gen_andi_i32(val
, val
, B7_0
);
1183 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1185 tcg_temp_free(addr
);
1188 case 0x8b00: /* bf label */
1189 CHECK_NOT_DELAY_SLOT
1190 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2, false);
1192 case 0x8f00: /* bf/s label */
1193 CHECK_NOT_DELAY_SLOT
1194 tcg_gen_xori_i32(cpu_delayed_cond
, cpu_sr_t
, 1);
1195 ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2;
1196 ctx
->envflags
|= DELAY_SLOT_CONDITIONAL
;
1198 case 0x8900: /* bt label */
1199 CHECK_NOT_DELAY_SLOT
1200 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2, true);
1202 case 0x8d00: /* bt/s label */
1203 CHECK_NOT_DELAY_SLOT
1204 tcg_gen_mov_i32(cpu_delayed_cond
, cpu_sr_t
);
1205 ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2;
1206 ctx
->envflags
|= DELAY_SLOT_CONDITIONAL
;
1208 case 0x8800: /* cmp/eq #imm,R0 */
1209 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, REG(0), B7_0s
);
1211 case 0xc400: /* mov.b @(disp,GBR),R0 */
1213 TCGv addr
= tcg_temp_new();
1214 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1215 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_SB
);
1216 tcg_temp_free(addr
);
1219 case 0xc500: /* mov.w @(disp,GBR),R0 */
1221 TCGv addr
= tcg_temp_new();
1222 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1223 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESW
);
1224 tcg_temp_free(addr
);
1227 case 0xc600: /* mov.l @(disp,GBR),R0 */
1229 TCGv addr
= tcg_temp_new();
1230 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1231 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESL
);
1232 tcg_temp_free(addr
);
1235 case 0xc000: /* mov.b R0,@(disp,GBR) */
1237 TCGv addr
= tcg_temp_new();
1238 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1239 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_UB
);
1240 tcg_temp_free(addr
);
1243 case 0xc100: /* mov.w R0,@(disp,GBR) */
1245 TCGv addr
= tcg_temp_new();
1246 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1247 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUW
);
1248 tcg_temp_free(addr
);
1251 case 0xc200: /* mov.l R0,@(disp,GBR) */
1253 TCGv addr
= tcg_temp_new();
1254 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1255 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUL
);
1256 tcg_temp_free(addr
);
1259 case 0x8000: /* mov.b R0,@(disp,Rn) */
1261 TCGv addr
= tcg_temp_new();
1262 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1263 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_UB
);
1264 tcg_temp_free(addr
);
1267 case 0x8100: /* mov.w R0,@(disp,Rn) */
1269 TCGv addr
= tcg_temp_new();
1270 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1271 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUW
);
1272 tcg_temp_free(addr
);
1275 case 0x8400: /* mov.b @(disp,Rn),R0 */
1277 TCGv addr
= tcg_temp_new();
1278 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1279 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_SB
);
1280 tcg_temp_free(addr
);
1283 case 0x8500: /* mov.w @(disp,Rn),R0 */
1285 TCGv addr
= tcg_temp_new();
1286 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1287 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESW
);
1288 tcg_temp_free(addr
);
1291 case 0xc700: /* mova @(disp,PC),R0 */
1292 tcg_gen_movi_i32(REG(0), ((ctx
->pc
& 0xfffffffc) + 4 + B7_0
* 4) & ~3);
1294 case 0xcb00: /* or #imm,R0 */
1295 tcg_gen_ori_i32(REG(0), REG(0), B7_0
);
1297 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1300 addr
= tcg_temp_new();
1301 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1302 val
= tcg_temp_new();
1303 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1304 tcg_gen_ori_i32(val
, val
, B7_0
);
1305 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1307 tcg_temp_free(addr
);
1310 case 0xc300: /* trapa #imm */
1313 CHECK_NOT_DELAY_SLOT
1314 gen_save_cpu_state(ctx
, true);
1315 imm
= tcg_const_i32(B7_0
);
1316 gen_helper_trapa(cpu_env
, imm
);
1318 ctx
->bstate
= BS_EXCP
;
1321 case 0xc800: /* tst #imm,R0 */
1323 TCGv val
= tcg_temp_new();
1324 tcg_gen_andi_i32(val
, REG(0), B7_0
);
1325 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
1329 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1331 TCGv val
= tcg_temp_new();
1332 tcg_gen_add_i32(val
, REG(0), cpu_gbr
);
1333 tcg_gen_qemu_ld_i32(val
, val
, ctx
->memidx
, MO_UB
);
1334 tcg_gen_andi_i32(val
, val
, B7_0
);
1335 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
1339 case 0xca00: /* xor #imm,R0 */
1340 tcg_gen_xori_i32(REG(0), REG(0), B7_0
);
1342 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1345 addr
= tcg_temp_new();
1346 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1347 val
= tcg_temp_new();
1348 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1349 tcg_gen_xori_i32(val
, val
, B7_0
);
1350 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1352 tcg_temp_free(addr
);
1357 switch (ctx
->opcode
& 0xf08f) {
1358 case 0x408e: /* ldc Rm,Rn_BANK */
1360 tcg_gen_mov_i32(ALTREG(B6_4
), REG(B11_8
));
1362 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1364 tcg_gen_qemu_ld_i32(ALTREG(B6_4
), REG(B11_8
), ctx
->memidx
, MO_TESL
);
1365 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1367 case 0x0082: /* stc Rm_BANK,Rn */
1369 tcg_gen_mov_i32(REG(B11_8
), ALTREG(B6_4
));
1371 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1374 TCGv addr
= tcg_temp_new();
1375 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1376 tcg_gen_qemu_st_i32(ALTREG(B6_4
), addr
, ctx
->memidx
, MO_TEUL
);
1377 tcg_gen_mov_i32(REG(B11_8
), addr
);
1378 tcg_temp_free(addr
);
1383 switch (ctx
->opcode
& 0xf0ff) {
1384 case 0x0023: /* braf Rn */
1385 CHECK_NOT_DELAY_SLOT
1386 tcg_gen_addi_i32(cpu_delayed_pc
, REG(B11_8
), ctx
->pc
+ 4);
1387 ctx
->envflags
|= DELAY_SLOT
;
1388 ctx
->delayed_pc
= (uint32_t) - 1;
1390 case 0x0003: /* bsrf Rn */
1391 CHECK_NOT_DELAY_SLOT
1392 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1393 tcg_gen_add_i32(cpu_delayed_pc
, REG(B11_8
), cpu_pr
);
1394 ctx
->envflags
|= DELAY_SLOT
;
1395 ctx
->delayed_pc
= (uint32_t) - 1;
1397 case 0x4015: /* cmp/pl Rn */
1398 tcg_gen_setcondi_i32(TCG_COND_GT
, cpu_sr_t
, REG(B11_8
), 0);
1400 case 0x4011: /* cmp/pz Rn */
1401 tcg_gen_setcondi_i32(TCG_COND_GE
, cpu_sr_t
, REG(B11_8
), 0);
1403 case 0x4010: /* dt Rn */
1404 tcg_gen_subi_i32(REG(B11_8
), REG(B11_8
), 1);
1405 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, REG(B11_8
), 0);
1407 case 0x402b: /* jmp @Rn */
1408 CHECK_NOT_DELAY_SLOT
1409 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1410 ctx
->envflags
|= DELAY_SLOT
;
1411 ctx
->delayed_pc
= (uint32_t) - 1;
1413 case 0x400b: /* jsr @Rn */
1414 CHECK_NOT_DELAY_SLOT
1415 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1416 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1417 ctx
->envflags
|= DELAY_SLOT
;
1418 ctx
->delayed_pc
= (uint32_t) - 1;
1420 case 0x400e: /* ldc Rm,SR */
1423 TCGv val
= tcg_temp_new();
1424 tcg_gen_andi_i32(val
, REG(B11_8
), 0x700083f3);
1427 ctx
->bstate
= BS_STOP
;
1430 case 0x4007: /* ldc.l @Rm+,SR */
1433 TCGv val
= tcg_temp_new();
1434 tcg_gen_qemu_ld_i32(val
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
1435 tcg_gen_andi_i32(val
, val
, 0x700083f3);
1438 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1439 ctx
->bstate
= BS_STOP
;
1442 case 0x0002: /* stc SR,Rn */
1444 gen_read_sr(REG(B11_8
));
1446 case 0x4003: /* stc SR,@-Rn */
1449 TCGv addr
= tcg_temp_new();
1450 TCGv val
= tcg_temp_new();
1451 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1453 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_TEUL
);
1454 tcg_gen_mov_i32(REG(B11_8
), addr
);
1456 tcg_temp_free(addr
);
1459 #define LD(reg,ldnum,ldpnum,prechk) \
1462 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1466 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1467 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1469 #define ST(reg,stnum,stpnum,prechk) \
1472 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1477 TCGv addr = tcg_temp_new(); \
1478 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1479 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1480 tcg_gen_mov_i32(REG(B11_8), addr); \
1481 tcg_temp_free(addr); \
1484 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1485 LD(reg,ldnum,ldpnum,prechk) \
1486 ST(reg,stnum,stpnum,prechk)
1487 LDST(gbr
, 0x401e, 0x4017, 0x0012, 0x4013, {})
1488 LDST(vbr
, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED
)
1489 LDST(ssr
, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED
)
1490 LDST(spc
, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED
)
1491 ST(sgr
, 0x003a, 0x4032, CHECK_PRIVILEGED
)
1492 LD(sgr
, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A
)
1493 LDST(dbr
, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED
)
1494 LDST(mach
, 0x400a, 0x4006, 0x000a, 0x4002, {})
1495 LDST(macl
, 0x401a, 0x4016, 0x001a, 0x4012, {})
1496 LDST(pr
, 0x402a, 0x4026, 0x002a, 0x4022, {})
1497 LDST(fpul
, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED
})
1498 case 0x406a: /* lds Rm,FPSCR */
1500 gen_helper_ld_fpscr(cpu_env
, REG(B11_8
));
1501 ctx
->bstate
= BS_STOP
;
1503 case 0x4066: /* lds.l @Rm+,FPSCR */
1506 TCGv addr
= tcg_temp_new();
1507 tcg_gen_qemu_ld_i32(addr
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
1508 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1509 gen_helper_ld_fpscr(cpu_env
, addr
);
1510 tcg_temp_free(addr
);
1511 ctx
->bstate
= BS_STOP
;
1514 case 0x006a: /* sts FPSCR,Rn */
1516 tcg_gen_andi_i32(REG(B11_8
), cpu_fpscr
, 0x003fffff);
1518 case 0x4062: /* sts FPSCR,@-Rn */
1522 val
= tcg_temp_new();
1523 tcg_gen_andi_i32(val
, cpu_fpscr
, 0x003fffff);
1524 addr
= tcg_temp_new();
1525 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1526 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_TEUL
);
1527 tcg_gen_mov_i32(REG(B11_8
), addr
);
1528 tcg_temp_free(addr
);
1532 case 0x00c3: /* movca.l R0,@Rm */
1534 TCGv val
= tcg_temp_new();
1535 tcg_gen_qemu_ld_i32(val
, REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1536 gen_helper_movcal(cpu_env
, REG(B11_8
), val
);
1537 tcg_gen_qemu_st_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1539 ctx
->has_movcal
= 1;
1541 case 0x40a9: /* movua.l @Rm,R0 */
1543 /* Load non-boundary-aligned data */
1544 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
,
1545 MO_TEUL
| MO_UNALN
);
1548 case 0x40e9: /* movua.l @Rm+,R0 */
1550 /* Load non-boundary-aligned data */
1551 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
,
1552 MO_TEUL
| MO_UNALN
);
1553 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1556 case 0x0029: /* movt Rn */
1557 tcg_gen_mov_i32(REG(B11_8
), cpu_sr_t
);
1562 If (T == 1) R0 -> (Rn)
1567 TCGLabel
*label
= gen_new_label();
1568 tcg_gen_mov_i32(cpu_sr_t
, cpu_ldst
);
1569 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ldst
, 0, label
);
1570 tcg_gen_qemu_st_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1571 gen_set_label(label
);
1572 tcg_gen_movi_i32(cpu_ldst
, 0);
1579 When interrupt/exception
1583 tcg_gen_movi_i32(cpu_ldst
, 0);
1584 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TESL
);
1585 tcg_gen_movi_i32(cpu_ldst
, 1);
1587 case 0x0093: /* ocbi @Rn */
1589 gen_helper_ocbi(cpu_env
, REG(B11_8
));
1592 case 0x00a3: /* ocbp @Rn */
1593 case 0x00b3: /* ocbwb @Rn */
1594 /* These instructions are supposed to do nothing in case of
1595 a cache miss. Given that we only partially emulate caches
1596 it is safe to simply ignore them. */
1598 case 0x0083: /* pref @Rn */
1600 case 0x00d3: /* prefi @Rn */
1603 case 0x00e3: /* icbi @Rn */
1606 case 0x00ab: /* synco */
1608 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1611 case 0x4024: /* rotcl Rn */
1613 TCGv tmp
= tcg_temp_new();
1614 tcg_gen_mov_i32(tmp
, cpu_sr_t
);
1615 tcg_gen_shri_i32(cpu_sr_t
, REG(B11_8
), 31);
1616 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1617 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), tmp
);
1621 case 0x4025: /* rotcr Rn */
1623 TCGv tmp
= tcg_temp_new();
1624 tcg_gen_shli_i32(tmp
, cpu_sr_t
, 31);
1625 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 1);
1626 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1627 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), tmp
);
1631 case 0x4004: /* rotl Rn */
1632 tcg_gen_rotli_i32(REG(B11_8
), REG(B11_8
), 1);
1633 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 0);
1635 case 0x4005: /* rotr Rn */
1636 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 0);
1637 tcg_gen_rotri_i32(REG(B11_8
), REG(B11_8
), 1);
1639 case 0x4000: /* shll Rn */
1640 case 0x4020: /* shal Rn */
1641 tcg_gen_shri_i32(cpu_sr_t
, REG(B11_8
), 31);
1642 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1644 case 0x4021: /* shar Rn */
1645 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 1);
1646 tcg_gen_sari_i32(REG(B11_8
), REG(B11_8
), 1);
1648 case 0x4001: /* shlr Rn */
1649 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 1);
1650 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1652 case 0x4008: /* shll2 Rn */
1653 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 2);
1655 case 0x4018: /* shll8 Rn */
1656 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 8);
1658 case 0x4028: /* shll16 Rn */
1659 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 16);
1661 case 0x4009: /* shlr2 Rn */
1662 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 2);
1664 case 0x4019: /* shlr8 Rn */
1665 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 8);
1667 case 0x4029: /* shlr16 Rn */
1668 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 16);
1670 case 0x401b: /* tas.b @Rn */
1672 TCGv val
= tcg_const_i32(0x80);
1673 tcg_gen_atomic_fetch_or_i32(val
, REG(B11_8
), val
,
1674 ctx
->memidx
, MO_UB
);
1675 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
1679 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1681 tcg_gen_mov_i32(FREG(B11_8
), cpu_fpul
);
1683 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1685 tcg_gen_mov_i32(cpu_fpul
, FREG(B11_8
));
1687 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1689 if (ctx
->tbflags
& FPSCR_PR
) {
1691 if (ctx
->opcode
& 0x0100) {
1694 fp
= tcg_temp_new_i64();
1695 gen_helper_float_DT(fp
, cpu_env
, cpu_fpul
);
1696 gen_store_fpr64(ctx
, fp
, B11_8
);
1697 tcg_temp_free_i64(fp
);
1700 gen_helper_float_FT(FREG(B11_8
), cpu_env
, cpu_fpul
);
1703 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1705 if (ctx
->tbflags
& FPSCR_PR
) {
1707 if (ctx
->opcode
& 0x0100) {
1710 fp
= tcg_temp_new_i64();
1711 gen_load_fpr64(ctx
, fp
, B11_8
);
1712 gen_helper_ftrc_DT(cpu_fpul
, cpu_env
, fp
);
1713 tcg_temp_free_i64(fp
);
1716 gen_helper_ftrc_FT(cpu_fpul
, cpu_env
, FREG(B11_8
));
1719 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1721 tcg_gen_xori_i32(FREG(B11_8
), FREG(B11_8
), 0x80000000);
1723 case 0xf05d: /* fabs FRn/DRn - FPCSR: Nothing */
1725 tcg_gen_andi_i32(FREG(B11_8
), FREG(B11_8
), 0x7fffffff);
1727 case 0xf06d: /* fsqrt FRn */
1729 if (ctx
->tbflags
& FPSCR_PR
) {
1730 if (ctx
->opcode
& 0x0100) {
1733 TCGv_i64 fp
= tcg_temp_new_i64();
1734 gen_load_fpr64(ctx
, fp
, B11_8
);
1735 gen_helper_fsqrt_DT(fp
, cpu_env
, fp
);
1736 gen_store_fpr64(ctx
, fp
, B11_8
);
1737 tcg_temp_free_i64(fp
);
1739 gen_helper_fsqrt_FT(FREG(B11_8
), cpu_env
, FREG(B11_8
));
1742 case 0xf07d: /* fsrra FRn */
1745 gen_helper_fsrra_FT(FREG(B11_8
), cpu_env
, FREG(B11_8
));
1747 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1750 tcg_gen_movi_i32(FREG(B11_8
), 0);
1752 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1755 tcg_gen_movi_i32(FREG(B11_8
), 0x3f800000);
1757 case 0xf0ad: /* fcnvsd FPUL,DRn */
1760 TCGv_i64 fp
= tcg_temp_new_i64();
1761 gen_helper_fcnvsd_FT_DT(fp
, cpu_env
, cpu_fpul
);
1762 gen_store_fpr64(ctx
, fp
, B11_8
);
1763 tcg_temp_free_i64(fp
);
1766 case 0xf0bd: /* fcnvds DRn,FPUL */
1769 TCGv_i64 fp
= tcg_temp_new_i64();
1770 gen_load_fpr64(ctx
, fp
, B11_8
);
1771 gen_helper_fcnvds_DT_FT(cpu_fpul
, cpu_env
, fp
);
1772 tcg_temp_free_i64(fp
);
1775 case 0xf0ed: /* fipr FVm,FVn */
1779 TCGv m
= tcg_const_i32((ctx
->opcode
>> 8) & 3);
1780 TCGv n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1781 gen_helper_fipr(cpu_env
, m
, n
);
1787 case 0xf0fd: /* ftrv XMTRX,FVn */
1791 if ((ctx
->opcode
& 0x0300) != 0x0100) {
1794 TCGv n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1795 gen_helper_ftrv(cpu_env
, n
);
1802 fprintf(stderr
, "unknown instruction 0x%04x at pc 0x%08x\n",
1803 ctx
->opcode
, ctx
->pc
);
1807 if (ctx
->envflags
& DELAY_SLOT_MASK
) {
1809 gen_save_cpu_state(ctx
, true);
1810 gen_helper_raise_slot_illegal_instruction(cpu_env
);
1812 gen_save_cpu_state(ctx
, true);
1813 gen_helper_raise_illegal_instruction(cpu_env
);
1815 ctx
->bstate
= BS_EXCP
;
1819 gen_save_cpu_state(ctx
, true);
1820 if (ctx
->envflags
& DELAY_SLOT_MASK
) {
1821 gen_helper_raise_slot_fpu_disable(cpu_env
);
1823 gen_helper_raise_fpu_disable(cpu_env
);
1825 ctx
->bstate
= BS_EXCP
;
1829 static void decode_opc(DisasContext
* ctx
)
1831 uint32_t old_flags
= ctx
->envflags
;
1835 if (old_flags
& DELAY_SLOT_MASK
) {
1836 /* go out of the delay slot */
1837 ctx
->envflags
&= ~DELAY_SLOT_MASK
;
1839 /* When in an exclusive region, we must continue to the end
1840 for conditional branches. */
1841 if (ctx
->tbflags
& GUSA_EXCLUSIVE
1842 && old_flags
& DELAY_SLOT_CONDITIONAL
) {
1843 gen_delayed_conditional_jump(ctx
);
1846 /* Otherwise this is probably an invalid gUSA region.
1847 Drop the GUSA bits so the next TB doesn't see them. */
1848 ctx
->envflags
&= ~GUSA_MASK
;
1850 tcg_gen_movi_i32(cpu_flags
, ctx
->envflags
);
1851 ctx
->bstate
= BS_BRANCH
;
1852 if (old_flags
& DELAY_SLOT_CONDITIONAL
) {
1853 gen_delayed_conditional_jump(ctx
);
1860 #ifdef CONFIG_USER_ONLY
1861 /* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
1862 Upon an interrupt, a real kernel would simply notice magic values in
1863 the registers and reset the PC to the start of the sequence.
1865 For QEMU, we cannot do this in quite the same way. Instead, we notice
1866 the normal start of such a sequence (mov #-x,r15). While we can handle
1867 any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1868 sequences and transform them into atomic operations as seen by the host.
1870 static int decode_gusa(DisasContext
*ctx
, CPUSH4State
*env
, int *pmax_insns
)
1873 int ld_adr
, ld_dst
, ld_mop
;
1874 int op_dst
, op_src
, op_opc
;
1875 int mv_src
, mt_dst
, st_src
, st_mop
;
1878 uint32_t pc
= ctx
->pc
;
1879 uint32_t pc_end
= ctx
->tb
->cs_base
;
1880 int backup
= sextract32(ctx
->tbflags
, GUSA_SHIFT
, 8);
1881 int max_insns
= (pc_end
- pc
) / 2;
1884 if (pc
!= pc_end
+ backup
|| max_insns
< 2) {
1885 /* This is a malformed gUSA region. Don't do anything special,
1886 since the interpreter is likely to get confused. */
1887 ctx
->envflags
&= ~GUSA_MASK
;
1891 if (ctx
->tbflags
& GUSA_EXCLUSIVE
) {
1892 /* Regardless of single-stepping or the end of the page,
1893 we must complete execution of the gUSA region while
1894 holding the exclusive lock. */
1895 *pmax_insns
= max_insns
;
1899 /* The state machine below will consume only a few insns.
1900 If there are more than that in a region, fail now. */
1901 if (max_insns
> ARRAY_SIZE(insns
)) {
1905 /* Read all of the insns for the region. */
1906 for (i
= 0; i
< max_insns
; ++i
) {
1907 insns
[i
] = cpu_lduw_code(env
, pc
+ i
* 2);
1910 ld_adr
= ld_dst
= ld_mop
= -1;
1912 op_dst
= op_src
= op_opc
= -1;
1914 st_src
= st_mop
= -1;
1915 TCGV_UNUSED(op_arg
);
1919 do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
1922 * Expect a load to begin the region.
1925 switch (ctx
->opcode
& 0xf00f) {
1926 case 0x6000: /* mov.b @Rm,Rn */
1929 case 0x6001: /* mov.w @Rm,Rn */
1932 case 0x6002: /* mov.l @Rm,Rn */
1940 if (ld_adr
== ld_dst
) {
1943 /* Unless we see a mov, any two-operand operation must use ld_dst. */
1947 * Expect an optional register move.
1950 switch (ctx
->opcode
& 0xf00f) {
1951 case 0x6003: /* mov Rm,Rn */
1952 /* Here we want to recognize ld_dst being saved for later consumtion,
1953 or for another input register being copied so that ld_dst need not
1954 be clobbered during the operation. */
1957 if (op_dst
== ld_dst
) {
1958 /* Overwriting the load output. */
1961 if (mv_src
!= ld_dst
) {
1962 /* Copying a new input; constrain op_src to match the load. */
1968 /* Put back and re-examine as operation. */
1973 * Expect the operation.
1976 switch (ctx
->opcode
& 0xf00f) {
1977 case 0x300c: /* add Rm,Rn */
1978 op_opc
= INDEX_op_add_i32
;
1980 case 0x2009: /* and Rm,Rn */
1981 op_opc
= INDEX_op_and_i32
;
1983 case 0x200a: /* xor Rm,Rn */
1984 op_opc
= INDEX_op_xor_i32
;
1986 case 0x200b: /* or Rm,Rn */
1987 op_opc
= INDEX_op_or_i32
;
1989 /* The operation register should be as expected, and the
1990 other input cannot depend on the load. */
1991 if (op_dst
!= B11_8
) {
1995 /* Unconstrainted input. */
1997 } else if (op_src
== B7_4
) {
1998 /* Constrained input matched load. All operations are
1999 commutative; "swap" them by "moving" the load output
2000 to the (implicit) first argument and the move source
2001 to the (explicit) second argument. */
2006 op_arg
= REG(op_src
);
2009 case 0x6007: /* not Rm,Rn */
2010 if (ld_dst
!= B7_4
|| mv_src
>= 0) {
2014 op_opc
= INDEX_op_xor_i32
;
2015 op_arg
= tcg_const_i32(-1);
2018 case 0x7000 ... 0x700f: /* add #imm,Rn */
2019 if (op_dst
!= B11_8
|| mv_src
>= 0) {
2022 op_opc
= INDEX_op_add_i32
;
2023 op_arg
= tcg_const_i32(B7_0s
);
2026 case 0x3000: /* cmp/eq Rm,Rn */
2027 /* Looking for the middle of a compare-and-swap sequence,
2028 beginning with the compare. Operands can be either order,
2029 but with only one overlapping the load. */
2030 if ((ld_dst
== B11_8
) + (ld_dst
== B7_4
) != 1 || mv_src
>= 0) {
2033 op_opc
= INDEX_op_setcond_i32
; /* placeholder */
2034 op_src
= (ld_dst
== B11_8
? B7_4
: B11_8
);
2035 op_arg
= REG(op_src
);
2038 switch (ctx
->opcode
& 0xff00) {
2039 case 0x8b00: /* bf label */
2040 case 0x8f00: /* bf/s label */
2041 if (pc
+ (i
+ 1 + B7_0s
) * 2 != pc_end
) {
2044 if ((ctx
->opcode
& 0xff00) == 0x8b00) { /* bf label */
2047 /* We're looking to unconditionally modify Rn with the
2048 result of the comparison, within the delay slot of
2049 the branch. This is used by older gcc. */
2051 if ((ctx
->opcode
& 0xf0ff) == 0x0029) { /* movt Rn */
2063 case 0x2008: /* tst Rm,Rn */
2064 /* Looking for a compare-and-swap against zero. */
2065 if (ld_dst
!= B11_8
|| ld_dst
!= B7_4
|| mv_src
>= 0) {
2068 op_opc
= INDEX_op_setcond_i32
;
2069 op_arg
= tcg_const_i32(0);
2072 if ((ctx
->opcode
& 0xff00) != 0x8900 /* bt label */
2073 || pc
+ (i
+ 1 + B7_0s
) * 2 != pc_end
) {
2079 /* Put back and re-examine as store. */
2086 /* The store must be the last insn. */
2087 if (i
!= max_insns
- 1) {
2091 switch (ctx
->opcode
& 0xf00f) {
2092 case 0x2000: /* mov.b Rm,@Rn */
2095 case 0x2001: /* mov.w Rm,@Rn */
2098 case 0x2002: /* mov.l Rm,@Rn */
2104 /* The store must match the load. */
2105 if (ld_adr
!= B11_8
|| st_mop
!= (ld_mop
& MO_SIZE
)) {
2113 * Emit the operation.
2115 tcg_gen_insn_start(pc
, ctx
->envflags
);
2118 /* No operation found. Look for exchange pattern. */
2119 if (st_src
== ld_dst
|| mv_src
>= 0) {
2122 tcg_gen_atomic_xchg_i32(REG(ld_dst
), REG(ld_adr
), REG(st_src
),
2123 ctx
->memidx
, ld_mop
);
2126 case INDEX_op_add_i32
:
2127 if (op_dst
!= st_src
) {
2130 if (op_dst
== ld_dst
&& st_mop
== MO_UL
) {
2131 tcg_gen_atomic_add_fetch_i32(REG(ld_dst
), REG(ld_adr
),
2132 op_arg
, ctx
->memidx
, ld_mop
);
2134 tcg_gen_atomic_fetch_add_i32(REG(ld_dst
), REG(ld_adr
),
2135 op_arg
, ctx
->memidx
, ld_mop
);
2136 if (op_dst
!= ld_dst
) {
2137 /* Note that mop sizes < 4 cannot use add_fetch
2138 because it won't carry into the higher bits. */
2139 tcg_gen_add_i32(REG(op_dst
), REG(ld_dst
), op_arg
);
2144 case INDEX_op_and_i32
:
2145 if (op_dst
!= st_src
) {
2148 if (op_dst
== ld_dst
) {
2149 tcg_gen_atomic_and_fetch_i32(REG(ld_dst
), REG(ld_adr
),
2150 op_arg
, ctx
->memidx
, ld_mop
);
2152 tcg_gen_atomic_fetch_and_i32(REG(ld_dst
), REG(ld_adr
),
2153 op_arg
, ctx
->memidx
, ld_mop
);
2154 tcg_gen_and_i32(REG(op_dst
), REG(ld_dst
), op_arg
);
2158 case INDEX_op_or_i32
:
2159 if (op_dst
!= st_src
) {
2162 if (op_dst
== ld_dst
) {
2163 tcg_gen_atomic_or_fetch_i32(REG(ld_dst
), REG(ld_adr
),
2164 op_arg
, ctx
->memidx
, ld_mop
);
2166 tcg_gen_atomic_fetch_or_i32(REG(ld_dst
), REG(ld_adr
),
2167 op_arg
, ctx
->memidx
, ld_mop
);
2168 tcg_gen_or_i32(REG(op_dst
), REG(ld_dst
), op_arg
);
2172 case INDEX_op_xor_i32
:
2173 if (op_dst
!= st_src
) {
2176 if (op_dst
== ld_dst
) {
2177 tcg_gen_atomic_xor_fetch_i32(REG(ld_dst
), REG(ld_adr
),
2178 op_arg
, ctx
->memidx
, ld_mop
);
2180 tcg_gen_atomic_fetch_xor_i32(REG(ld_dst
), REG(ld_adr
),
2181 op_arg
, ctx
->memidx
, ld_mop
);
2182 tcg_gen_xor_i32(REG(op_dst
), REG(ld_dst
), op_arg
);
2186 case INDEX_op_setcond_i32
:
2187 if (st_src
== ld_dst
) {
2190 tcg_gen_atomic_cmpxchg_i32(REG(ld_dst
), REG(ld_adr
), op_arg
,
2191 REG(st_src
), ctx
->memidx
, ld_mop
);
2192 tcg_gen_setcond_i32(TCG_COND_EQ
, cpu_sr_t
, REG(ld_dst
), op_arg
);
2194 tcg_gen_mov_i32(REG(mt_dst
), cpu_sr_t
);
2199 g_assert_not_reached();
2202 /* If op_src is not a valid register, then op_arg was a constant. */
2204 tcg_temp_free_i32(op_arg
);
2207 /* The entire region has been translated. */
2208 ctx
->envflags
&= ~GUSA_MASK
;
2213 qemu_log_mask(LOG_UNIMP
, "Unrecognized gUSA sequence %08x-%08x\n",
2216 /* Restart with the EXCLUSIVE bit set, within a TB run via
2217 cpu_exec_step_atomic holding the exclusive lock. */
2218 tcg_gen_insn_start(pc
, ctx
->envflags
);
2219 ctx
->envflags
|= GUSA_EXCLUSIVE
;
2220 gen_save_cpu_state(ctx
, false);
2221 gen_helper_exclusive(cpu_env
);
2222 ctx
->bstate
= BS_EXCP
;
2224 /* We're not executing an instruction, but we must report one for the
2225 purposes of accounting within the TB. We might as well report the
2226 entire region consumed via ctx->pc so that it's immediately available
2227 in the disassembly dump. */
2233 void gen_intermediate_code(CPUState
*cs
, struct TranslationBlock
*tb
)
2235 CPUSH4State
*env
= cs
->env_ptr
;
2237 target_ulong pc_start
;
2243 ctx
.tbflags
= (uint32_t)tb
->flags
;
2244 ctx
.envflags
= tb
->flags
& TB_FLAG_ENVFLAGS_MASK
;
2245 ctx
.bstate
= BS_NONE
;
2246 ctx
.memidx
= (ctx
.tbflags
& (1u << SR_MD
)) == 0 ? 1 : 0;
2247 /* We don't know if the delayed pc came from a dynamic or static branch,
2248 so assume it is a dynamic branch. */
2249 ctx
.delayed_pc
= -1; /* use delayed pc from env pointer */
2251 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
2252 ctx
.features
= env
->features
;
2253 ctx
.has_movcal
= (ctx
.tbflags
& TB_FLAG_PENDING_MOVCA
);
2254 ctx
.gbank
= ((ctx
.tbflags
& (1 << SR_MD
)) &&
2255 (ctx
.tbflags
& (1 << SR_RB
))) * 0x10;
2256 ctx
.fbank
= ctx
.tbflags
& FPSCR_FR
? 0x10 : 0;
2258 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
2259 if (max_insns
== 0) {
2260 max_insns
= CF_COUNT_MASK
;
2262 max_insns
= MIN(max_insns
, TCG_MAX_INSNS
);
2264 /* Since the ISA is fixed-width, we can bound by the number
2265 of instructions remaining on the page. */
2266 num_insns
= -(ctx
.pc
| TARGET_PAGE_MASK
) / 2;
2267 max_insns
= MIN(max_insns
, num_insns
);
2269 /* Single stepping means just that. */
2270 if (ctx
.singlestep_enabled
|| singlestep
) {
2277 #ifdef CONFIG_USER_ONLY
2278 if (ctx
.tbflags
& GUSA_MASK
) {
2279 num_insns
= decode_gusa(&ctx
, env
, &max_insns
);
2283 while (ctx
.bstate
== BS_NONE
2284 && num_insns
< max_insns
2285 && !tcg_op_buf_full()) {
2286 tcg_gen_insn_start(ctx
.pc
, ctx
.envflags
);
2289 if (unlikely(cpu_breakpoint_test(cs
, ctx
.pc
, BP_ANY
))) {
2290 /* We have hit a breakpoint - make sure PC is up-to-date */
2291 gen_save_cpu_state(&ctx
, true);
2292 gen_helper_debug(cpu_env
);
2293 ctx
.bstate
= BS_EXCP
;
2294 /* The address covered by the breakpoint must be included in
2295 [tb->pc, tb->pc + tb->size) in order to for it to be
2296 properly cleared -- thus we increment the PC here so that
2297 the logic setting tb->size below does the right thing. */
2302 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
2306 ctx
.opcode
= cpu_lduw_code(env
, ctx
.pc
);
2310 if (tb
->cflags
& CF_LAST_IO
) {
2314 if (ctx
.tbflags
& GUSA_EXCLUSIVE
) {
2315 /* Ending the region of exclusivity. Clear the bits. */
2316 ctx
.envflags
&= ~GUSA_MASK
;
2319 if (cs
->singlestep_enabled
) {
2320 gen_save_cpu_state(&ctx
, true);
2321 gen_helper_debug(cpu_env
);
2323 switch (ctx
.bstate
) {
2325 gen_save_cpu_state(&ctx
, true);
2329 gen_save_cpu_state(&ctx
, false);
2330 gen_goto_tb(&ctx
, 0, ctx
.pc
);
2340 gen_tb_end(tb
, num_insns
);
2342 tb
->size
= ctx
.pc
- pc_start
;
2343 tb
->icount
= num_insns
;
2346 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
2347 && qemu_log_in_addr_range(pc_start
)) {
2349 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2350 log_target_disas(cs
, pc_start
, ctx
.pc
- pc_start
, 0);
2357 void restore_state_to_opc(CPUSH4State
*env
, TranslationBlock
*tb
,
2361 env
->flags
= data
[1];
2362 /* Theoretically delayed_pc should also be restored. In practice the
2363 branch instruction is re-executed after exception, so the delayed
2364 branch target will be recomputed. */