4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "qemu/osdep.h"
24 #include "disas/disas.h"
25 #include "exec/exec-all.h"
27 #include "exec/cpu_ldst.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
32 #include "trace-tcg.h"
36 typedef struct DisasContext
{
37 struct TranslationBlock
*tb
;
44 int singlestep_enabled
;
49 #if defined(CONFIG_USER_ONLY)
50 #define IS_USER(ctx) 1
52 #define IS_USER(ctx) (!(ctx->flags & (1u << SR_MD)))
56 BS_NONE
= 0, /* We go out of the TB without reaching a branch or an
59 BS_STOP
= 1, /* We want to stop translation for any reason */
60 BS_BRANCH
= 2, /* We reached a branch condition */
61 BS_EXCP
= 3, /* We reached an exception condition */
64 /* global register indexes */
65 static TCGv_env cpu_env
;
66 static TCGv cpu_gregs
[24];
67 static TCGv cpu_sr
, cpu_sr_m
, cpu_sr_q
, cpu_sr_t
;
68 static TCGv cpu_pc
, cpu_ssr
, cpu_spc
, cpu_gbr
;
69 static TCGv cpu_vbr
, cpu_sgr
, cpu_dbr
, cpu_mach
, cpu_macl
;
70 static TCGv cpu_pr
, cpu_fpscr
, cpu_fpul
, cpu_ldst
;
71 static TCGv cpu_fregs
[32];
73 /* internal register indexes */
74 static TCGv cpu_flags
, cpu_delayed_pc
;
76 #include "exec/gen-icount.h"
/* sh4_translate_init: create the TCG global variables that mirror the
 * CPUSH4State fields: the 24 banked general registers, the 32 banked
 * FP registers, PC, the split SR representation (SR plus separate
 * one-bit Q/M/T globals), and the remaining control registers.
 * Guarded by 'done_init' so the globals are only registered once.
 * NOTE(review): several lines of this function are missing from this
 * view (braces, the done_init early-return, 'int i;', the gregnames[i]
 * / fregnames[i] name arguments closing the two loops, and the
 * "DELAYED_PC" name); the surviving code is left byte-identical.
 */
78 void sh4_translate_init(void)
81 static int done_init
= 0;
/* Debugger-visible names for the general-register globals. */
82 static const char * const gregnames
[24] = {
83 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
84 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
85 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
86 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
87 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
/* Debugger-visible names for the FP-register globals. */
89 static const char * const fregnames
[32] = {
90 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
91 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
92 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
93 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
94 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
95 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
96 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
97 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
/* The env pointer itself lives in a fixed host register (TCG_AREG0). */
103 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
/* Both register banks for R0-R7 plus R8-R15: 24 globals in total. */
105 for (i
= 0; i
< 24; i
++)
106 cpu_gregs
[i
] = tcg_global_mem_new_i32(cpu_env
,
107 offsetof(CPUSH4State
, gregs
[i
]),
110 cpu_pc
= tcg_global_mem_new_i32(cpu_env
,
111 offsetof(CPUSH4State
, pc
), "PC");
/* SR is split: cpu_sr holds everything except Q, M and T, which get
 * their own one-bit globals (see gen_read_sr/gen_write_sr). */
112 cpu_sr
= tcg_global_mem_new_i32(cpu_env
,
113 offsetof(CPUSH4State
, sr
), "SR");
114 cpu_sr_m
= tcg_global_mem_new_i32(cpu_env
,
115 offsetof(CPUSH4State
, sr_m
), "SR_M");
116 cpu_sr_q
= tcg_global_mem_new_i32(cpu_env
,
117 offsetof(CPUSH4State
, sr_q
), "SR_Q");
118 cpu_sr_t
= tcg_global_mem_new_i32(cpu_env
,
119 offsetof(CPUSH4State
, sr_t
), "SR_T");
120 cpu_ssr
= tcg_global_mem_new_i32(cpu_env
,
121 offsetof(CPUSH4State
, ssr
), "SSR");
122 cpu_spc
= tcg_global_mem_new_i32(cpu_env
,
123 offsetof(CPUSH4State
, spc
), "SPC");
124 cpu_gbr
= tcg_global_mem_new_i32(cpu_env
,
125 offsetof(CPUSH4State
, gbr
), "GBR");
126 cpu_vbr
= tcg_global_mem_new_i32(cpu_env
,
127 offsetof(CPUSH4State
, vbr
), "VBR");
128 cpu_sgr
= tcg_global_mem_new_i32(cpu_env
,
129 offsetof(CPUSH4State
, sgr
), "SGR");
130 cpu_dbr
= tcg_global_mem_new_i32(cpu_env
,
131 offsetof(CPUSH4State
, dbr
), "DBR");
132 cpu_mach
= tcg_global_mem_new_i32(cpu_env
,
133 offsetof(CPUSH4State
, mach
), "MACH");
134 cpu_macl
= tcg_global_mem_new_i32(cpu_env
,
135 offsetof(CPUSH4State
, macl
), "MACL");
136 cpu_pr
= tcg_global_mem_new_i32(cpu_env
,
137 offsetof(CPUSH4State
, pr
), "PR");
138 cpu_fpscr
= tcg_global_mem_new_i32(cpu_env
,
139 offsetof(CPUSH4State
, fpscr
), "FPSCR");
140 cpu_fpul
= tcg_global_mem_new_i32(cpu_env
,
141 offsetof(CPUSH4State
, fpul
), "FPUL");
/* Translator-internal state, underscored so the names cannot clash
 * with architectural register names. */
143 cpu_flags
= tcg_global_mem_new_i32(cpu_env
,
144 offsetof(CPUSH4State
, flags
), "_flags_");
145 cpu_delayed_pc
= tcg_global_mem_new_i32(cpu_env
,
146 offsetof(CPUSH4State
, delayed_pc
),
148 cpu_ldst
= tcg_global_mem_new_i32(cpu_env
,
149 offsetof(CPUSH4State
, ldst
), "_ldst_");
/* Both FP banks: 32 single-precision register globals. */
151 for (i
= 0; i
< 32; i
++)
152 cpu_fregs
[i
] = tcg_global_mem_new_i32(cpu_env
,
153 offsetof(CPUSH4State
, fregs
[i
]),
/* superh_cpu_dump_state: print the CPU register state (PC, SR and the
 * control registers, then R0-R23 four per line, then the delay-slot
 * status) to @f via @cpu_fprintf, for monitor/debug output.
 * NOTE(review): braces, 'int i;' and the delayed_pc arguments of the
 * last two cpu_fprintf calls are missing from this view; the surviving
 * code is left byte-identical.
 */
159 void superh_cpu_dump_state(CPUState
*cs
, FILE *f
,
160 fprintf_function cpu_fprintf
, int flags
)
162 SuperHCPU
*cpu
= SUPERH_CPU(cs
);
163 CPUSH4State
*env
= &cpu
->env
;
/* SR is stored split; cpu_read_sr() recomposes the architectural value. */
165 cpu_fprintf(f
, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
166 env
->pc
, cpu_read_sr(env
), env
->pr
, env
->fpscr
);
167 cpu_fprintf(f
, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
168 env
->spc
, env
->ssr
, env
->gbr
, env
->vbr
);
169 cpu_fprintf(f
, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
170 env
->sgr
, env
->dbr
, env
->delayed_pc
, env
->fpul
);
/* All 24 general registers (both banks), four per output line. */
171 for (i
= 0; i
< 24; i
+= 4) {
172 cpu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
173 i
, env
->gregs
[i
], i
+ 1, env
->gregs
[i
+ 1],
174 i
+ 2, env
->gregs
[i
+ 2], i
+ 3, env
->gregs
[i
+ 3]);
/* Report whether we stopped inside a (conditional) delay slot. */
176 if (env
->flags
& DELAY_SLOT
) {
177 cpu_fprintf(f
, "in delay slot (delayed_pc=0x%08x)\n",
179 } else if (env
->flags
& DELAY_SLOT_CONDITIONAL
) {
180 cpu_fprintf(f
, "in conditional delay slot (delayed_pc=0x%08x)\n",
185 static void gen_read_sr(TCGv dst
)
187 TCGv t0
= tcg_temp_new();
188 tcg_gen_shli_i32(t0
, cpu_sr_q
, SR_Q
);
189 tcg_gen_or_i32(dst
, dst
, t0
);
190 tcg_gen_shli_i32(t0
, cpu_sr_m
, SR_M
);
191 tcg_gen_or_i32(dst
, dst
, t0
);
192 tcg_gen_shli_i32(t0
, cpu_sr_t
, SR_T
);
193 tcg_gen_or_i32(dst
, cpu_sr
, t0
);
194 tcg_temp_free_i32(t0
);
197 static void gen_write_sr(TCGv src
)
199 tcg_gen_andi_i32(cpu_sr
, src
,
200 ~((1u << SR_Q
) | (1u << SR_M
) | (1u << SR_T
)));
201 tcg_gen_shri_i32(cpu_sr_q
, src
, SR_Q
);
202 tcg_gen_andi_i32(cpu_sr_q
, cpu_sr_q
, 1);
203 tcg_gen_shri_i32(cpu_sr_m
, src
, SR_M
);
204 tcg_gen_andi_i32(cpu_sr_m
, cpu_sr_m
, 1);
205 tcg_gen_shri_i32(cpu_sr_t
, src
, SR_T
);
206 tcg_gen_andi_i32(cpu_sr_t
, cpu_sr_t
, 1);
/* use_goto_tb: decide whether a direct (chained) TB jump to @dest is
 * permitted.  Chaining is refused under single-stepping; in system
 * mode the target must lie in the same guest page as the TB start.
 * NOTE(review): the single-step early-return body and the
 * CONFIG_USER_ONLY #else branch are missing from this view; the
 * surviving code is left byte-identical.
 */
209 static inline bool use_goto_tb(DisasContext
*ctx
, target_ulong dest
)
211 if (unlikely(ctx
->singlestep_enabled
)) {
215 #ifndef CONFIG_USER_ONLY
216 return (ctx
->tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
);
/* gen_goto_tb: jump to @dest using TB-chaining slot @n when
 * use_goto_tb() allows it; otherwise just load cpu_pc and, under
 * single-stepping, raise the debug exception.
 * NOTE(review): the tcg_gen_goto_tb(n) call, the else-branch brace and
 * the final non-chained exit are missing from this view; the surviving
 * code is left byte-identical.
 */
222 static void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
224 if (use_goto_tb(ctx
, dest
)) {
225 /* Use a direct jump if in same page and singlestep not enabled */
227 tcg_gen_movi_i32(cpu_pc
, dest
);
/* The TB pointer plus chaining index is encoded in the exit value. */
228 tcg_gen_exit_tb((uintptr_t)ctx
->tb
+ n
);
230 tcg_gen_movi_i32(cpu_pc
, dest
);
231 if (ctx
->singlestep_enabled
)
232 gen_helper_debug(cpu_env
);
/* gen_jump: finish a jump.  delayed_pc == 0xffffffff means the target
 * is only known at run time (it sits in cpu_delayed_pc); a statically
 * known target goes through gen_goto_tb for TB chaining.
 * NOTE(review): the runtime-target exit, the else-branch braces and the
 * bstate update are missing from this view; code left byte-identical.
 */
237 static void gen_jump(DisasContext
* ctx
)
239 if (ctx
->delayed_pc
== (uint32_t) - 1) {
240 /* Target is not statically known; it necessarily comes from a
241 delayed jump, as immediate jumps are conditional jumps */
242 tcg_gen_mov_i32(cpu_pc
, cpu_delayed_pc
);
243 if (ctx
->singlestep_enabled
)
244 gen_helper_debug(cpu_env
);
247 gen_goto_tb(ctx
, 0, ctx
->delayed_pc
);
251 static inline void gen_branch_slot(uint32_t delayed_pc
, int t
)
253 TCGLabel
*label
= gen_new_label();
254 tcg_gen_movi_i32(cpu_delayed_pc
, delayed_pc
);
255 tcg_gen_brcondi_i32(t
? TCG_COND_EQ
: TCG_COND_NE
, cpu_sr_t
, 0, label
);
256 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
257 gen_set_label(label
);
260 /* Immediate conditional jump (bt or bf): exit to @ift when the T bit
   is set, to @ifnott when it is clear, using two TB-chaining slots.
   NOTE(review): the gen_set_label(l1) between the two gen_goto_tb
   calls and the closing brace are missing from this view; the
   surviving code is left byte-identical. */
261 static void gen_conditional_jump(DisasContext
* ctx
,
262 target_ulong ift
, target_ulong ifnott
)
264 TCGLabel
*l1
= gen_new_label();
/* T != 0 branches to l1, i.e. to the @ift exit below. */
265 tcg_gen_brcondi_i32(TCG_COND_NE
, cpu_sr_t
, 0, l1
);
266 gen_goto_tb(ctx
, 0, ifnott
);
268 gen_goto_tb(ctx
, 1, ift
);
271 /* Delayed conditional jump (bt/s or bf/s): the delay-slot instruction
   has already been translated; test the DELAY_SLOT_TRUE flag recorded
   by gen_branch_slot.  If it is clear the branch is not taken and we
   exit to pc + 2; otherwise the flag is cleared and (in code missing
   from this view) the branch target is taken.
   NOTE(review): the l1/ds declarations, gen_set_label(l1) and the
   taken-branch tail are missing from this view; the surviving code is
   left byte-identical. */
272 static void gen_delayed_conditional_jump(DisasContext
* ctx
)
277 l1
= gen_new_label();
279 tcg_gen_andi_i32(ds
, cpu_flags
, DELAY_SLOT_TRUE
);
/* Flag set -> branch taken: skip the fall-through exit below. */
280 tcg_gen_brcondi_i32(TCG_COND_NE
, ds
, 0, l1
);
281 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
283 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, ~DELAY_SLOT_TRUE
);
287 static inline void gen_store_flags(uint32_t flags
)
289 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
290 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, flags
);
293 static inline void gen_load_fpr64(TCGv_i64 t
, int reg
)
295 tcg_gen_concat_i32_i64(t
, cpu_fregs
[reg
+ 1], cpu_fregs
[reg
]);
298 static inline void gen_store_fpr64 (TCGv_i64 t
, int reg
)
300 TCGv_i32 tmp
= tcg_temp_new_i32();
301 tcg_gen_extrl_i64_i32(tmp
, t
);
302 tcg_gen_mov_i32(cpu_fregs
[reg
+ 1], tmp
);
303 tcg_gen_shri_i64(t
, t
, 32);
304 tcg_gen_extrl_i64_i32(tmp
, t
);
305 tcg_gen_mov_i32(cpu_fregs
[reg
], tmp
);
306 tcg_temp_free_i32(tmp
);
/* Opcode field extraction helpers.  ctx->opcode is the 16-bit SH4
 * instruction word; B<hi>_<lo> extracts bits [hi:lo], and the 's'
 * variants sign-extend the extracted field. */
309 #define B3_0 (ctx->opcode & 0xf)
310 #define B6_4 ((ctx->opcode >> 4) & 0x7)
311 #define B7_4 ((ctx->opcode >> 4) & 0xf)
312 #define B7_0 (ctx->opcode & 0xff)
313 #define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
314 #define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
315 (ctx->opcode & 0xfff))
316 #define B11_8 ((ctx->opcode >> 8) & 0xf)
317 #define B15_12 ((ctx->opcode >> 12) & 0xf)
/* REG selects the active bank for R0-R7: bank 1 (gregs[16..23]) when
 * both SR.MD and SR.RB are set, bank 0 otherwise.  ALTREG selects the
 * opposite (inactive) bank. */
319 #define REG(x) ((x) < 8 && (ctx->flags & (1u << SR_MD))\
320 && (ctx->flags & (1u << SR_RB))\
321 ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))
323 #define ALTREG(x) ((x) < 8 && (!(ctx->flags & (1u << SR_MD))\
324 || !(ctx->flags & (1u << SR_RB)))\
325 ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))
/* FP register selectors: FREG swaps to the other bank (^0x10) when
 * FPSCR.FR is set; XHACK moves bit 0 of the encoding into bit 4, and
 * XREG applies the same FR bank swap on top of that. */
327 #define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
328 #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
329 #define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
330 #define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
332 #define CHECK_NOT_DELAY_SLOT \
333 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
335 tcg_gen_movi_i32(cpu_pc, ctx->pc); \
336 gen_helper_raise_slot_illegal_instruction(cpu_env); \
337 ctx->bstate = BS_BRANCH; \
341 #define CHECK_PRIVILEGED \
342 if (IS_USER(ctx)) { \
343 tcg_gen_movi_i32(cpu_pc, ctx->pc); \
344 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
345 gen_helper_raise_slot_illegal_instruction(cpu_env); \
347 gen_helper_raise_illegal_instruction(cpu_env); \
349 ctx->bstate = BS_BRANCH; \
353 #define CHECK_FPU_ENABLED \
354 if (ctx->flags & (1u << SR_FD)) { \
355 tcg_gen_movi_i32(cpu_pc, ctx->pc); \
356 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
357 gen_helper_raise_slot_fpu_disable(cpu_env); \
359 gen_helper_raise_fpu_disable(cpu_env); \
361 ctx->bstate = BS_BRANCH; \
365 static void _decode_opc(DisasContext
* ctx
)
367 /* This code tries to make movcal emulation sufficiently
368 accurate for Linux purposes. This instruction writes
369 memory, and prior to that, always allocates a cache line.
370 It is used in two contexts:
371 - in memcpy, where data is copied in blocks, the first write
372 of to a block uses movca.l for performance.
373 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
374 to flush the cache. Here, the data written by movcal.l is never
375 written to memory, and the data written is just bogus.
377 To simulate this, we simulate movcal.l, we store the value to memory,
378 but we also remember the previous content. If we see ocbi, we check
379 if movcal.l for that address was done previously. If so, the write should
380 not have hit the memory, so we restore the previous content.
381 When we see an instruction that is neither movca.l
382 nor ocbi, the previous content is discarded.
384 To optimize, we only try to flush stores when we're at the start of
385 TB, or if we already saw movca.l in this TB and did not flush stores
389 int opcode
= ctx
->opcode
& 0xf0ff;
390 if (opcode
!= 0x0093 /* ocbi */
391 && opcode
!= 0x00c3 /* movca.l */)
393 gen_helper_discard_movcal_backup(cpu_env
);
399 fprintf(stderr
, "Translating opcode 0x%04x\n", ctx
->opcode
);
402 switch (ctx
->opcode
) {
403 case 0x0019: /* div0u */
404 tcg_gen_movi_i32(cpu_sr_m
, 0);
405 tcg_gen_movi_i32(cpu_sr_q
, 0);
406 tcg_gen_movi_i32(cpu_sr_t
, 0);
408 case 0x000b: /* rts */
410 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_pr
);
411 ctx
->flags
|= DELAY_SLOT
;
412 ctx
->delayed_pc
= (uint32_t) - 1;
414 case 0x0028: /* clrmac */
415 tcg_gen_movi_i32(cpu_mach
, 0);
416 tcg_gen_movi_i32(cpu_macl
, 0);
418 case 0x0048: /* clrs */
419 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~(1u << SR_S
));
421 case 0x0008: /* clrt */
422 tcg_gen_movi_i32(cpu_sr_t
, 0);
424 case 0x0038: /* ldtlb */
426 gen_helper_ldtlb(cpu_env
);
428 case 0x002b: /* rte */
431 gen_write_sr(cpu_ssr
);
432 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_spc
);
433 ctx
->flags
|= DELAY_SLOT
;
434 ctx
->delayed_pc
= (uint32_t) - 1;
436 case 0x0058: /* sets */
437 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, (1u << SR_S
));
439 case 0x0018: /* sett */
440 tcg_gen_movi_i32(cpu_sr_t
, 1);
442 case 0xfbfd: /* frchg */
443 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_FR
);
444 ctx
->bstate
= BS_STOP
;
446 case 0xf3fd: /* fschg */
447 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_SZ
);
448 ctx
->bstate
= BS_STOP
;
450 case 0x0009: /* nop */
452 case 0x001b: /* sleep */
454 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
+ 2);
455 gen_helper_sleep(cpu_env
);
459 switch (ctx
->opcode
& 0xf000) {
460 case 0x1000: /* mov.l Rm,@(disp,Rn) */
462 TCGv addr
= tcg_temp_new();
463 tcg_gen_addi_i32(addr
, REG(B11_8
), B3_0
* 4);
464 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
468 case 0x5000: /* mov.l @(disp,Rm),Rn */
470 TCGv addr
= tcg_temp_new();
471 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 4);
472 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
476 case 0xe000: /* mov #imm,Rn */
477 tcg_gen_movi_i32(REG(B11_8
), B7_0s
);
479 case 0x9000: /* mov.w @(disp,PC),Rn */
481 TCGv addr
= tcg_const_i32(ctx
->pc
+ 4 + B7_0
* 2);
482 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESW
);
486 case 0xd000: /* mov.l @(disp,PC),Rn */
488 TCGv addr
= tcg_const_i32((ctx
->pc
+ 4 + B7_0
* 4) & ~3);
489 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
493 case 0x7000: /* add #imm,Rn */
494 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), B7_0s
);
496 case 0xa000: /* bra disp */
498 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
499 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
500 ctx
->flags
|= DELAY_SLOT
;
502 case 0xb000: /* bsr disp */
504 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
505 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
506 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
507 ctx
->flags
|= DELAY_SLOT
;
511 switch (ctx
->opcode
& 0xf00f) {
512 case 0x6003: /* mov Rm,Rn */
513 tcg_gen_mov_i32(REG(B11_8
), REG(B7_4
));
515 case 0x2000: /* mov.b Rm,@Rn */
516 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_UB
);
518 case 0x2001: /* mov.w Rm,@Rn */
519 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_TEUW
);
521 case 0x2002: /* mov.l Rm,@Rn */
522 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
524 case 0x6000: /* mov.b @Rm,Rn */
525 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_SB
);
527 case 0x6001: /* mov.w @Rm,Rn */
528 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESW
);
530 case 0x6002: /* mov.l @Rm,Rn */
531 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESL
);
533 case 0x2004: /* mov.b Rm,@-Rn */
535 TCGv addr
= tcg_temp_new();
536 tcg_gen_subi_i32(addr
, REG(B11_8
), 1);
537 /* might cause re-execution */
538 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_UB
);
539 tcg_gen_mov_i32(REG(B11_8
), addr
); /* modify register status */
543 case 0x2005: /* mov.w Rm,@-Rn */
545 TCGv addr
= tcg_temp_new();
546 tcg_gen_subi_i32(addr
, REG(B11_8
), 2);
547 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUW
);
548 tcg_gen_mov_i32(REG(B11_8
), addr
);
552 case 0x2006: /* mov.l Rm,@-Rn */
554 TCGv addr
= tcg_temp_new();
555 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
556 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
557 tcg_gen_mov_i32(REG(B11_8
), addr
);
560 case 0x6004: /* mov.b @Rm+,Rn */
561 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_SB
);
563 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 1);
565 case 0x6005: /* mov.w @Rm+,Rn */
566 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESW
);
568 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
570 case 0x6006: /* mov.l @Rm+,Rn */
571 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESL
);
573 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
575 case 0x0004: /* mov.b Rm,@(R0,Rn) */
577 TCGv addr
= tcg_temp_new();
578 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
579 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_UB
);
583 case 0x0005: /* mov.w Rm,@(R0,Rn) */
585 TCGv addr
= tcg_temp_new();
586 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
587 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUW
);
591 case 0x0006: /* mov.l Rm,@(R0,Rn) */
593 TCGv addr
= tcg_temp_new();
594 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
595 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
599 case 0x000c: /* mov.b @(R0,Rm),Rn */
601 TCGv addr
= tcg_temp_new();
602 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
603 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_SB
);
607 case 0x000d: /* mov.w @(R0,Rm),Rn */
609 TCGv addr
= tcg_temp_new();
610 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
611 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESW
);
615 case 0x000e: /* mov.l @(R0,Rm),Rn */
617 TCGv addr
= tcg_temp_new();
618 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
619 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
623 case 0x6008: /* swap.b Rm,Rn */
625 TCGv low
= tcg_temp_new();;
626 tcg_gen_ext16u_i32(low
, REG(B7_4
));
627 tcg_gen_bswap16_i32(low
, low
);
628 tcg_gen_deposit_i32(REG(B11_8
), REG(B7_4
), low
, 0, 16);
632 case 0x6009: /* swap.w Rm,Rn */
633 tcg_gen_rotli_i32(REG(B11_8
), REG(B7_4
), 16);
635 case 0x200d: /* xtrct Rm,Rn */
638 high
= tcg_temp_new();
639 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
640 low
= tcg_temp_new();
641 tcg_gen_shri_i32(low
, REG(B11_8
), 16);
642 tcg_gen_or_i32(REG(B11_8
), high
, low
);
647 case 0x300c: /* add Rm,Rn */
648 tcg_gen_add_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
650 case 0x300e: /* addc Rm,Rn */
653 t0
= tcg_const_tl(0);
655 tcg_gen_add2_i32(t1
, cpu_sr_t
, cpu_sr_t
, t0
, REG(B7_4
), t0
);
656 tcg_gen_add2_i32(REG(B11_8
), cpu_sr_t
,
657 REG(B11_8
), t0
, t1
, cpu_sr_t
);
662 case 0x300f: /* addv Rm,Rn */
666 tcg_gen_add_i32(t0
, REG(B7_4
), REG(B11_8
));
668 tcg_gen_xor_i32(t1
, t0
, REG(B11_8
));
670 tcg_gen_xor_i32(t2
, REG(B7_4
), REG(B11_8
));
671 tcg_gen_andc_i32(cpu_sr_t
, t1
, t2
);
673 tcg_gen_shri_i32(cpu_sr_t
, cpu_sr_t
, 31);
675 tcg_gen_mov_i32(REG(B7_4
), t0
);
679 case 0x2009: /* and Rm,Rn */
680 tcg_gen_and_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
682 case 0x3000: /* cmp/eq Rm,Rn */
683 tcg_gen_setcond_i32(TCG_COND_EQ
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
685 case 0x3003: /* cmp/ge Rm,Rn */
686 tcg_gen_setcond_i32(TCG_COND_GE
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
688 case 0x3007: /* cmp/gt Rm,Rn */
689 tcg_gen_setcond_i32(TCG_COND_GT
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
691 case 0x3006: /* cmp/hi Rm,Rn */
692 tcg_gen_setcond_i32(TCG_COND_GTU
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
694 case 0x3002: /* cmp/hs Rm,Rn */
695 tcg_gen_setcond_i32(TCG_COND_GEU
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
697 case 0x200c: /* cmp/str Rm,Rn */
699 TCGv cmp1
= tcg_temp_new();
700 TCGv cmp2
= tcg_temp_new();
701 tcg_gen_xor_i32(cmp2
, REG(B7_4
), REG(B11_8
));
702 tcg_gen_subi_i32(cmp1
, cmp2
, 0x01010101);
703 tcg_gen_andc_i32(cmp1
, cmp1
, cmp2
);
704 tcg_gen_andi_i32(cmp1
, cmp1
, 0x80808080);
705 tcg_gen_setcondi_i32(TCG_COND_NE
, cpu_sr_t
, cmp1
, 0);
710 case 0x2007: /* div0s Rm,Rn */
711 tcg_gen_shri_i32(cpu_sr_q
, REG(B11_8
), 31); /* SR_Q */
712 tcg_gen_shri_i32(cpu_sr_m
, REG(B7_4
), 31); /* SR_M */
713 tcg_gen_xor_i32(cpu_sr_t
, cpu_sr_q
, cpu_sr_m
); /* SR_T */
715 case 0x3004: /* div1 Rm,Rn */
717 TCGv t0
= tcg_temp_new();
718 TCGv t1
= tcg_temp_new();
719 TCGv t2
= tcg_temp_new();
720 TCGv zero
= tcg_const_i32(0);
722 /* shift left arg1, saving the bit being pushed out and inserting
724 tcg_gen_shri_i32(t0
, REG(B11_8
), 31);
725 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
726 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), cpu_sr_t
);
728 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
729 using 64-bit temps, we compute arg0's high part from q ^ m, so
730 that it is 0x00000000 when adding the value or 0xffffffff when
732 tcg_gen_xor_i32(t1
, cpu_sr_q
, cpu_sr_m
);
733 tcg_gen_subi_i32(t1
, t1
, 1);
734 tcg_gen_neg_i32(t2
, REG(B7_4
));
735 tcg_gen_movcond_i32(TCG_COND_EQ
, t2
, t1
, zero
, REG(B7_4
), t2
);
736 tcg_gen_add2_i32(REG(B11_8
), t1
, REG(B11_8
), zero
, t2
, t1
);
738 /* compute T and Q depending on carry */
739 tcg_gen_andi_i32(t1
, t1
, 1);
740 tcg_gen_xor_i32(t1
, t1
, t0
);
741 tcg_gen_xori_i32(cpu_sr_t
, t1
, 1);
742 tcg_gen_xor_i32(cpu_sr_q
, cpu_sr_m
, t1
);
750 case 0x300d: /* dmuls.l Rm,Rn */
751 tcg_gen_muls2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
753 case 0x3005: /* dmulu.l Rm,Rn */
754 tcg_gen_mulu2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
756 case 0x600e: /* exts.b Rm,Rn */
757 tcg_gen_ext8s_i32(REG(B11_8
), REG(B7_4
));
759 case 0x600f: /* exts.w Rm,Rn */
760 tcg_gen_ext16s_i32(REG(B11_8
), REG(B7_4
));
762 case 0x600c: /* extu.b Rm,Rn */
763 tcg_gen_ext8u_i32(REG(B11_8
), REG(B7_4
));
765 case 0x600d: /* extu.w Rm,Rn */
766 tcg_gen_ext16u_i32(REG(B11_8
), REG(B7_4
));
768 case 0x000f: /* mac.l @Rm+,@Rn+ */
771 arg0
= tcg_temp_new();
772 tcg_gen_qemu_ld_i32(arg0
, REG(B7_4
), ctx
->memidx
, MO_TESL
);
773 arg1
= tcg_temp_new();
774 tcg_gen_qemu_ld_i32(arg1
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
775 gen_helper_macl(cpu_env
, arg0
, arg1
);
778 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
779 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
782 case 0x400f: /* mac.w @Rm+,@Rn+ */
785 arg0
= tcg_temp_new();
786 tcg_gen_qemu_ld_i32(arg0
, REG(B7_4
), ctx
->memidx
, MO_TESL
);
787 arg1
= tcg_temp_new();
788 tcg_gen_qemu_ld_i32(arg1
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
789 gen_helper_macw(cpu_env
, arg0
, arg1
);
792 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 2);
793 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
796 case 0x0007: /* mul.l Rm,Rn */
797 tcg_gen_mul_i32(cpu_macl
, REG(B7_4
), REG(B11_8
));
799 case 0x200f: /* muls.w Rm,Rn */
802 arg0
= tcg_temp_new();
803 tcg_gen_ext16s_i32(arg0
, REG(B7_4
));
804 arg1
= tcg_temp_new();
805 tcg_gen_ext16s_i32(arg1
, REG(B11_8
));
806 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
811 case 0x200e: /* mulu.w Rm,Rn */
814 arg0
= tcg_temp_new();
815 tcg_gen_ext16u_i32(arg0
, REG(B7_4
));
816 arg1
= tcg_temp_new();
817 tcg_gen_ext16u_i32(arg1
, REG(B11_8
));
818 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
823 case 0x600b: /* neg Rm,Rn */
824 tcg_gen_neg_i32(REG(B11_8
), REG(B7_4
));
826 case 0x600a: /* negc Rm,Rn */
828 TCGv t0
= tcg_const_i32(0);
829 tcg_gen_add2_i32(REG(B11_8
), cpu_sr_t
,
830 REG(B7_4
), t0
, cpu_sr_t
, t0
);
831 tcg_gen_sub2_i32(REG(B11_8
), cpu_sr_t
,
832 t0
, t0
, REG(B11_8
), cpu_sr_t
);
833 tcg_gen_andi_i32(cpu_sr_t
, cpu_sr_t
, 1);
837 case 0x6007: /* not Rm,Rn */
838 tcg_gen_not_i32(REG(B11_8
), REG(B7_4
));
840 case 0x200b: /* or Rm,Rn */
841 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
843 case 0x400c: /* shad Rm,Rn */
845 TCGv t0
= tcg_temp_new();
846 TCGv t1
= tcg_temp_new();
847 TCGv t2
= tcg_temp_new();
849 tcg_gen_andi_i32(t0
, REG(B7_4
), 0x1f);
851 /* positive case: shift to the left */
852 tcg_gen_shl_i32(t1
, REG(B11_8
), t0
);
854 /* negative case: shift to the right in two steps to
855 correctly handle the -32 case */
856 tcg_gen_xori_i32(t0
, t0
, 0x1f);
857 tcg_gen_sar_i32(t2
, REG(B11_8
), t0
);
858 tcg_gen_sari_i32(t2
, t2
, 1);
860 /* select between the two cases */
861 tcg_gen_movi_i32(t0
, 0);
862 tcg_gen_movcond_i32(TCG_COND_GE
, REG(B11_8
), REG(B7_4
), t0
, t1
, t2
);
869 case 0x400d: /* shld Rm,Rn */
871 TCGv t0
= tcg_temp_new();
872 TCGv t1
= tcg_temp_new();
873 TCGv t2
= tcg_temp_new();
875 tcg_gen_andi_i32(t0
, REG(B7_4
), 0x1f);
877 /* positive case: shift to the left */
878 tcg_gen_shl_i32(t1
, REG(B11_8
), t0
);
880 /* negative case: shift to the right in two steps to
881 correctly handle the -32 case */
882 tcg_gen_xori_i32(t0
, t0
, 0x1f);
883 tcg_gen_shr_i32(t2
, REG(B11_8
), t0
);
884 tcg_gen_shri_i32(t2
, t2
, 1);
886 /* select between the two cases */
887 tcg_gen_movi_i32(t0
, 0);
888 tcg_gen_movcond_i32(TCG_COND_GE
, REG(B11_8
), REG(B7_4
), t0
, t1
, t2
);
895 case 0x3008: /* sub Rm,Rn */
896 tcg_gen_sub_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
898 case 0x300a: /* subc Rm,Rn */
901 t0
= tcg_const_tl(0);
903 tcg_gen_add2_i32(t1
, cpu_sr_t
, cpu_sr_t
, t0
, REG(B7_4
), t0
);
904 tcg_gen_sub2_i32(REG(B11_8
), cpu_sr_t
,
905 REG(B11_8
), t0
, t1
, cpu_sr_t
);
906 tcg_gen_andi_i32(cpu_sr_t
, cpu_sr_t
, 1);
911 case 0x300b: /* subv Rm,Rn */
915 tcg_gen_sub_i32(t0
, REG(B11_8
), REG(B7_4
));
917 tcg_gen_xor_i32(t1
, t0
, REG(B7_4
));
919 tcg_gen_xor_i32(t2
, REG(B11_8
), REG(B7_4
));
920 tcg_gen_and_i32(t1
, t1
, t2
);
922 tcg_gen_shri_i32(cpu_sr_t
, t1
, 31);
924 tcg_gen_mov_i32(REG(B11_8
), t0
);
928 case 0x2008: /* tst Rm,Rn */
930 TCGv val
= tcg_temp_new();
931 tcg_gen_and_i32(val
, REG(B7_4
), REG(B11_8
));
932 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
936 case 0x200a: /* xor Rm,Rn */
937 tcg_gen_xor_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
939 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
941 if (ctx
->flags
& FPSCR_SZ
) {
942 TCGv_i64 fp
= tcg_temp_new_i64();
943 gen_load_fpr64(fp
, XREG(B7_4
));
944 gen_store_fpr64(fp
, XREG(B11_8
));
945 tcg_temp_free_i64(fp
);
947 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
950 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
952 if (ctx
->flags
& FPSCR_SZ
) {
953 TCGv addr_hi
= tcg_temp_new();
955 tcg_gen_addi_i32(addr_hi
, REG(B11_8
), 4);
956 tcg_gen_qemu_st_i32(cpu_fregs
[fr
], REG(B11_8
),
957 ctx
->memidx
, MO_TEUL
);
958 tcg_gen_qemu_st_i32(cpu_fregs
[fr
+1], addr_hi
,
959 ctx
->memidx
, MO_TEUL
);
960 tcg_temp_free(addr_hi
);
962 tcg_gen_qemu_st_i32(cpu_fregs
[FREG(B7_4
)], REG(B11_8
),
963 ctx
->memidx
, MO_TEUL
);
966 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
968 if (ctx
->flags
& FPSCR_SZ
) {
969 TCGv addr_hi
= tcg_temp_new();
970 int fr
= XREG(B11_8
);
971 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
972 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
, MO_TEUL
);
973 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
, MO_TEUL
);
974 tcg_temp_free(addr_hi
);
976 tcg_gen_qemu_ld_i32(cpu_fregs
[FREG(B11_8
)], REG(B7_4
),
977 ctx
->memidx
, MO_TEUL
);
980 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
982 if (ctx
->flags
& FPSCR_SZ
) {
983 TCGv addr_hi
= tcg_temp_new();
984 int fr
= XREG(B11_8
);
985 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
986 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
, MO_TEUL
);
987 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
, MO_TEUL
);
988 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 8);
989 tcg_temp_free(addr_hi
);
991 tcg_gen_qemu_ld_i32(cpu_fregs
[FREG(B11_8
)], REG(B7_4
),
992 ctx
->memidx
, MO_TEUL
);
993 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
996 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
998 TCGv addr
= tcg_temp_new_i32();
999 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1000 if (ctx
->flags
& FPSCR_SZ
) {
1001 int fr
= XREG(B7_4
);
1002 tcg_gen_qemu_st_i32(cpu_fregs
[fr
+1], addr
, ctx
->memidx
, MO_TEUL
);
1003 tcg_gen_subi_i32(addr
, addr
, 4);
1004 tcg_gen_qemu_st_i32(cpu_fregs
[fr
], addr
, ctx
->memidx
, MO_TEUL
);
1006 tcg_gen_qemu_st_i32(cpu_fregs
[FREG(B7_4
)], addr
,
1007 ctx
->memidx
, MO_TEUL
);
1009 tcg_gen_mov_i32(REG(B11_8
), addr
);
1010 tcg_temp_free(addr
);
1012 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1015 TCGv addr
= tcg_temp_new_i32();
1016 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
1017 if (ctx
->flags
& FPSCR_SZ
) {
1018 int fr
= XREG(B11_8
);
1019 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], addr
,
1020 ctx
->memidx
, MO_TEUL
);
1021 tcg_gen_addi_i32(addr
, addr
, 4);
1022 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr
,
1023 ctx
->memidx
, MO_TEUL
);
1025 tcg_gen_qemu_ld_i32(cpu_fregs
[FREG(B11_8
)], addr
,
1026 ctx
->memidx
, MO_TEUL
);
1028 tcg_temp_free(addr
);
1031 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1034 TCGv addr
= tcg_temp_new();
1035 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
1036 if (ctx
->flags
& FPSCR_SZ
) {
1037 int fr
= XREG(B7_4
);
1038 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], addr
,
1039 ctx
->memidx
, MO_TEUL
);
1040 tcg_gen_addi_i32(addr
, addr
, 4);
1041 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr
,
1042 ctx
->memidx
, MO_TEUL
);
1044 tcg_gen_qemu_st_i32(cpu_fregs
[FREG(B7_4
)], addr
,
1045 ctx
->memidx
, MO_TEUL
);
1047 tcg_temp_free(addr
);
1050 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1051 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1052 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1053 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1054 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1055 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1058 if (ctx
->flags
& FPSCR_PR
) {
1061 if (ctx
->opcode
& 0x0110)
1062 break; /* illegal instruction */
1063 fp0
= tcg_temp_new_i64();
1064 fp1
= tcg_temp_new_i64();
1065 gen_load_fpr64(fp0
, DREG(B11_8
));
1066 gen_load_fpr64(fp1
, DREG(B7_4
));
1067 switch (ctx
->opcode
& 0xf00f) {
1068 case 0xf000: /* fadd Rm,Rn */
1069 gen_helper_fadd_DT(fp0
, cpu_env
, fp0
, fp1
);
1071 case 0xf001: /* fsub Rm,Rn */
1072 gen_helper_fsub_DT(fp0
, cpu_env
, fp0
, fp1
);
1074 case 0xf002: /* fmul Rm,Rn */
1075 gen_helper_fmul_DT(fp0
, cpu_env
, fp0
, fp1
);
1077 case 0xf003: /* fdiv Rm,Rn */
1078 gen_helper_fdiv_DT(fp0
, cpu_env
, fp0
, fp1
);
1080 case 0xf004: /* fcmp/eq Rm,Rn */
1081 gen_helper_fcmp_eq_DT(cpu_env
, fp0
, fp1
);
1083 case 0xf005: /* fcmp/gt Rm,Rn */
1084 gen_helper_fcmp_gt_DT(cpu_env
, fp0
, fp1
);
1087 gen_store_fpr64(fp0
, DREG(B11_8
));
1088 tcg_temp_free_i64(fp0
);
1089 tcg_temp_free_i64(fp1
);
1091 switch (ctx
->opcode
& 0xf00f) {
1092 case 0xf000: /* fadd Rm,Rn */
1093 gen_helper_fadd_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1094 cpu_fregs
[FREG(B11_8
)],
1095 cpu_fregs
[FREG(B7_4
)]);
1097 case 0xf001: /* fsub Rm,Rn */
1098 gen_helper_fsub_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1099 cpu_fregs
[FREG(B11_8
)],
1100 cpu_fregs
[FREG(B7_4
)]);
1102 case 0xf002: /* fmul Rm,Rn */
1103 gen_helper_fmul_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1104 cpu_fregs
[FREG(B11_8
)],
1105 cpu_fregs
[FREG(B7_4
)]);
1107 case 0xf003: /* fdiv Rm,Rn */
1108 gen_helper_fdiv_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1109 cpu_fregs
[FREG(B11_8
)],
1110 cpu_fregs
[FREG(B7_4
)]);
1112 case 0xf004: /* fcmp/eq Rm,Rn */
1113 gen_helper_fcmp_eq_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1114 cpu_fregs
[FREG(B7_4
)]);
1116 case 0xf005: /* fcmp/gt Rm,Rn */
1117 gen_helper_fcmp_gt_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1118 cpu_fregs
[FREG(B7_4
)]);
1124 case 0xf00e: /* fmac FR0,RM,Rn */
1127 if (ctx
->flags
& FPSCR_PR
) {
1128 break; /* illegal instruction */
1130 gen_helper_fmac_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1131 cpu_fregs
[FREG(0)], cpu_fregs
[FREG(B7_4
)],
1132 cpu_fregs
[FREG(B11_8
)]);
1138 switch (ctx
->opcode
& 0xff00) {
1139 case 0xc900: /* and #imm,R0 */
1140 tcg_gen_andi_i32(REG(0), REG(0), B7_0
);
1142 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1145 addr
= tcg_temp_new();
1146 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1147 val
= tcg_temp_new();
1148 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1149 tcg_gen_andi_i32(val
, val
, B7_0
);
1150 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1152 tcg_temp_free(addr
);
1155 case 0x8b00: /* bf label */
1156 CHECK_NOT_DELAY_SLOT
1157 gen_conditional_jump(ctx
, ctx
->pc
+ 2,
1158 ctx
->pc
+ 4 + B7_0s
* 2);
1159 ctx
->bstate
= BS_BRANCH
;
1161 case 0x8f00: /* bf/s label */
1162 CHECK_NOT_DELAY_SLOT
1163 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 0);
1164 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1166 case 0x8900: /* bt label */
1167 CHECK_NOT_DELAY_SLOT
1168 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2,
1170 ctx
->bstate
= BS_BRANCH
;
1172 case 0x8d00: /* bt/s label */
1173 CHECK_NOT_DELAY_SLOT
1174 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 1);
1175 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1177 case 0x8800: /* cmp/eq #imm,R0 */
1178 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, REG(0), B7_0s
);
1180 case 0xc400: /* mov.b @(disp,GBR),R0 */
1182 TCGv addr
= tcg_temp_new();
1183 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1184 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_SB
);
1185 tcg_temp_free(addr
);
1188 case 0xc500: /* mov.w @(disp,GBR),R0 */
1190 TCGv addr
= tcg_temp_new();
1191 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1192 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESW
);
1193 tcg_temp_free(addr
);
1196 case 0xc600: /* mov.l @(disp,GBR),R0 */
1198 TCGv addr
= tcg_temp_new();
1199 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1200 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESL
);
1201 tcg_temp_free(addr
);
1204 case 0xc000: /* mov.b R0,@(disp,GBR) */
1206 TCGv addr
= tcg_temp_new();
1207 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1208 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_UB
);
1209 tcg_temp_free(addr
);
1212 case 0xc100: /* mov.w R0,@(disp,GBR) */
1214 TCGv addr
= tcg_temp_new();
1215 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1216 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUW
);
1217 tcg_temp_free(addr
);
1220 case 0xc200: /* mov.l R0,@(disp,GBR) */
1222 TCGv addr
= tcg_temp_new();
1223 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1224 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUL
);
1225 tcg_temp_free(addr
);
1228 case 0x8000: /* mov.b R0,@(disp,Rn) */
1230 TCGv addr
= tcg_temp_new();
1231 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1232 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_UB
);
1233 tcg_temp_free(addr
);
1236 case 0x8100: /* mov.w R0,@(disp,Rn) */
1238 TCGv addr
= tcg_temp_new();
1239 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1240 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUW
);
1241 tcg_temp_free(addr
);
1244 case 0x8400: /* mov.b @(disp,Rn),R0 */
1246 TCGv addr
= tcg_temp_new();
1247 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1248 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_SB
);
1249 tcg_temp_free(addr
);
1252 case 0x8500: /* mov.w @(disp,Rn),R0 */
1254 TCGv addr
= tcg_temp_new();
1255 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1256 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESW
);
1257 tcg_temp_free(addr
);
1260 case 0xc700: /* mova @(disp,PC),R0 */
1261 tcg_gen_movi_i32(REG(0), ((ctx
->pc
& 0xfffffffc) + 4 + B7_0
* 4) & ~3);
1263 case 0xcb00: /* or #imm,R0 */
1264 tcg_gen_ori_i32(REG(0), REG(0), B7_0
);
1266 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1269 addr
= tcg_temp_new();
1270 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1271 val
= tcg_temp_new();
1272 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1273 tcg_gen_ori_i32(val
, val
, B7_0
);
1274 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1276 tcg_temp_free(addr
);
1279 case 0xc300: /* trapa #imm */
1282 CHECK_NOT_DELAY_SLOT
1283 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1284 imm
= tcg_const_i32(B7_0
);
1285 gen_helper_trapa(cpu_env
, imm
);
1287 ctx
->bstate
= BS_BRANCH
;
1290 case 0xc800: /* tst #imm,R0 */
1292 TCGv val
= tcg_temp_new();
1293 tcg_gen_andi_i32(val
, REG(0), B7_0
);
1294 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
1298 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1300 TCGv val
= tcg_temp_new();
1301 tcg_gen_add_i32(val
, REG(0), cpu_gbr
);
1302 tcg_gen_qemu_ld_i32(val
, val
, ctx
->memidx
, MO_UB
);
1303 tcg_gen_andi_i32(val
, val
, B7_0
);
1304 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
1308 case 0xca00: /* xor #imm,R0 */
1309 tcg_gen_xori_i32(REG(0), REG(0), B7_0
);
1311 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1314 addr
= tcg_temp_new();
1315 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1316 val
= tcg_temp_new();
1317 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1318 tcg_gen_xori_i32(val
, val
, B7_0
);
1319 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1321 tcg_temp_free(addr
);
1326 switch (ctx
->opcode
& 0xf08f) {
1327 case 0x408e: /* ldc Rm,Rn_BANK */
1329 tcg_gen_mov_i32(ALTREG(B6_4
), REG(B11_8
));
1331 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1333 tcg_gen_qemu_ld_i32(ALTREG(B6_4
), REG(B11_8
), ctx
->memidx
, MO_TESL
);
1334 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1336 case 0x0082: /* stc Rm_BANK,Rn */
1338 tcg_gen_mov_i32(REG(B11_8
), ALTREG(B6_4
));
1340 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1343 TCGv addr
= tcg_temp_new();
1344 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1345 tcg_gen_qemu_st_i32(ALTREG(B6_4
), addr
, ctx
->memidx
, MO_TEUL
);
1346 tcg_gen_mov_i32(REG(B11_8
), addr
);
1347 tcg_temp_free(addr
);
1352 switch (ctx
->opcode
& 0xf0ff) {
1353 case 0x0023: /* braf Rn */
1354 CHECK_NOT_DELAY_SLOT
1355 tcg_gen_addi_i32(cpu_delayed_pc
, REG(B11_8
), ctx
->pc
+ 4);
1356 ctx
->flags
|= DELAY_SLOT
;
1357 ctx
->delayed_pc
= (uint32_t) - 1;
1359 case 0x0003: /* bsrf Rn */
1360 CHECK_NOT_DELAY_SLOT
1361 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1362 tcg_gen_add_i32(cpu_delayed_pc
, REG(B11_8
), cpu_pr
);
1363 ctx
->flags
|= DELAY_SLOT
;
1364 ctx
->delayed_pc
= (uint32_t) - 1;
1366 case 0x4015: /* cmp/pl Rn */
1367 tcg_gen_setcondi_i32(TCG_COND_GT
, cpu_sr_t
, REG(B11_8
), 0);
1369 case 0x4011: /* cmp/pz Rn */
1370 tcg_gen_setcondi_i32(TCG_COND_GE
, cpu_sr_t
, REG(B11_8
), 0);
1372 case 0x4010: /* dt Rn */
1373 tcg_gen_subi_i32(REG(B11_8
), REG(B11_8
), 1);
1374 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, REG(B11_8
), 0);
1376 case 0x402b: /* jmp @Rn */
1377 CHECK_NOT_DELAY_SLOT
1378 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1379 ctx
->flags
|= DELAY_SLOT
;
1380 ctx
->delayed_pc
= (uint32_t) - 1;
1382 case 0x400b: /* jsr @Rn */
1383 CHECK_NOT_DELAY_SLOT
1384 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1385 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1386 ctx
->flags
|= DELAY_SLOT
;
1387 ctx
->delayed_pc
= (uint32_t) - 1;
1389 case 0x400e: /* ldc Rm,SR */
1392 TCGv val
= tcg_temp_new();
1393 tcg_gen_andi_i32(val
, REG(B11_8
), 0x700083f3);
1396 ctx
->bstate
= BS_STOP
;
1399 case 0x4007: /* ldc.l @Rm+,SR */
1402 TCGv val
= tcg_temp_new();
1403 tcg_gen_qemu_ld_i32(val
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
1404 tcg_gen_andi_i32(val
, val
, 0x700083f3);
1407 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1408 ctx
->bstate
= BS_STOP
;
1411 case 0x0002: /* stc SR,Rn */
1413 gen_read_sr(REG(B11_8
));
1415 case 0x4003: /* stc SR,@-Rn */
1418 TCGv addr
= tcg_temp_new();
1419 TCGv val
= tcg_temp_new();
1420 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1422 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_TEUL
);
1423 tcg_gen_mov_i32(REG(B11_8
), addr
);
1425 tcg_temp_free(addr
);
1428 #define LD(reg,ldnum,ldpnum,prechk) \
1431 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1435 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1436 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1438 #define ST(reg,stnum,stpnum,prechk) \
1441 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1446 TCGv addr = tcg_temp_new(); \
1447 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1448 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1449 tcg_gen_mov_i32(REG(B11_8), addr); \
1450 tcg_temp_free(addr); \
1453 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1454 LD(reg,ldnum,ldpnum,prechk) \
1455 ST(reg,stnum,stpnum,prechk)
1456 LDST(gbr
, 0x401e, 0x4017, 0x0012, 0x4013, {})
1457 LDST(vbr
, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED
)
1458 LDST(ssr
, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED
)
1459 LDST(spc
, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED
)
1460 ST(sgr
, 0x003a, 0x4032, CHECK_PRIVILEGED
)
1461 LD(sgr
, 0x403a, 0x4036, CHECK_PRIVILEGED
if (!(ctx
->features
& SH_FEATURE_SH4A
)) break;)
1462 LDST(dbr
, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED
)
1463 LDST(mach
, 0x400a, 0x4006, 0x000a, 0x4002, {})
1464 LDST(macl
, 0x401a, 0x4016, 0x001a, 0x4012, {})
1465 LDST(pr
, 0x402a, 0x4026, 0x002a, 0x4022, {})
1466 LDST(fpul
, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED
})
1467 case 0x406a: /* lds Rm,FPSCR */
1469 gen_helper_ld_fpscr(cpu_env
, REG(B11_8
));
1470 ctx
->bstate
= BS_STOP
;
1472 case 0x4066: /* lds.l @Rm+,FPSCR */
1475 TCGv addr
= tcg_temp_new();
1476 tcg_gen_qemu_ld_i32(addr
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
1477 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1478 gen_helper_ld_fpscr(cpu_env
, addr
);
1479 tcg_temp_free(addr
);
1480 ctx
->bstate
= BS_STOP
;
1483 case 0x006a: /* sts FPSCR,Rn */
1485 tcg_gen_andi_i32(REG(B11_8
), cpu_fpscr
, 0x003fffff);
1487 case 0x4062: /* sts FPSCR,@-Rn */
1491 val
= tcg_temp_new();
1492 tcg_gen_andi_i32(val
, cpu_fpscr
, 0x003fffff);
1493 addr
= tcg_temp_new();
1494 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1495 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_TEUL
);
1496 tcg_gen_mov_i32(REG(B11_8
), addr
);
1497 tcg_temp_free(addr
);
1501 case 0x00c3: /* movca.l R0,@Rm */
1503 TCGv val
= tcg_temp_new();
1504 tcg_gen_qemu_ld_i32(val
, REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1505 gen_helper_movcal(cpu_env
, REG(B11_8
), val
);
1506 tcg_gen_qemu_st_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1508 ctx
->has_movcal
= 1;
1511 /* MOVUA.L @Rm,R0 (Rm) -> R0
1512 Load non-boundary-aligned data */
1513 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1516 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1517 Load non-boundary-aligned data */
1518 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1519 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1521 case 0x0029: /* movt Rn */
1522 tcg_gen_mov_i32(REG(B11_8
), cpu_sr_t
);
1527 If (T == 1) R0 -> (Rn)
1530 if (ctx
->features
& SH_FEATURE_SH4A
) {
1531 TCGLabel
*label
= gen_new_label();
1532 tcg_gen_mov_i32(cpu_sr_t
, cpu_ldst
);
1533 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ldst
, 0, label
);
1534 tcg_gen_qemu_st_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1535 gen_set_label(label
);
1536 tcg_gen_movi_i32(cpu_ldst
, 0);
1544 When interrupt/exception
1547 if (ctx
->features
& SH_FEATURE_SH4A
) {
1548 tcg_gen_movi_i32(cpu_ldst
, 0);
1549 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TESL
);
1550 tcg_gen_movi_i32(cpu_ldst
, 1);
1554 case 0x0093: /* ocbi @Rn */
1556 gen_helper_ocbi(cpu_env
, REG(B11_8
));
1559 case 0x00a3: /* ocbp @Rn */
1560 case 0x00b3: /* ocbwb @Rn */
1561 /* These instructions are supposed to do nothing in case of
1562 a cache miss. Given that we only partially emulate caches
1563 it is safe to simply ignore them. */
1565 case 0x0083: /* pref @Rn */
1567 case 0x00d3: /* prefi @Rn */
1568 if (ctx
->features
& SH_FEATURE_SH4A
)
1572 case 0x00e3: /* icbi @Rn */
1573 if (ctx
->features
& SH_FEATURE_SH4A
)
1577 case 0x00ab: /* synco */
1578 if (ctx
->features
& SH_FEATURE_SH4A
)
1582 case 0x4024: /* rotcl Rn */
1584 TCGv tmp
= tcg_temp_new();
1585 tcg_gen_mov_i32(tmp
, cpu_sr_t
);
1586 tcg_gen_shri_i32(cpu_sr_t
, REG(B11_8
), 31);
1587 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1588 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), tmp
);
1592 case 0x4025: /* rotcr Rn */
1594 TCGv tmp
= tcg_temp_new();
1595 tcg_gen_shli_i32(tmp
, cpu_sr_t
, 31);
1596 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 1);
1597 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1598 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), tmp
);
1602 case 0x4004: /* rotl Rn */
1603 tcg_gen_rotli_i32(REG(B11_8
), REG(B11_8
), 1);
1604 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 0);
1606 case 0x4005: /* rotr Rn */
1607 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 0);
1608 tcg_gen_rotri_i32(REG(B11_8
), REG(B11_8
), 1);
1610 case 0x4000: /* shll Rn */
1611 case 0x4020: /* shal Rn */
1612 tcg_gen_shri_i32(cpu_sr_t
, REG(B11_8
), 31);
1613 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1615 case 0x4021: /* shar Rn */
1616 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 1);
1617 tcg_gen_sari_i32(REG(B11_8
), REG(B11_8
), 1);
1619 case 0x4001: /* shlr Rn */
1620 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 1);
1621 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1623 case 0x4008: /* shll2 Rn */
1624 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 2);
1626 case 0x4018: /* shll8 Rn */
1627 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 8);
1629 case 0x4028: /* shll16 Rn */
1630 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 16);
1632 case 0x4009: /* shlr2 Rn */
1633 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 2);
1635 case 0x4019: /* shlr8 Rn */
1636 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 8);
1638 case 0x4029: /* shlr16 Rn */
1639 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 16);
1641 case 0x401b: /* tas.b @Rn */
1644 addr
= tcg_temp_local_new();
1645 tcg_gen_mov_i32(addr
, REG(B11_8
));
1646 val
= tcg_temp_local_new();
1647 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1648 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
1649 tcg_gen_ori_i32(val
, val
, 0x80);
1650 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1652 tcg_temp_free(addr
);
1655 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1657 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1659 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1661 tcg_gen_mov_i32(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1663 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1665 if (ctx
->flags
& FPSCR_PR
) {
1667 if (ctx
->opcode
& 0x0100)
1668 break; /* illegal instruction */
1669 fp
= tcg_temp_new_i64();
1670 gen_helper_float_DT(fp
, cpu_env
, cpu_fpul
);
1671 gen_store_fpr64(fp
, DREG(B11_8
));
1672 tcg_temp_free_i64(fp
);
1675 gen_helper_float_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
, cpu_fpul
);
1678 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1680 if (ctx
->flags
& FPSCR_PR
) {
1682 if (ctx
->opcode
& 0x0100)
1683 break; /* illegal instruction */
1684 fp
= tcg_temp_new_i64();
1685 gen_load_fpr64(fp
, DREG(B11_8
));
1686 gen_helper_ftrc_DT(cpu_fpul
, cpu_env
, fp
);
1687 tcg_temp_free_i64(fp
);
1690 gen_helper_ftrc_FT(cpu_fpul
, cpu_env
, cpu_fregs
[FREG(B11_8
)]);
1693 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1696 gen_helper_fneg_T(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1699 case 0xf05d: /* fabs FRn/DRn */
1701 if (ctx
->flags
& FPSCR_PR
) {
1702 if (ctx
->opcode
& 0x0100)
1703 break; /* illegal instruction */
1704 TCGv_i64 fp
= tcg_temp_new_i64();
1705 gen_load_fpr64(fp
, DREG(B11_8
));
1706 gen_helper_fabs_DT(fp
, fp
);
1707 gen_store_fpr64(fp
, DREG(B11_8
));
1708 tcg_temp_free_i64(fp
);
1710 gen_helper_fabs_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1713 case 0xf06d: /* fsqrt FRn */
1715 if (ctx
->flags
& FPSCR_PR
) {
1716 if (ctx
->opcode
& 0x0100)
1717 break; /* illegal instruction */
1718 TCGv_i64 fp
= tcg_temp_new_i64();
1719 gen_load_fpr64(fp
, DREG(B11_8
));
1720 gen_helper_fsqrt_DT(fp
, cpu_env
, fp
);
1721 gen_store_fpr64(fp
, DREG(B11_8
));
1722 tcg_temp_free_i64(fp
);
1724 gen_helper_fsqrt_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1725 cpu_fregs
[FREG(B11_8
)]);
1728 case 0xf07d: /* fsrra FRn */
1731 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1733 if (!(ctx
->flags
& FPSCR_PR
)) {
1734 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0);
1737 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1739 if (!(ctx
->flags
& FPSCR_PR
)) {
1740 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0x3f800000);
1743 case 0xf0ad: /* fcnvsd FPUL,DRn */
1746 TCGv_i64 fp
= tcg_temp_new_i64();
1747 gen_helper_fcnvsd_FT_DT(fp
, cpu_env
, cpu_fpul
);
1748 gen_store_fpr64(fp
, DREG(B11_8
));
1749 tcg_temp_free_i64(fp
);
1752 case 0xf0bd: /* fcnvds DRn,FPUL */
1755 TCGv_i64 fp
= tcg_temp_new_i64();
1756 gen_load_fpr64(fp
, DREG(B11_8
));
1757 gen_helper_fcnvds_DT_FT(cpu_fpul
, cpu_env
, fp
);
1758 tcg_temp_free_i64(fp
);
1761 case 0xf0ed: /* fipr FVm,FVn */
1763 if ((ctx
->flags
& FPSCR_PR
) == 0) {
1765 m
= tcg_const_i32((ctx
->opcode
>> 8) & 3);
1766 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1767 gen_helper_fipr(cpu_env
, m
, n
);
1773 case 0xf0fd: /* ftrv XMTRX,FVn */
1775 if ((ctx
->opcode
& 0x0300) == 0x0100 &&
1776 (ctx
->flags
& FPSCR_PR
) == 0) {
1778 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1779 gen_helper_ftrv(cpu_env
, n
);
1786 fprintf(stderr
, "unknown instruction 0x%04x at pc 0x%08x\n",
1787 ctx
->opcode
, ctx
->pc
);
1790 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1791 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1792 gen_helper_raise_slot_illegal_instruction(cpu_env
);
1794 gen_helper_raise_illegal_instruction(cpu_env
);
1796 ctx
->bstate
= BS_BRANCH
;
1799 static void decode_opc(DisasContext
* ctx
)
1801 uint32_t old_flags
= ctx
->flags
;
1805 if (old_flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1806 if (ctx
->flags
& DELAY_SLOT_CLEARME
) {
1809 /* go out of the delay slot */
1810 uint32_t new_flags
= ctx
->flags
;
1811 new_flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
1812 gen_store_flags(new_flags
);
1815 ctx
->bstate
= BS_BRANCH
;
1816 if (old_flags
& DELAY_SLOT_CONDITIONAL
) {
1817 gen_delayed_conditional_jump(ctx
);
1818 } else if (old_flags
& DELAY_SLOT
) {
1824 /* go into a delay slot */
1825 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))
1826 gen_store_flags(ctx
->flags
);
1829 void gen_intermediate_code(CPUSH4State
* env
, struct TranslationBlock
*tb
)
1831 SuperHCPU
*cpu
= sh_env_get_cpu(env
);
1832 CPUState
*cs
= CPU(cpu
);
1834 target_ulong pc_start
;
1840 ctx
.flags
= (uint32_t)tb
->flags
;
1841 ctx
.bstate
= BS_NONE
;
1842 ctx
.memidx
= (ctx
.flags
& (1u << SR_MD
)) == 0 ? 1 : 0;
1843 /* We don't know if the delayed pc came from a dynamic or static branch,
1844 so assume it is a dynamic branch. */
1845 ctx
.delayed_pc
= -1; /* use delayed pc from env pointer */
1847 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
1848 ctx
.features
= env
->features
;
1849 ctx
.has_movcal
= (ctx
.flags
& TB_FLAG_PENDING_MOVCA
);
1852 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
1853 if (max_insns
== 0) {
1854 max_insns
= CF_COUNT_MASK
;
1856 if (max_insns
> TCG_MAX_INSNS
) {
1857 max_insns
= TCG_MAX_INSNS
;
1861 while (ctx
.bstate
== BS_NONE
&& !tcg_op_buf_full()) {
1862 tcg_gen_insn_start(ctx
.pc
, ctx
.flags
);
1865 if (unlikely(cpu_breakpoint_test(cs
, ctx
.pc
, BP_ANY
))) {
1866 /* We have hit a breakpoint - make sure PC is up-to-date */
1867 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1868 gen_helper_debug(cpu_env
);
1869 ctx
.bstate
= BS_BRANCH
;
1870 /* The address covered by the breakpoint must be included in
1871 [tb->pc, tb->pc + tb->size) in order to for it to be
1872 properly cleared -- thus we increment the PC here so that
1873 the logic setting tb->size below does the right thing. */
1878 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
1882 ctx
.opcode
= cpu_lduw_code(env
, ctx
.pc
);
1885 if ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
1887 if (cs
->singlestep_enabled
) {
1890 if (num_insns
>= max_insns
)
1895 if (tb
->cflags
& CF_LAST_IO
)
1897 if (cs
->singlestep_enabled
) {
1898 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1899 gen_helper_debug(cpu_env
);
1901 switch (ctx
.bstate
) {
1903 /* gen_op_interrupt_restart(); */
1907 gen_store_flags(ctx
.flags
| DELAY_SLOT_CLEARME
);
1909 gen_goto_tb(&ctx
, 0, ctx
.pc
);
1912 /* gen_op_interrupt_restart(); */
1921 gen_tb_end(tb
, num_insns
);
1923 tb
->size
= ctx
.pc
- pc_start
;
1924 tb
->icount
= num_insns
;
1927 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
1928 && qemu_log_in_addr_range(pc_start
)) {
1929 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
1930 log_target_disas(cs
, pc_start
, ctx
.pc
- pc_start
, 0);
1936 void restore_state_to_opc(CPUSH4State
*env
, TranslationBlock
*tb
,
1940 env
->flags
= data
[1];