4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "qemu/osdep.h"
24 #include "disas/disas.h"
26 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
/* Per-translation-block decoder state.
   NOTE(review): this extract is missing most of the struct's members —
   the decoder below also references ctx->pc, ctx->flags, ctx->memidx,
   ctx->opcode, ctx->delayed_pc and ctx->bstate — as well as the closing
   brace/typedef name. */
typedef struct DisasContext
{
    /* TB being translated (used by gen_goto_tb for chaining). */
    struct TranslationBlock *tb;
    /* When set, gen_goto_tb/gen_jump avoid TB chaining and emit
       gen_helper_debug() so gdb single-stepping works. */
    int singlestep_enabled;
/* IS_USER(ctx): nonzero when translating user-mode code. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
/* NOTE(review): the #else / #endif of this conditional are missing from
   this extract; the softmmu variant below tests the cached SR.MD bit. */
#define IS_USER(ctx) (!(ctx->flags & (1u << SR_MD)))
/* Translation-stop reasons stored in ctx->bstate.
   NOTE(review): the enum's opening and closing lines are missing from
   this extract. */
BS_NONE = 0, /* We go out of the TB without reaching a branch or an
                exception */
BS_STOP = 1, /* We want to stop translation for any reason */
BS_BRANCH = 2, /* We reached a branch condition */
BS_EXCP = 3, /* We reached an exception condition */
/* global register indexes */
static TCGv_ptr cpu_env;        /* pointer to the CPUSH4State */
static TCGv cpu_gregs[24];      /* R8..R15 plus two banks of R0..R7 (see gregnames) */
/* SR is kept split: cpu_sr holds everything except the M, Q and T bits,
   which live as separate 0/1 values (see gen_read_sr/gen_write_sr). */
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];      /* two banks of 16 single-precision FP regs */

/* internal register indexes */
/* cpu_flags mirrors env->flags (delay-slot state); cpu_delayed_pc is the
   pending branch target while translating a delay slot. */
static TCGv cpu_flags, cpu_delayed_pc;
74 #include "exec/gen-icount.h"
/* Register the TCG globals that back the SH4 CPU state; must run once
   before any code is translated.
   NOTE(review): several lines are missing from this extract: the
   function braces, the "int i;" declaration, the done_init early-return
   guard, the closing "};" of both name tables, and the register-name
   arguments of the cpu_gregs / cpu_delayed_pc / cpu_fregs calls. */
void sh4_translate_init(void)
    static int done_init = 0;   /* guards against repeated initialization */
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    static const char * const fregnames[32] = {
        "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
        "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
        "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
        "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
        "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
        "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    /* The env pointer itself lives in a fixed host register. */
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    for (i = 0; i < 24; i++)
        cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, gregs[i]),
    cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_sr_m = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, sr_m), "SR_M");
    cpu_sr_q = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, sr_q), "SR_Q");
    cpu_sr_t = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, sr_t), "SR_T");
    cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, fpul), "FPUL");
    /* Translation-internal mirrors (leading underscore in the dumped name
       marks them as non-architectural). */
    cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                            offsetof(CPUSH4State, delayed_pc),
    cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, ldst), "_ldst_");
    for (i = 0; i < 32; i++)
        cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, fregs[i]),
/* Dump the architectural register state to "f" (e.g. for the monitor's
   "info registers").  The "flags" argument is not used in the visible body.
   NOTE(review): missing from this extract: the function braces, the
   "int i;" declaration, the for-loop's closing brace, and the
   env->delayed_pc argument lines of the last two cpu_fprintf calls. */
void superh_cpu_dump_state(CPUState *cs, FILE *f,
                           fprintf_function cpu_fprintf, int flags)
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    /* SR must be recomposed from its split fields, hence cpu_read_sr(). */
    cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                env->spc, env->ssr, env->gbr, env->vbr);
    cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                env->sgr, env->dbr, env->delayed_pc, env->fpul);
    /* All 24 raw registers (both R0..R7 banks), four per line. */
    for (i = 0; i < 24; i += 4) {
        cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                    i, env->gregs[i], i + 1, env->gregs[i + 1],
                    i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    if (env->flags & DELAY_SLOT) {
        cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
    } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
        cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
183 static void gen_read_sr(TCGv dst
)
185 TCGv t0
= tcg_temp_new();
186 tcg_gen_shli_i32(t0
, cpu_sr_q
, SR_Q
);
187 tcg_gen_or_i32(dst
, dst
, t0
);
188 tcg_gen_shli_i32(t0
, cpu_sr_m
, SR_M
);
189 tcg_gen_or_i32(dst
, dst
, t0
);
190 tcg_gen_shli_i32(t0
, cpu_sr_t
, SR_T
);
191 tcg_gen_or_i32(dst
, cpu_sr
, t0
);
192 tcg_temp_free_i32(t0
);
195 static void gen_write_sr(TCGv src
)
197 tcg_gen_andi_i32(cpu_sr
, src
,
198 ~((1u << SR_Q
) | (1u << SR_M
) | (1u << SR_T
)));
199 tcg_gen_shri_i32(cpu_sr_q
, src
, SR_Q
);
200 tcg_gen_andi_i32(cpu_sr_q
, cpu_sr_q
, 1);
201 tcg_gen_shri_i32(cpu_sr_m
, src
, SR_M
);
202 tcg_gen_andi_i32(cpu_sr_m
, cpu_sr_m
, 1);
203 tcg_gen_shri_i32(cpu_sr_t
, src
, SR_T
);
204 tcg_gen_andi_i32(cpu_sr_t
, cpu_sr_t
, 1);
/* Emit the code ending a TB with a jump to "dest"; "n" (0 or 1) selects
   which of the TB's two chainable exit slots is used. */
static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
    TranslationBlock *tb;
    /* NOTE(review): "tb" is read below without a visible assignment —
       the "tb = ctx->tb;" line, the tcg_gen_goto_tb(n) call and the
       else/exit-tb tail appear to be missing from this extract. */
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
        !ctx->singlestep_enabled) {
        /* Use a direct jump if in same page and singlestep not enabled */
        tcg_gen_movi_i32(cpu_pc, dest);
        /* Low bits of the exit_tb value encode the exit slot for chaining. */
        tcg_gen_exit_tb((uintptr_t)tb + n);
        tcg_gen_movi_i32(cpu_pc, dest);
        /* Give gdb a chance to trap after each emulated instruction. */
        if (ctx->singlestep_enabled)
            gen_helper_debug(cpu_env);
/* Jump to the target of a taken branch, statically when known,
   otherwise through cpu_delayed_pc. */
static void gen_jump(DisasContext * ctx)
    if (ctx->delayed_pc == (uint32_t) - 1) {
        /* Target is not statically known; it necessarily comes from a
           delayed jump, as immediate jumps are conditional jumps. */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        if (ctx->singlestep_enabled)
            gen_helper_debug(cpu_env);
        /* NOTE(review): the exit_tb call and the "} else {" of the
           static-target path appear missing from this extract. */
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
240 static inline void gen_branch_slot(uint32_t delayed_pc
, int t
)
242 TCGLabel
*label
= gen_new_label();
243 tcg_gen_movi_i32(cpu_delayed_pc
, delayed_pc
);
244 tcg_gen_brcondi_i32(t
? TCG_COND_EQ
: TCG_COND_NE
, cpu_sr_t
, 0, label
);
245 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
246 gen_set_label(label
);
/* Immediate conditional jump (bt or bf): branch to "ift" when SR.T is
   set, to "ifnott" when it is clear. */
static void gen_conditional_jump(DisasContext * ctx,
                                 target_ulong ift, target_ulong ifnott)
    TCGLabel *l1 = gen_new_label();
    tcg_gen_brcondi_i32(TCG_COND_NE, cpu_sr_t, 0, l1);
    /* T == 0 path. */
    gen_goto_tb(ctx, 0, ifnott);
    /* NOTE(review): the gen_set_label(l1) between the two paths appears
       missing from this extract. */
    gen_goto_tb(ctx, 1, ift);
/* Delayed conditional jump (bt/s or bf/s): after the delay slot has
   executed, either fall through to pc+2 or take the recorded branch,
   depending on the DELAY_SLOT_TRUE bit gen_branch_slot set. */
static void gen_delayed_conditional_jump(DisasContext * ctx)
    /* NOTE(review): the declarations of l1/ds, the ds = tcg_temp_new()
       line, gen_set_label(l1) and the trailing gen_jump(ctx) appear
       missing from this extract. */
    l1 = gen_new_label();
    tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    /* Branch not taken: continue at the insn after the delay slot. */
    gen_goto_tb(ctx, 1, ctx->pc + 2);
    /* Branch taken: clear the marker before jumping. */
    tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
/* Replace cpu_flags with "flags", preserving only the DELAY_SLOT_TRUE
   bit (which tracks a pending taken conditional delayed branch). */
static inline void gen_store_flags(uint32_t flags)
    tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
/* Read the FP register pair reg/reg+1 as one 64-bit value:
   cpu_fregs[reg] supplies the high 32 bits, cpu_fregs[reg + 1] the low
   32 bits (concat takes low half first). */
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
287 static inline void gen_store_fpr64 (TCGv_i64 t
, int reg
)
289 TCGv_i32 tmp
= tcg_temp_new_i32();
290 tcg_gen_extrl_i64_i32(tmp
, t
);
291 tcg_gen_mov_i32(cpu_fregs
[reg
+ 1], tmp
);
292 tcg_gen_shri_i64(t
, t
, 32);
293 tcg_gen_extrl_i64_i32(tmp
, t
);
294 tcg_gen_mov_i32(cpu_fregs
[reg
], tmp
);
295 tcg_temp_free_i32(tmp
);
/* Opcode bit-field extraction helpers; all read ctx->opcode. */
#define B3_0 (ctx->opcode & 0xf)                        /* bits 3:0 */
#define B6_4 ((ctx->opcode >> 4) & 0x7)                 /* bits 6:4 */
#define B7_4 ((ctx->opcode >> 4) & 0xf)                 /* bits 7:4 */
#define B7_0 (ctx->opcode & 0xff)                       /* bits 7:0 */
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff)) /* bits 7:0, sign-extended */
/* bits 11:0, sign-extended to 32 bits */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
                (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)                /* bits 11:8 */
#define B15_12 ((ctx->opcode >> 12) & 0xf)              /* bits 15:12 */

/* General register access: when both SR.MD and SR.RB are set, R0..R7
   name the second bank (cpu_gregs[16..23]); otherwise the first. */
#define REG(x) ((x) < 8 && (ctx->flags & (1u << SR_MD))\
                && (ctx->flags & (1u << SR_RB))\
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* The R0..R7 bank that is NOT currently selected by SR.MD/SR.RB. */
#define ALTREG(x) ((x) < 8 && (!(ctx->flags & (1u << SR_MD))\
                   || !(ctx->flags & (1u << SR_RB)))\
                   ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register access, honouring the FPSCR.FR bank-select bit. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
/* Map an extended (XD-style) register number onto its backing FPR index. */
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Decoder guard macros: each raises the appropriate exception (setting
   cpu_pc to the faulting instruction first) and ends the TB with
   BS_BRANCH when its precondition is violated.
   NOTE(review): the opening/closing braces and the "} else {" lines of
   these macro bodies are missing from this extract. */
#define CHECK_NOT_DELAY_SLOT \
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
        tcg_gen_movi_i32(cpu_pc, ctx->pc); /* report faulting PC */ \
        gen_helper_raise_slot_illegal_instruction(cpu_env); \
        ctx->bstate = BS_BRANCH; \
#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) { \
        tcg_gen_movi_i32(cpu_pc, ctx->pc); \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
            gen_helper_raise_slot_illegal_instruction(cpu_env); \
            gen_helper_raise_illegal_instruction(cpu_env); \
        ctx->bstate = BS_BRANCH; \
#define CHECK_FPU_ENABLED \
    if (ctx->flags & (1u << SR_FD)) { /* SR.FD set = FPU disabled */ \
        tcg_gen_movi_i32(cpu_pc, ctx->pc); \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
            gen_helper_raise_slot_fpu_disable(cpu_env); \
            gen_helper_raise_fpu_disable(cpu_env); \
        ctx->bstate = BS_BRANCH; \
354 static void _decode_opc(DisasContext
* ctx
)
356 /* This code tries to make movcal emulation sufficiently
357 accurate for Linux purposes. This instruction writes
358 memory, and prior to that, always allocates a cache line.
359 It is used in two contexts:
360 - in memcpy, where data is copied in blocks, the first write
361 of to a block uses movca.l for performance.
362 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
363 to flush the cache. Here, the data written by movcal.l is never
364 written to memory, and the data written is just bogus.
366 To simulate this, we simulate movcal.l, we store the value to memory,
367 but we also remember the previous content. If we see ocbi, we check
368 if movcal.l for that address was done previously. If so, the write should
369 not have hit the memory, so we restore the previous content.
370 When we see an instruction that is neither movca.l
371 nor ocbi, the previous content is discarded.
373 To optimize, we only try to flush stores when we're at the start of
374 TB, or if we already saw movca.l in this TB and did not flush stores
378 int opcode
= ctx
->opcode
& 0xf0ff;
379 if (opcode
!= 0x0093 /* ocbi */
380 && opcode
!= 0x00c3 /* movca.l */)
382 gen_helper_discard_movcal_backup(cpu_env
);
388 fprintf(stderr
, "Translating opcode 0x%04x\n", ctx
->opcode
);
391 switch (ctx
->opcode
) {
392 case 0x0019: /* div0u */
393 tcg_gen_movi_i32(cpu_sr_m
, 0);
394 tcg_gen_movi_i32(cpu_sr_q
, 0);
395 tcg_gen_movi_i32(cpu_sr_t
, 0);
397 case 0x000b: /* rts */
399 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_pr
);
400 ctx
->flags
|= DELAY_SLOT
;
401 ctx
->delayed_pc
= (uint32_t) - 1;
403 case 0x0028: /* clrmac */
404 tcg_gen_movi_i32(cpu_mach
, 0);
405 tcg_gen_movi_i32(cpu_macl
, 0);
407 case 0x0048: /* clrs */
408 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~(1u << SR_S
));
410 case 0x0008: /* clrt */
411 tcg_gen_movi_i32(cpu_sr_t
, 0);
413 case 0x0038: /* ldtlb */
415 gen_helper_ldtlb(cpu_env
);
417 case 0x002b: /* rte */
420 gen_write_sr(cpu_ssr
);
421 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_spc
);
422 ctx
->flags
|= DELAY_SLOT
;
423 ctx
->delayed_pc
= (uint32_t) - 1;
425 case 0x0058: /* sets */
426 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, (1u << SR_S
));
428 case 0x0018: /* sett */
429 tcg_gen_movi_i32(cpu_sr_t
, 1);
431 case 0xfbfd: /* frchg */
432 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_FR
);
433 ctx
->bstate
= BS_STOP
;
435 case 0xf3fd: /* fschg */
436 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_SZ
);
437 ctx
->bstate
= BS_STOP
;
439 case 0x0009: /* nop */
441 case 0x001b: /* sleep */
443 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
+ 2);
444 gen_helper_sleep(cpu_env
);
448 switch (ctx
->opcode
& 0xf000) {
449 case 0x1000: /* mov.l Rm,@(disp,Rn) */
451 TCGv addr
= tcg_temp_new();
452 tcg_gen_addi_i32(addr
, REG(B11_8
), B3_0
* 4);
453 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
457 case 0x5000: /* mov.l @(disp,Rm),Rn */
459 TCGv addr
= tcg_temp_new();
460 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 4);
461 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
465 case 0xe000: /* mov #imm,Rn */
466 tcg_gen_movi_i32(REG(B11_8
), B7_0s
);
468 case 0x9000: /* mov.w @(disp,PC),Rn */
470 TCGv addr
= tcg_const_i32(ctx
->pc
+ 4 + B7_0
* 2);
471 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESW
);
475 case 0xd000: /* mov.l @(disp,PC),Rn */
477 TCGv addr
= tcg_const_i32((ctx
->pc
+ 4 + B7_0
* 4) & ~3);
478 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
482 case 0x7000: /* add #imm,Rn */
483 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), B7_0s
);
485 case 0xa000: /* bra disp */
487 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
488 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
489 ctx
->flags
|= DELAY_SLOT
;
491 case 0xb000: /* bsr disp */
493 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
494 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
495 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
496 ctx
->flags
|= DELAY_SLOT
;
500 switch (ctx
->opcode
& 0xf00f) {
501 case 0x6003: /* mov Rm,Rn */
502 tcg_gen_mov_i32(REG(B11_8
), REG(B7_4
));
504 case 0x2000: /* mov.b Rm,@Rn */
505 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_UB
);
507 case 0x2001: /* mov.w Rm,@Rn */
508 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_TEUW
);
510 case 0x2002: /* mov.l Rm,@Rn */
511 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
513 case 0x6000: /* mov.b @Rm,Rn */
514 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_SB
);
516 case 0x6001: /* mov.w @Rm,Rn */
517 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESW
);
519 case 0x6002: /* mov.l @Rm,Rn */
520 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESL
);
522 case 0x2004: /* mov.b Rm,@-Rn */
524 TCGv addr
= tcg_temp_new();
525 tcg_gen_subi_i32(addr
, REG(B11_8
), 1);
526 /* might cause re-execution */
527 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_UB
);
528 tcg_gen_mov_i32(REG(B11_8
), addr
); /* modify register status */
532 case 0x2005: /* mov.w Rm,@-Rn */
534 TCGv addr
= tcg_temp_new();
535 tcg_gen_subi_i32(addr
, REG(B11_8
), 2);
536 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUW
);
537 tcg_gen_mov_i32(REG(B11_8
), addr
);
541 case 0x2006: /* mov.l Rm,@-Rn */
543 TCGv addr
= tcg_temp_new();
544 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
545 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
546 tcg_gen_mov_i32(REG(B11_8
), addr
);
549 case 0x6004: /* mov.b @Rm+,Rn */
550 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_SB
);
552 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 1);
554 case 0x6005: /* mov.w @Rm+,Rn */
555 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESW
);
557 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
559 case 0x6006: /* mov.l @Rm+,Rn */
560 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESL
);
562 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
564 case 0x0004: /* mov.b Rm,@(R0,Rn) */
566 TCGv addr
= tcg_temp_new();
567 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
568 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_UB
);
572 case 0x0005: /* mov.w Rm,@(R0,Rn) */
574 TCGv addr
= tcg_temp_new();
575 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
576 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUW
);
580 case 0x0006: /* mov.l Rm,@(R0,Rn) */
582 TCGv addr
= tcg_temp_new();
583 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
584 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
588 case 0x000c: /* mov.b @(R0,Rm),Rn */
590 TCGv addr
= tcg_temp_new();
591 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
592 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_SB
);
596 case 0x000d: /* mov.w @(R0,Rm),Rn */
598 TCGv addr
= tcg_temp_new();
599 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
600 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESW
);
604 case 0x000e: /* mov.l @(R0,Rm),Rn */
606 TCGv addr
= tcg_temp_new();
607 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
608 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
612 case 0x6008: /* swap.b Rm,Rn */
614 TCGv low
= tcg_temp_new();;
615 tcg_gen_ext16u_i32(low
, REG(B7_4
));
616 tcg_gen_bswap16_i32(low
, low
);
617 tcg_gen_deposit_i32(REG(B11_8
), REG(B7_4
), low
, 0, 16);
621 case 0x6009: /* swap.w Rm,Rn */
622 tcg_gen_rotli_i32(REG(B11_8
), REG(B7_4
), 16);
624 case 0x200d: /* xtrct Rm,Rn */
627 high
= tcg_temp_new();
628 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
629 low
= tcg_temp_new();
630 tcg_gen_shri_i32(low
, REG(B11_8
), 16);
631 tcg_gen_or_i32(REG(B11_8
), high
, low
);
636 case 0x300c: /* add Rm,Rn */
637 tcg_gen_add_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
639 case 0x300e: /* addc Rm,Rn */
642 t0
= tcg_const_tl(0);
644 tcg_gen_add2_i32(t1
, cpu_sr_t
, cpu_sr_t
, t0
, REG(B7_4
), t0
);
645 tcg_gen_add2_i32(REG(B11_8
), cpu_sr_t
,
646 REG(B11_8
), t0
, t1
, cpu_sr_t
);
651 case 0x300f: /* addv Rm,Rn */
655 tcg_gen_add_i32(t0
, REG(B7_4
), REG(B11_8
));
657 tcg_gen_xor_i32(t1
, t0
, REG(B11_8
));
659 tcg_gen_xor_i32(t2
, REG(B7_4
), REG(B11_8
));
660 tcg_gen_andc_i32(cpu_sr_t
, t1
, t2
);
662 tcg_gen_shri_i32(cpu_sr_t
, cpu_sr_t
, 31);
664 tcg_gen_mov_i32(REG(B7_4
), t0
);
668 case 0x2009: /* and Rm,Rn */
669 tcg_gen_and_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
671 case 0x3000: /* cmp/eq Rm,Rn */
672 tcg_gen_setcond_i32(TCG_COND_EQ
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
674 case 0x3003: /* cmp/ge Rm,Rn */
675 tcg_gen_setcond_i32(TCG_COND_GE
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
677 case 0x3007: /* cmp/gt Rm,Rn */
678 tcg_gen_setcond_i32(TCG_COND_GT
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
680 case 0x3006: /* cmp/hi Rm,Rn */
681 tcg_gen_setcond_i32(TCG_COND_GTU
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
683 case 0x3002: /* cmp/hs Rm,Rn */
684 tcg_gen_setcond_i32(TCG_COND_GEU
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
686 case 0x200c: /* cmp/str Rm,Rn */
688 TCGv cmp1
= tcg_temp_new();
689 TCGv cmp2
= tcg_temp_new();
690 tcg_gen_xor_i32(cmp2
, REG(B7_4
), REG(B11_8
));
691 tcg_gen_subi_i32(cmp1
, cmp2
, 0x01010101);
692 tcg_gen_andc_i32(cmp1
, cmp1
, cmp2
);
693 tcg_gen_andi_i32(cmp1
, cmp1
, 0x80808080);
694 tcg_gen_setcondi_i32(TCG_COND_NE
, cpu_sr_t
, cmp1
, 0);
699 case 0x2007: /* div0s Rm,Rn */
700 tcg_gen_shri_i32(cpu_sr_q
, REG(B11_8
), 31); /* SR_Q */
701 tcg_gen_shri_i32(cpu_sr_m
, REG(B7_4
), 31); /* SR_M */
702 tcg_gen_xor_i32(cpu_sr_t
, cpu_sr_q
, cpu_sr_m
); /* SR_T */
704 case 0x3004: /* div1 Rm,Rn */
706 TCGv t0
= tcg_temp_new();
707 TCGv t1
= tcg_temp_new();
708 TCGv t2
= tcg_temp_new();
709 TCGv zero
= tcg_const_i32(0);
711 /* shift left arg1, saving the bit being pushed out and inserting
713 tcg_gen_shri_i32(t0
, REG(B11_8
), 31);
714 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
715 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), cpu_sr_t
);
717 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
718 using 64-bit temps, we compute arg0's high part from q ^ m, so
719 that it is 0x00000000 when adding the value or 0xffffffff when
721 tcg_gen_xor_i32(t1
, cpu_sr_q
, cpu_sr_m
);
722 tcg_gen_subi_i32(t1
, t1
, 1);
723 tcg_gen_neg_i32(t2
, REG(B7_4
));
724 tcg_gen_movcond_i32(TCG_COND_EQ
, t2
, t1
, zero
, REG(B7_4
), t2
);
725 tcg_gen_add2_i32(REG(B11_8
), t1
, REG(B11_8
), zero
, t2
, t1
);
727 /* compute T and Q depending on carry */
728 tcg_gen_andi_i32(t1
, t1
, 1);
729 tcg_gen_xor_i32(t1
, t1
, t0
);
730 tcg_gen_xori_i32(cpu_sr_t
, t1
, 1);
731 tcg_gen_xor_i32(cpu_sr_q
, cpu_sr_m
, t1
);
739 case 0x300d: /* dmuls.l Rm,Rn */
740 tcg_gen_muls2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
742 case 0x3005: /* dmulu.l Rm,Rn */
743 tcg_gen_mulu2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
745 case 0x600e: /* exts.b Rm,Rn */
746 tcg_gen_ext8s_i32(REG(B11_8
), REG(B7_4
));
748 case 0x600f: /* exts.w Rm,Rn */
749 tcg_gen_ext16s_i32(REG(B11_8
), REG(B7_4
));
751 case 0x600c: /* extu.b Rm,Rn */
752 tcg_gen_ext8u_i32(REG(B11_8
), REG(B7_4
));
754 case 0x600d: /* extu.w Rm,Rn */
755 tcg_gen_ext16u_i32(REG(B11_8
), REG(B7_4
));
757 case 0x000f: /* mac.l @Rm+,@Rn+ */
760 arg0
= tcg_temp_new();
761 tcg_gen_qemu_ld_i32(arg0
, REG(B7_4
), ctx
->memidx
, MO_TESL
);
762 arg1
= tcg_temp_new();
763 tcg_gen_qemu_ld_i32(arg1
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
764 gen_helper_macl(cpu_env
, arg0
, arg1
);
767 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
768 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
771 case 0x400f: /* mac.w @Rm+,@Rn+ */
774 arg0
= tcg_temp_new();
775 tcg_gen_qemu_ld_i32(arg0
, REG(B7_4
), ctx
->memidx
, MO_TESL
);
776 arg1
= tcg_temp_new();
777 tcg_gen_qemu_ld_i32(arg1
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
778 gen_helper_macw(cpu_env
, arg0
, arg1
);
781 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 2);
782 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
785 case 0x0007: /* mul.l Rm,Rn */
786 tcg_gen_mul_i32(cpu_macl
, REG(B7_4
), REG(B11_8
));
788 case 0x200f: /* muls.w Rm,Rn */
791 arg0
= tcg_temp_new();
792 tcg_gen_ext16s_i32(arg0
, REG(B7_4
));
793 arg1
= tcg_temp_new();
794 tcg_gen_ext16s_i32(arg1
, REG(B11_8
));
795 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
800 case 0x200e: /* mulu.w Rm,Rn */
803 arg0
= tcg_temp_new();
804 tcg_gen_ext16u_i32(arg0
, REG(B7_4
));
805 arg1
= tcg_temp_new();
806 tcg_gen_ext16u_i32(arg1
, REG(B11_8
));
807 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
812 case 0x600b: /* neg Rm,Rn */
813 tcg_gen_neg_i32(REG(B11_8
), REG(B7_4
));
815 case 0x600a: /* negc Rm,Rn */
817 TCGv t0
= tcg_const_i32(0);
818 tcg_gen_add2_i32(REG(B11_8
), cpu_sr_t
,
819 REG(B7_4
), t0
, cpu_sr_t
, t0
);
820 tcg_gen_sub2_i32(REG(B11_8
), cpu_sr_t
,
821 t0
, t0
, REG(B11_8
), cpu_sr_t
);
822 tcg_gen_andi_i32(cpu_sr_t
, cpu_sr_t
, 1);
826 case 0x6007: /* not Rm,Rn */
827 tcg_gen_not_i32(REG(B11_8
), REG(B7_4
));
829 case 0x200b: /* or Rm,Rn */
830 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
832 case 0x400c: /* shad Rm,Rn */
834 TCGv t0
= tcg_temp_new();
835 TCGv t1
= tcg_temp_new();
836 TCGv t2
= tcg_temp_new();
838 tcg_gen_andi_i32(t0
, REG(B7_4
), 0x1f);
840 /* positive case: shift to the left */
841 tcg_gen_shl_i32(t1
, REG(B11_8
), t0
);
843 /* negative case: shift to the right in two steps to
844 correctly handle the -32 case */
845 tcg_gen_xori_i32(t0
, t0
, 0x1f);
846 tcg_gen_sar_i32(t2
, REG(B11_8
), t0
);
847 tcg_gen_sari_i32(t2
, t2
, 1);
849 /* select between the two cases */
850 tcg_gen_movi_i32(t0
, 0);
851 tcg_gen_movcond_i32(TCG_COND_GE
, REG(B11_8
), REG(B7_4
), t0
, t1
, t2
);
858 case 0x400d: /* shld Rm,Rn */
860 TCGv t0
= tcg_temp_new();
861 TCGv t1
= tcg_temp_new();
862 TCGv t2
= tcg_temp_new();
864 tcg_gen_andi_i32(t0
, REG(B7_4
), 0x1f);
866 /* positive case: shift to the left */
867 tcg_gen_shl_i32(t1
, REG(B11_8
), t0
);
869 /* negative case: shift to the right in two steps to
870 correctly handle the -32 case */
871 tcg_gen_xori_i32(t0
, t0
, 0x1f);
872 tcg_gen_shr_i32(t2
, REG(B11_8
), t0
);
873 tcg_gen_shri_i32(t2
, t2
, 1);
875 /* select between the two cases */
876 tcg_gen_movi_i32(t0
, 0);
877 tcg_gen_movcond_i32(TCG_COND_GE
, REG(B11_8
), REG(B7_4
), t0
, t1
, t2
);
884 case 0x3008: /* sub Rm,Rn */
885 tcg_gen_sub_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
887 case 0x300a: /* subc Rm,Rn */
890 t0
= tcg_const_tl(0);
892 tcg_gen_add2_i32(t1
, cpu_sr_t
, cpu_sr_t
, t0
, REG(B7_4
), t0
);
893 tcg_gen_sub2_i32(REG(B11_8
), cpu_sr_t
,
894 REG(B11_8
), t0
, t1
, cpu_sr_t
);
895 tcg_gen_andi_i32(cpu_sr_t
, cpu_sr_t
, 1);
900 case 0x300b: /* subv Rm,Rn */
904 tcg_gen_sub_i32(t0
, REG(B11_8
), REG(B7_4
));
906 tcg_gen_xor_i32(t1
, t0
, REG(B7_4
));
908 tcg_gen_xor_i32(t2
, REG(B11_8
), REG(B7_4
));
909 tcg_gen_and_i32(t1
, t1
, t2
);
911 tcg_gen_shri_i32(cpu_sr_t
, t1
, 31);
913 tcg_gen_mov_i32(REG(B11_8
), t0
);
917 case 0x2008: /* tst Rm,Rn */
919 TCGv val
= tcg_temp_new();
920 tcg_gen_and_i32(val
, REG(B7_4
), REG(B11_8
));
921 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
925 case 0x200a: /* xor Rm,Rn */
926 tcg_gen_xor_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
928 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
930 if (ctx
->flags
& FPSCR_SZ
) {
931 TCGv_i64 fp
= tcg_temp_new_i64();
932 gen_load_fpr64(fp
, XREG(B7_4
));
933 gen_store_fpr64(fp
, XREG(B11_8
));
934 tcg_temp_free_i64(fp
);
936 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
939 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
941 if (ctx
->flags
& FPSCR_SZ
) {
942 TCGv addr_hi
= tcg_temp_new();
944 tcg_gen_addi_i32(addr_hi
, REG(B11_8
), 4);
945 tcg_gen_qemu_st_i32(cpu_fregs
[fr
], REG(B11_8
),
946 ctx
->memidx
, MO_TEUL
);
947 tcg_gen_qemu_st_i32(cpu_fregs
[fr
+1], addr_hi
,
948 ctx
->memidx
, MO_TEUL
);
949 tcg_temp_free(addr_hi
);
951 tcg_gen_qemu_st_i32(cpu_fregs
[FREG(B7_4
)], REG(B11_8
),
952 ctx
->memidx
, MO_TEUL
);
955 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
957 if (ctx
->flags
& FPSCR_SZ
) {
958 TCGv addr_hi
= tcg_temp_new();
959 int fr
= XREG(B11_8
);
960 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
961 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
, MO_TEUL
);
962 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
, MO_TEUL
);
963 tcg_temp_free(addr_hi
);
965 tcg_gen_qemu_ld_i32(cpu_fregs
[FREG(B11_8
)], REG(B7_4
),
966 ctx
->memidx
, MO_TEUL
);
969 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
971 if (ctx
->flags
& FPSCR_SZ
) {
972 TCGv addr_hi
= tcg_temp_new();
973 int fr
= XREG(B11_8
);
974 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
975 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
, MO_TEUL
);
976 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
, MO_TEUL
);
977 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 8);
978 tcg_temp_free(addr_hi
);
980 tcg_gen_qemu_ld_i32(cpu_fregs
[FREG(B11_8
)], REG(B7_4
),
981 ctx
->memidx
, MO_TEUL
);
982 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
985 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
987 TCGv addr
= tcg_temp_new_i32();
988 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
989 if (ctx
->flags
& FPSCR_SZ
) {
991 tcg_gen_qemu_st_i32(cpu_fregs
[fr
+1], addr
, ctx
->memidx
, MO_TEUL
);
992 tcg_gen_subi_i32(addr
, addr
, 4);
993 tcg_gen_qemu_st_i32(cpu_fregs
[fr
], addr
, ctx
->memidx
, MO_TEUL
);
995 tcg_gen_qemu_st_i32(cpu_fregs
[FREG(B7_4
)], addr
,
996 ctx
->memidx
, MO_TEUL
);
998 tcg_gen_mov_i32(REG(B11_8
), addr
);
1001 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1004 TCGv addr
= tcg_temp_new_i32();
1005 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
1006 if (ctx
->flags
& FPSCR_SZ
) {
1007 int fr
= XREG(B11_8
);
1008 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], addr
,
1009 ctx
->memidx
, MO_TEUL
);
1010 tcg_gen_addi_i32(addr
, addr
, 4);
1011 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr
,
1012 ctx
->memidx
, MO_TEUL
);
1014 tcg_gen_qemu_ld_i32(cpu_fregs
[FREG(B11_8
)], addr
,
1015 ctx
->memidx
, MO_TEUL
);
1017 tcg_temp_free(addr
);
1020 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1023 TCGv addr
= tcg_temp_new();
1024 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
1025 if (ctx
->flags
& FPSCR_SZ
) {
1026 int fr
= XREG(B7_4
);
1027 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], addr
,
1028 ctx
->memidx
, MO_TEUL
);
1029 tcg_gen_addi_i32(addr
, addr
, 4);
1030 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr
,
1031 ctx
->memidx
, MO_TEUL
);
1033 tcg_gen_qemu_st_i32(cpu_fregs
[FREG(B7_4
)], addr
,
1034 ctx
->memidx
, MO_TEUL
);
1036 tcg_temp_free(addr
);
1039 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1040 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1041 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1042 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1043 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1044 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1047 if (ctx
->flags
& FPSCR_PR
) {
1050 if (ctx
->opcode
& 0x0110)
1051 break; /* illegal instruction */
1052 fp0
= tcg_temp_new_i64();
1053 fp1
= tcg_temp_new_i64();
1054 gen_load_fpr64(fp0
, DREG(B11_8
));
1055 gen_load_fpr64(fp1
, DREG(B7_4
));
1056 switch (ctx
->opcode
& 0xf00f) {
1057 case 0xf000: /* fadd Rm,Rn */
1058 gen_helper_fadd_DT(fp0
, cpu_env
, fp0
, fp1
);
1060 case 0xf001: /* fsub Rm,Rn */
1061 gen_helper_fsub_DT(fp0
, cpu_env
, fp0
, fp1
);
1063 case 0xf002: /* fmul Rm,Rn */
1064 gen_helper_fmul_DT(fp0
, cpu_env
, fp0
, fp1
);
1066 case 0xf003: /* fdiv Rm,Rn */
1067 gen_helper_fdiv_DT(fp0
, cpu_env
, fp0
, fp1
);
1069 case 0xf004: /* fcmp/eq Rm,Rn */
1070 gen_helper_fcmp_eq_DT(cpu_env
, fp0
, fp1
);
1072 case 0xf005: /* fcmp/gt Rm,Rn */
1073 gen_helper_fcmp_gt_DT(cpu_env
, fp0
, fp1
);
1076 gen_store_fpr64(fp0
, DREG(B11_8
));
1077 tcg_temp_free_i64(fp0
);
1078 tcg_temp_free_i64(fp1
);
1080 switch (ctx
->opcode
& 0xf00f) {
1081 case 0xf000: /* fadd Rm,Rn */
1082 gen_helper_fadd_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1083 cpu_fregs
[FREG(B11_8
)],
1084 cpu_fregs
[FREG(B7_4
)]);
1086 case 0xf001: /* fsub Rm,Rn */
1087 gen_helper_fsub_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1088 cpu_fregs
[FREG(B11_8
)],
1089 cpu_fregs
[FREG(B7_4
)]);
1091 case 0xf002: /* fmul Rm,Rn */
1092 gen_helper_fmul_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1093 cpu_fregs
[FREG(B11_8
)],
1094 cpu_fregs
[FREG(B7_4
)]);
1096 case 0xf003: /* fdiv Rm,Rn */
1097 gen_helper_fdiv_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1098 cpu_fregs
[FREG(B11_8
)],
1099 cpu_fregs
[FREG(B7_4
)]);
1101 case 0xf004: /* fcmp/eq Rm,Rn */
1102 gen_helper_fcmp_eq_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1103 cpu_fregs
[FREG(B7_4
)]);
1105 case 0xf005: /* fcmp/gt Rm,Rn */
1106 gen_helper_fcmp_gt_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1107 cpu_fregs
[FREG(B7_4
)]);
1113 case 0xf00e: /* fmac FR0,RM,Rn */
1116 if (ctx
->flags
& FPSCR_PR
) {
1117 break; /* illegal instruction */
1119 gen_helper_fmac_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1120 cpu_fregs
[FREG(0)], cpu_fregs
[FREG(B7_4
)],
1121 cpu_fregs
[FREG(B11_8
)]);
1127 switch (ctx
->opcode
& 0xff00) {
1128 case 0xc900: /* and #imm,R0 */
1129 tcg_gen_andi_i32(REG(0), REG(0), B7_0
);
1131 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1134 addr
= tcg_temp_new();
1135 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1136 val
= tcg_temp_new();
1137 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1138 tcg_gen_andi_i32(val
, val
, B7_0
);
1139 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1141 tcg_temp_free(addr
);
1144 case 0x8b00: /* bf label */
1145 CHECK_NOT_DELAY_SLOT
1146 gen_conditional_jump(ctx
, ctx
->pc
+ 2,
1147 ctx
->pc
+ 4 + B7_0s
* 2);
1148 ctx
->bstate
= BS_BRANCH
;
1150 case 0x8f00: /* bf/s label */
1151 CHECK_NOT_DELAY_SLOT
1152 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 0);
1153 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1155 case 0x8900: /* bt label */
1156 CHECK_NOT_DELAY_SLOT
1157 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2,
1159 ctx
->bstate
= BS_BRANCH
;
1161 case 0x8d00: /* bt/s label */
1162 CHECK_NOT_DELAY_SLOT
1163 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 1);
1164 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1166 case 0x8800: /* cmp/eq #imm,R0 */
1167 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, REG(0), B7_0s
);
1169 case 0xc400: /* mov.b @(disp,GBR),R0 */
1171 TCGv addr
= tcg_temp_new();
1172 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1173 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_SB
);
1174 tcg_temp_free(addr
);
1177 case 0xc500: /* mov.w @(disp,GBR),R0 */
1179 TCGv addr
= tcg_temp_new();
1180 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1181 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESW
);
1182 tcg_temp_free(addr
);
1185 case 0xc600: /* mov.l @(disp,GBR),R0 */
1187 TCGv addr
= tcg_temp_new();
1188 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1189 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESL
);
1190 tcg_temp_free(addr
);
1193 case 0xc000: /* mov.b R0,@(disp,GBR) */
1195 TCGv addr
= tcg_temp_new();
1196 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1197 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_UB
);
1198 tcg_temp_free(addr
);
1201 case 0xc100: /* mov.w R0,@(disp,GBR) */
1203 TCGv addr
= tcg_temp_new();
1204 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1205 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUW
);
1206 tcg_temp_free(addr
);
1209 case 0xc200: /* mov.l R0,@(disp,GBR) */
1211 TCGv addr
= tcg_temp_new();
1212 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1213 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUL
);
1214 tcg_temp_free(addr
);
1217 case 0x8000: /* mov.b R0,@(disp,Rn) */
1219 TCGv addr
= tcg_temp_new();
1220 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1221 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_UB
);
1222 tcg_temp_free(addr
);
1225 case 0x8100: /* mov.w R0,@(disp,Rn) */
1227 TCGv addr
= tcg_temp_new();
1228 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1229 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUW
);
1230 tcg_temp_free(addr
);
1233 case 0x8400: /* mov.b @(disp,Rn),R0 */
1235 TCGv addr
= tcg_temp_new();
1236 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1237 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_SB
);
1238 tcg_temp_free(addr
);
1241 case 0x8500: /* mov.w @(disp,Rn),R0 */
1243 TCGv addr
= tcg_temp_new();
1244 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1245 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESW
);
1246 tcg_temp_free(addr
);
1249 case 0xc700: /* mova @(disp,PC),R0 */
1250 tcg_gen_movi_i32(REG(0), ((ctx
->pc
& 0xfffffffc) + 4 + B7_0
* 4) & ~3);
1252 case 0xcb00: /* or #imm,R0 */
1253 tcg_gen_ori_i32(REG(0), REG(0), B7_0
);
1255 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1258 addr
= tcg_temp_new();
1259 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1260 val
= tcg_temp_new();
1261 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1262 tcg_gen_ori_i32(val
, val
, B7_0
);
1263 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1265 tcg_temp_free(addr
);
1268 case 0xc300: /* trapa #imm */
1271 CHECK_NOT_DELAY_SLOT
1272 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1273 imm
= tcg_const_i32(B7_0
);
1274 gen_helper_trapa(cpu_env
, imm
);
1276 ctx
->bstate
= BS_BRANCH
;
1279 case 0xc800: /* tst #imm,R0 */
1281 TCGv val
= tcg_temp_new();
1282 tcg_gen_andi_i32(val
, REG(0), B7_0
);
1283 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
1287 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1289 TCGv val
= tcg_temp_new();
1290 tcg_gen_add_i32(val
, REG(0), cpu_gbr
);
1291 tcg_gen_qemu_ld_i32(val
, val
, ctx
->memidx
, MO_UB
);
1292 tcg_gen_andi_i32(val
, val
, B7_0
);
1293 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
1297 case 0xca00: /* xor #imm,R0 */
1298 tcg_gen_xori_i32(REG(0), REG(0), B7_0
);
1300 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1303 addr
= tcg_temp_new();
1304 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1305 val
= tcg_temp_new();
1306 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1307 tcg_gen_xori_i32(val
, val
, B7_0
);
1308 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1310 tcg_temp_free(addr
);
1315 switch (ctx
->opcode
& 0xf08f) {
1316 case 0x408e: /* ldc Rm,Rn_BANK */
1318 tcg_gen_mov_i32(ALTREG(B6_4
), REG(B11_8
));
1320 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1322 tcg_gen_qemu_ld_i32(ALTREG(B6_4
), REG(B11_8
), ctx
->memidx
, MO_TESL
);
1323 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1325 case 0x0082: /* stc Rm_BANK,Rn */
1327 tcg_gen_mov_i32(REG(B11_8
), ALTREG(B6_4
));
1329 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1332 TCGv addr
= tcg_temp_new();
1333 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1334 tcg_gen_qemu_st_i32(ALTREG(B6_4
), addr
, ctx
->memidx
, MO_TEUL
);
1335 tcg_gen_mov_i32(REG(B11_8
), addr
);
1336 tcg_temp_free(addr
);
1341 switch (ctx
->opcode
& 0xf0ff) {
1342 case 0x0023: /* braf Rn */
1343 CHECK_NOT_DELAY_SLOT
1344 tcg_gen_addi_i32(cpu_delayed_pc
, REG(B11_8
), ctx
->pc
+ 4);
1345 ctx
->flags
|= DELAY_SLOT
;
1346 ctx
->delayed_pc
= (uint32_t) - 1;
1348 case 0x0003: /* bsrf Rn */
1349 CHECK_NOT_DELAY_SLOT
1350 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1351 tcg_gen_add_i32(cpu_delayed_pc
, REG(B11_8
), cpu_pr
);
1352 ctx
->flags
|= DELAY_SLOT
;
1353 ctx
->delayed_pc
= (uint32_t) - 1;
1355 case 0x4015: /* cmp/pl Rn */
1356 tcg_gen_setcondi_i32(TCG_COND_GT
, cpu_sr_t
, REG(B11_8
), 0);
1358 case 0x4011: /* cmp/pz Rn */
1359 tcg_gen_setcondi_i32(TCG_COND_GE
, cpu_sr_t
, REG(B11_8
), 0);
1361 case 0x4010: /* dt Rn */
1362 tcg_gen_subi_i32(REG(B11_8
), REG(B11_8
), 1);
1363 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, REG(B11_8
), 0);
1365 case 0x402b: /* jmp @Rn */
1366 CHECK_NOT_DELAY_SLOT
1367 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1368 ctx
->flags
|= DELAY_SLOT
;
1369 ctx
->delayed_pc
= (uint32_t) - 1;
1371 case 0x400b: /* jsr @Rn */
1372 CHECK_NOT_DELAY_SLOT
1373 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1374 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1375 ctx
->flags
|= DELAY_SLOT
;
1376 ctx
->delayed_pc
= (uint32_t) - 1;
1378 case 0x400e: /* ldc Rm,SR */
1381 TCGv val
= tcg_temp_new();
1382 tcg_gen_andi_i32(val
, REG(B11_8
), 0x700083f3);
1385 ctx
->bstate
= BS_STOP
;
1388 case 0x4007: /* ldc.l @Rm+,SR */
1391 TCGv val
= tcg_temp_new();
1392 tcg_gen_qemu_ld_i32(val
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
1393 tcg_gen_andi_i32(val
, val
, 0x700083f3);
1396 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1397 ctx
->bstate
= BS_STOP
;
1400 case 0x0002: /* stc SR,Rn */
1402 gen_read_sr(REG(B11_8
));
1404 case 0x4003: /* stc SR,@-Rn */
1407 TCGv addr
= tcg_temp_new();
1408 TCGv val
= tcg_temp_new();
1409 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1411 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_TEUL
);
1412 tcg_gen_mov_i32(REG(B11_8
), addr
);
1414 tcg_temp_free(addr
);
1417 #define LD(reg,ldnum,ldpnum,prechk) \
1420 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1424 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1425 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1427 #define ST(reg,stnum,stpnum,prechk) \
1430 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1435 TCGv addr = tcg_temp_new(); \
1436 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1437 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1438 tcg_gen_mov_i32(REG(B11_8), addr); \
1439 tcg_temp_free(addr); \
1442 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1443 LD(reg,ldnum,ldpnum,prechk) \
1444 ST(reg,stnum,stpnum,prechk)
1445 LDST(gbr
, 0x401e, 0x4017, 0x0012, 0x4013, {})
1446 LDST(vbr
, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED
)
1447 LDST(ssr
, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED
)
1448 LDST(spc
, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED
)
1449 ST(sgr
, 0x003a, 0x4032, CHECK_PRIVILEGED
)
1450 LD(sgr
, 0x403a, 0x4036, CHECK_PRIVILEGED
if (!(ctx
->features
& SH_FEATURE_SH4A
)) break;)
1451 LDST(dbr
, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED
)
1452 LDST(mach
, 0x400a, 0x4006, 0x000a, 0x4002, {})
1453 LDST(macl
, 0x401a, 0x4016, 0x001a, 0x4012, {})
1454 LDST(pr
, 0x402a, 0x4026, 0x002a, 0x4022, {})
1455 LDST(fpul
, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED
})
1456 case 0x406a: /* lds Rm,FPSCR */
1458 gen_helper_ld_fpscr(cpu_env
, REG(B11_8
));
1459 ctx
->bstate
= BS_STOP
;
1461 case 0x4066: /* lds.l @Rm+,FPSCR */
1464 TCGv addr
= tcg_temp_new();
1465 tcg_gen_qemu_ld_i32(addr
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
1466 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1467 gen_helper_ld_fpscr(cpu_env
, addr
);
1468 tcg_temp_free(addr
);
1469 ctx
->bstate
= BS_STOP
;
1472 case 0x006a: /* sts FPSCR,Rn */
1474 tcg_gen_andi_i32(REG(B11_8
), cpu_fpscr
, 0x003fffff);
1476 case 0x4062: /* sts FPSCR,@-Rn */
1480 val
= tcg_temp_new();
1481 tcg_gen_andi_i32(val
, cpu_fpscr
, 0x003fffff);
1482 addr
= tcg_temp_new();
1483 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1484 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_TEUL
);
1485 tcg_gen_mov_i32(REG(B11_8
), addr
);
1486 tcg_temp_free(addr
);
1490 case 0x00c3: /* movca.l R0,@Rm */
1492 TCGv val
= tcg_temp_new();
1493 tcg_gen_qemu_ld_i32(val
, REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1494 gen_helper_movcal(cpu_env
, REG(B11_8
), val
);
1495 tcg_gen_qemu_st_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1497 ctx
->has_movcal
= 1;
1500 /* MOVUA.L @Rm,R0 (Rm) -> R0
1501 Load non-boundary-aligned data */
1502 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1505 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1506 Load non-boundary-aligned data */
1507 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1508 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1510 case 0x0029: /* movt Rn */
1511 tcg_gen_mov_i32(REG(B11_8
), cpu_sr_t
);
1516 If (T == 1) R0 -> (Rn)
1519 if (ctx
->features
& SH_FEATURE_SH4A
) {
1520 TCGLabel
*label
= gen_new_label();
1521 tcg_gen_mov_i32(cpu_sr_t
, cpu_ldst
);
1522 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ldst
, 0, label
);
1523 tcg_gen_qemu_st_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1524 gen_set_label(label
);
1525 tcg_gen_movi_i32(cpu_ldst
, 0);
1533 When interrupt/exception
1536 if (ctx
->features
& SH_FEATURE_SH4A
) {
1537 tcg_gen_movi_i32(cpu_ldst
, 0);
1538 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TESL
);
1539 tcg_gen_movi_i32(cpu_ldst
, 1);
1543 case 0x0093: /* ocbi @Rn */
1545 gen_helper_ocbi(cpu_env
, REG(B11_8
));
1548 case 0x00a3: /* ocbp @Rn */
1549 case 0x00b3: /* ocbwb @Rn */
1550 /* These instructions are supposed to do nothing in case of
1551 a cache miss. Given that we only partially emulate caches
1552 it is safe to simply ignore them. */
1554 case 0x0083: /* pref @Rn */
1556 case 0x00d3: /* prefi @Rn */
1557 if (ctx
->features
& SH_FEATURE_SH4A
)
1561 case 0x00e3: /* icbi @Rn */
1562 if (ctx
->features
& SH_FEATURE_SH4A
)
1566 case 0x00ab: /* synco */
1567 if (ctx
->features
& SH_FEATURE_SH4A
)
1571 case 0x4024: /* rotcl Rn */
1573 TCGv tmp
= tcg_temp_new();
1574 tcg_gen_mov_i32(tmp
, cpu_sr_t
);
1575 tcg_gen_shri_i32(cpu_sr_t
, REG(B11_8
), 31);
1576 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1577 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), tmp
);
1581 case 0x4025: /* rotcr Rn */
1583 TCGv tmp
= tcg_temp_new();
1584 tcg_gen_shli_i32(tmp
, cpu_sr_t
, 31);
1585 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 1);
1586 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1587 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), tmp
);
1591 case 0x4004: /* rotl Rn */
1592 tcg_gen_rotli_i32(REG(B11_8
), REG(B11_8
), 1);
1593 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 0);
1595 case 0x4005: /* rotr Rn */
1596 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 0);
1597 tcg_gen_rotri_i32(REG(B11_8
), REG(B11_8
), 1);
1599 case 0x4000: /* shll Rn */
1600 case 0x4020: /* shal Rn */
1601 tcg_gen_shri_i32(cpu_sr_t
, REG(B11_8
), 31);
1602 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1604 case 0x4021: /* shar Rn */
1605 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 1);
1606 tcg_gen_sari_i32(REG(B11_8
), REG(B11_8
), 1);
1608 case 0x4001: /* shlr Rn */
1609 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 1);
1610 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1612 case 0x4008: /* shll2 Rn */
1613 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 2);
1615 case 0x4018: /* shll8 Rn */
1616 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 8);
1618 case 0x4028: /* shll16 Rn */
1619 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 16);
1621 case 0x4009: /* shlr2 Rn */
1622 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 2);
1624 case 0x4019: /* shlr8 Rn */
1625 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 8);
1627 case 0x4029: /* shlr16 Rn */
1628 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 16);
1630 case 0x401b: /* tas.b @Rn */
1633 addr
= tcg_temp_local_new();
1634 tcg_gen_mov_i32(addr
, REG(B11_8
));
1635 val
= tcg_temp_local_new();
1636 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1637 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
1638 tcg_gen_ori_i32(val
, val
, 0x80);
1639 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1641 tcg_temp_free(addr
);
1644 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1646 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1648 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1650 tcg_gen_mov_i32(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1652 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1654 if (ctx
->flags
& FPSCR_PR
) {
1656 if (ctx
->opcode
& 0x0100)
1657 break; /* illegal instruction */
1658 fp
= tcg_temp_new_i64();
1659 gen_helper_float_DT(fp
, cpu_env
, cpu_fpul
);
1660 gen_store_fpr64(fp
, DREG(B11_8
));
1661 tcg_temp_free_i64(fp
);
1664 gen_helper_float_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
, cpu_fpul
);
1667 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1669 if (ctx
->flags
& FPSCR_PR
) {
1671 if (ctx
->opcode
& 0x0100)
1672 break; /* illegal instruction */
1673 fp
= tcg_temp_new_i64();
1674 gen_load_fpr64(fp
, DREG(B11_8
));
1675 gen_helper_ftrc_DT(cpu_fpul
, cpu_env
, fp
);
1676 tcg_temp_free_i64(fp
);
1679 gen_helper_ftrc_FT(cpu_fpul
, cpu_env
, cpu_fregs
[FREG(B11_8
)]);
1682 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1685 gen_helper_fneg_T(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1688 case 0xf05d: /* fabs FRn/DRn */
1690 if (ctx
->flags
& FPSCR_PR
) {
1691 if (ctx
->opcode
& 0x0100)
1692 break; /* illegal instruction */
1693 TCGv_i64 fp
= tcg_temp_new_i64();
1694 gen_load_fpr64(fp
, DREG(B11_8
));
1695 gen_helper_fabs_DT(fp
, fp
);
1696 gen_store_fpr64(fp
, DREG(B11_8
));
1697 tcg_temp_free_i64(fp
);
1699 gen_helper_fabs_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1702 case 0xf06d: /* fsqrt FRn */
1704 if (ctx
->flags
& FPSCR_PR
) {
1705 if (ctx
->opcode
& 0x0100)
1706 break; /* illegal instruction */
1707 TCGv_i64 fp
= tcg_temp_new_i64();
1708 gen_load_fpr64(fp
, DREG(B11_8
));
1709 gen_helper_fsqrt_DT(fp
, cpu_env
, fp
);
1710 gen_store_fpr64(fp
, DREG(B11_8
));
1711 tcg_temp_free_i64(fp
);
1713 gen_helper_fsqrt_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1714 cpu_fregs
[FREG(B11_8
)]);
1717 case 0xf07d: /* fsrra FRn */
1720 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1722 if (!(ctx
->flags
& FPSCR_PR
)) {
1723 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0);
1726 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1728 if (!(ctx
->flags
& FPSCR_PR
)) {
1729 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0x3f800000);
1732 case 0xf0ad: /* fcnvsd FPUL,DRn */
1735 TCGv_i64 fp
= tcg_temp_new_i64();
1736 gen_helper_fcnvsd_FT_DT(fp
, cpu_env
, cpu_fpul
);
1737 gen_store_fpr64(fp
, DREG(B11_8
));
1738 tcg_temp_free_i64(fp
);
1741 case 0xf0bd: /* fcnvds DRn,FPUL */
1744 TCGv_i64 fp
= tcg_temp_new_i64();
1745 gen_load_fpr64(fp
, DREG(B11_8
));
1746 gen_helper_fcnvds_DT_FT(cpu_fpul
, cpu_env
, fp
);
1747 tcg_temp_free_i64(fp
);
1750 case 0xf0ed: /* fipr FVm,FVn */
1752 if ((ctx
->flags
& FPSCR_PR
) == 0) {
1754 m
= tcg_const_i32((ctx
->opcode
>> 8) & 3);
1755 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1756 gen_helper_fipr(cpu_env
, m
, n
);
1762 case 0xf0fd: /* ftrv XMTRX,FVn */
1764 if ((ctx
->opcode
& 0x0300) == 0x0100 &&
1765 (ctx
->flags
& FPSCR_PR
) == 0) {
1767 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1768 gen_helper_ftrv(cpu_env
, n
);
1775 fprintf(stderr
, "unknown instruction 0x%04x at pc 0x%08x\n",
1776 ctx
->opcode
, ctx
->pc
);
1779 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1780 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1781 gen_helper_raise_slot_illegal_instruction(cpu_env
);
1783 gen_helper_raise_illegal_instruction(cpu_env
);
1785 ctx
->bstate
= BS_BRANCH
;
/* NOTE(review): this chunk is a lossy extraction -- several original
 * statements (the call into the per-opcode decoder, `gen_jump(ctx)`,
 * `ctx->flags = 0`, and various braces/breaks) are missing from the
 * visible text.  Comments below describe only what the surviving
 * lines demonstrate; confirm against the pristine file.  */
/* Translate one instruction and perform the SH4 delay-slot
 * bookkeeping around it: if the PREVIOUS instruction scheduled a
 * delay slot, this instruction executes inside it, and the pending
 * branch must be emitted afterwards.  */
1788 static void decode_opc(DisasContext
* ctx
)
/* Snapshot the flags BEFORE translating, so we can tell whether this
 * instruction sat in a delay slot scheduled by its predecessor.  */
1790 uint32_t old_flags
= ctx
->flags
;
/* We just translated the instruction occupying a delay slot:
 * close the slot and emit the deferred branch.  */
1794 if (old_flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1795 if (ctx
->flags
& DELAY_SLOT_CLEARME
) {
1798 /* go out of the delay slot */
1799 uint32_t new_flags
= ctx
->flags
;
1800 new_flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
1801 gen_store_flags(new_flags
);
/* The deferred branch ends the translation block.  */
1804 ctx
->bstate
= BS_BRANCH
;
/* Conditional (bt/s, bf/s) vs. unconditional delayed branches are
 * emitted by different helpers.  */
1805 if (old_flags
& DELAY_SLOT_CONDITIONAL
) {
1806 gen_delayed_conditional_jump(ctx
);
1807 } else if (old_flags
& DELAY_SLOT
) {
/* If THIS instruction scheduled a delay slot, persist the flags so an
 * exception taken inside the slot sees the correct state.  */
1813 /* go into a delay slot */
1814 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))
1815 gen_store_flags(ctx
->flags
);
/* NOTE(review): lossy extraction -- declarations of `ctx`, `num_insns`
 * and `pc_start` initialization, the `gen_tb_start()` / `gen_io_start()`
 * calls, the `decode_opc(&ctx)` call and several case labels/breaks are
 * missing from the visible text.  Comments describe only what the
 * surviving lines show; verify against the pristine file.  */
/* Main translation loop: turn guest SH4 code starting at tb->pc into a
 * TCG op stream, one instruction at a time, until a branch/exception
 * condition (ctx.bstate != BS_NONE) or a buffer/insn-count limit.  */
1818 void gen_intermediate_code(CPUSH4State
* env
, struct TranslationBlock
*tb
)
1820 SuperHCPU
*cpu
= sh_env_get_cpu(env
);
1821 CPUState
*cs
= CPU(cpu
);
1823 target_ulong pc_start
;
/* Seed the per-TB disassembly context from the TB flags.  */
1829 ctx
.flags
= (uint32_t)tb
->flags
;
1830 ctx
.bstate
= BS_NONE
;
/* memidx: user-mode MMU index when SR.MD is clear, privileged otherwise
 * (1 = user, 0 = kernel here).  */
1831 ctx
.memidx
= (ctx
.flags
& (1u << SR_MD
)) == 0 ? 1 : 0;
1832 /* We don't know if the delayed pc came from a dynamic or static branch,
1833 so assume it is a dynamic branch. */
1834 ctx
.delayed_pc
= -1; /* use delayed pc from env pointer */
1836 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
1837 ctx
.features
= env
->features
;
1838 ctx
.has_movcal
= (ctx
.flags
& TB_FLAG_PENDING_MOVCA
);
/* Clamp the per-TB instruction budget from cflags (icount support).  */
1841 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
1842 if (max_insns
== 0) {
1843 max_insns
= CF_COUNT_MASK
;
1845 if (max_insns
> TCG_MAX_INSNS
) {
1846 max_insns
= TCG_MAX_INSNS
;
/* Translate until a branch/stop condition or the op buffer fills.  */
1850 while (ctx
.bstate
== BS_NONE
&& !tcg_op_buf_full()) {
1851 tcg_gen_insn_start(ctx
.pc
, ctx
.flags
);
/* Debugger breakpoint at this guest PC: emit a debug exception
 * instead of translating further.  */
1854 if (unlikely(cpu_breakpoint_test(cs
, ctx
.pc
, BP_ANY
))) {
1855 /* We have hit a breakpoint - make sure PC is up-to-date */
1856 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1857 gen_helper_debug(cpu_env
);
1858 ctx
.bstate
= BS_BRANCH
;
1859 /* The address covered by the breakpoint must be included in
1860 [tb->pc, tb->pc + tb->size) in order to for it to be
1861 properly cleared -- thus we increment the PC here so that
1862 the logic setting tb->size below does the right thing. */
/* Last budgeted instruction of an icount TB that may do I/O.  */
1867 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
/* Fetch the 16-bit opcode; decoding happens via decode_opc (call
 * dropped by the extraction -- presumably follows here).  */
1871 ctx
.opcode
= cpu_lduw_code(env
, ctx
.pc
);
/* Stop at a guest page boundary so a TB never spans two pages.  */
1874 if ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
1876 if (cs
->singlestep_enabled
) {
1879 if (num_insns
>= max_insns
)
1884 if (tb
->cflags
& CF_LAST_IO
)
/* Single-stepping: force a debug exception at the final PC.  */
1886 if (cs
->singlestep_enabled
) {
1887 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1888 gen_helper_debug(cpu_env
);
/* Otherwise close the TB according to why translation stopped.  */
1890 switch (ctx
.bstate
) {
1892 /* gen_op_interrupt_restart(); */
/* Fell off the end without a branch: persist flags (clearing any
 * delay-slot state on re-entry) and chain to the next TB.  */
1896 gen_store_flags(ctx
.flags
| DELAY_SLOT_CLEARME
);
1898 gen_goto_tb(&ctx
, 0, ctx
.pc
);
1901 /* gen_op_interrupt_restart(); */
1910 gen_tb_end(tb
, num_insns
);
/* Record how much guest code this TB covers, for invalidation.  */
1912 tb
->size
= ctx
.pc
- pc_start
;
1913 tb
->icount
= num_insns
;
/* Optional disassembly dump of the translated guest code.  */
1916 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
1917 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
1918 log_target_disas(cs
, pc_start
, ctx
.pc
- pc_start
, 0);
1924 void restore_state_to_opc(CPUSH4State
*env
, TranslationBlock
*tb
,
1928 env
->flags
= data
[1];