4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "qemu/osdep.h"
24 #include "disas/disas.h"
26 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
35 typedef struct DisasContext
{
36 struct TranslationBlock
*tb
;
43 int singlestep_enabled
;
48 #if defined(CONFIG_USER_ONLY)
49 #define IS_USER(ctx) 1
51 #define IS_USER(ctx) (!(ctx->flags & (1u << SR_MD)))
55 BS_NONE
= 0, /* We go out of the TB without reaching a branch or an
58 BS_STOP
= 1, /* We want to stop translation for any reason */
59 BS_BRANCH
= 2, /* We reached a branch condition */
60 BS_EXCP
= 3, /* We reached an exception condition */
63 /* global register indexes */
64 static TCGv_env cpu_env
;
65 static TCGv cpu_gregs
[24];
66 static TCGv cpu_sr
, cpu_sr_m
, cpu_sr_q
, cpu_sr_t
;
67 static TCGv cpu_pc
, cpu_ssr
, cpu_spc
, cpu_gbr
;
68 static TCGv cpu_vbr
, cpu_sgr
, cpu_dbr
, cpu_mach
, cpu_macl
;
69 static TCGv cpu_pr
, cpu_fpscr
, cpu_fpul
, cpu_ldst
;
70 static TCGv cpu_fregs
[32];
72 /* internal register indexes */
73 static TCGv cpu_flags
, cpu_delayed_pc
;
75 #include "exec/gen-icount.h"
77 void sh4_translate_init(void)
80 static int done_init
= 0;
81 static const char * const gregnames
[24] = {
82 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
83 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
84 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
85 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
86 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
88 static const char * const fregnames
[32] = {
89 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
90 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
91 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
92 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
93 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
94 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
95 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
96 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
102 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
104 for (i
= 0; i
< 24; i
++)
105 cpu_gregs
[i
] = tcg_global_mem_new_i32(cpu_env
,
106 offsetof(CPUSH4State
, gregs
[i
]),
109 cpu_pc
= tcg_global_mem_new_i32(cpu_env
,
110 offsetof(CPUSH4State
, pc
), "PC");
111 cpu_sr
= tcg_global_mem_new_i32(cpu_env
,
112 offsetof(CPUSH4State
, sr
), "SR");
113 cpu_sr_m
= tcg_global_mem_new_i32(cpu_env
,
114 offsetof(CPUSH4State
, sr_m
), "SR_M");
115 cpu_sr_q
= tcg_global_mem_new_i32(cpu_env
,
116 offsetof(CPUSH4State
, sr_q
), "SR_Q");
117 cpu_sr_t
= tcg_global_mem_new_i32(cpu_env
,
118 offsetof(CPUSH4State
, sr_t
), "SR_T");
119 cpu_ssr
= tcg_global_mem_new_i32(cpu_env
,
120 offsetof(CPUSH4State
, ssr
), "SSR");
121 cpu_spc
= tcg_global_mem_new_i32(cpu_env
,
122 offsetof(CPUSH4State
, spc
), "SPC");
123 cpu_gbr
= tcg_global_mem_new_i32(cpu_env
,
124 offsetof(CPUSH4State
, gbr
), "GBR");
125 cpu_vbr
= tcg_global_mem_new_i32(cpu_env
,
126 offsetof(CPUSH4State
, vbr
), "VBR");
127 cpu_sgr
= tcg_global_mem_new_i32(cpu_env
,
128 offsetof(CPUSH4State
, sgr
), "SGR");
129 cpu_dbr
= tcg_global_mem_new_i32(cpu_env
,
130 offsetof(CPUSH4State
, dbr
), "DBR");
131 cpu_mach
= tcg_global_mem_new_i32(cpu_env
,
132 offsetof(CPUSH4State
, mach
), "MACH");
133 cpu_macl
= tcg_global_mem_new_i32(cpu_env
,
134 offsetof(CPUSH4State
, macl
), "MACL");
135 cpu_pr
= tcg_global_mem_new_i32(cpu_env
,
136 offsetof(CPUSH4State
, pr
), "PR");
137 cpu_fpscr
= tcg_global_mem_new_i32(cpu_env
,
138 offsetof(CPUSH4State
, fpscr
), "FPSCR");
139 cpu_fpul
= tcg_global_mem_new_i32(cpu_env
,
140 offsetof(CPUSH4State
, fpul
), "FPUL");
142 cpu_flags
= tcg_global_mem_new_i32(cpu_env
,
143 offsetof(CPUSH4State
, flags
), "_flags_");
144 cpu_delayed_pc
= tcg_global_mem_new_i32(cpu_env
,
145 offsetof(CPUSH4State
, delayed_pc
),
147 cpu_ldst
= tcg_global_mem_new_i32(cpu_env
,
148 offsetof(CPUSH4State
, ldst
), "_ldst_");
150 for (i
= 0; i
< 32; i
++)
151 cpu_fregs
[i
] = tcg_global_mem_new_i32(cpu_env
,
152 offsetof(CPUSH4State
, fregs
[i
]),
158 void superh_cpu_dump_state(CPUState
*cs
, FILE *f
,
159 fprintf_function cpu_fprintf
, int flags
)
161 SuperHCPU
*cpu
= SUPERH_CPU(cs
);
162 CPUSH4State
*env
= &cpu
->env
;
164 cpu_fprintf(f
, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
165 env
->pc
, cpu_read_sr(env
), env
->pr
, env
->fpscr
);
166 cpu_fprintf(f
, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
167 env
->spc
, env
->ssr
, env
->gbr
, env
->vbr
);
168 cpu_fprintf(f
, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
169 env
->sgr
, env
->dbr
, env
->delayed_pc
, env
->fpul
);
170 for (i
= 0; i
< 24; i
+= 4) {
171 cpu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
172 i
, env
->gregs
[i
], i
+ 1, env
->gregs
[i
+ 1],
173 i
+ 2, env
->gregs
[i
+ 2], i
+ 3, env
->gregs
[i
+ 3]);
175 if (env
->flags
& DELAY_SLOT
) {
176 cpu_fprintf(f
, "in delay slot (delayed_pc=0x%08x)\n",
178 } else if (env
->flags
& DELAY_SLOT_CONDITIONAL
) {
179 cpu_fprintf(f
, "in conditional delay slot (delayed_pc=0x%08x)\n",
184 static void gen_read_sr(TCGv dst
)
186 TCGv t0
= tcg_temp_new();
187 tcg_gen_shli_i32(t0
, cpu_sr_q
, SR_Q
);
188 tcg_gen_or_i32(dst
, dst
, t0
);
189 tcg_gen_shli_i32(t0
, cpu_sr_m
, SR_M
);
190 tcg_gen_or_i32(dst
, dst
, t0
);
191 tcg_gen_shli_i32(t0
, cpu_sr_t
, SR_T
);
192 tcg_gen_or_i32(dst
, cpu_sr
, t0
);
193 tcg_temp_free_i32(t0
);
/* Store an architectural SR image into the split representation:
 * cpu_sr keeps every bit except Q, M and T, which are extracted into
 * their dedicated one-bit globals (cpu_sr_q/m/t).  Inverse of
 * gen_read_sr.
 */
static void gen_write_sr(TCGv src)
{
    /* mask Q/M/T out of the base copy */
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    /* shift each bit down to bit 0 and isolate it */
    tcg_gen_shri_i32(cpu_sr_q, src, SR_Q);
    tcg_gen_andi_i32(cpu_sr_q, cpu_sr_q, 1);
    tcg_gen_shri_i32(cpu_sr_m, src, SR_M);
    tcg_gen_andi_i32(cpu_sr_m, cpu_sr_m, 1);
    tcg_gen_shri_i32(cpu_sr_t, src, SR_T);
    tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
}
208 static void gen_goto_tb(DisasContext
* ctx
, int n
, target_ulong dest
)
210 TranslationBlock
*tb
;
213 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
214 !ctx
->singlestep_enabled
) {
215 /* Use a direct jump if in same page and singlestep not enabled */
217 tcg_gen_movi_i32(cpu_pc
, dest
);
218 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
220 tcg_gen_movi_i32(cpu_pc
, dest
);
221 if (ctx
->singlestep_enabled
)
222 gen_helper_debug(cpu_env
);
227 static void gen_jump(DisasContext
* ctx
)
229 if (ctx
->delayed_pc
== (uint32_t) - 1) {
230 /* Target is not statically known, it comes necessarily from a
231 delayed jump as immediate jump are conditinal jumps */
232 tcg_gen_mov_i32(cpu_pc
, cpu_delayed_pc
);
233 if (ctx
->singlestep_enabled
)
234 gen_helper_debug(cpu_env
);
237 gen_goto_tb(ctx
, 0, ctx
->delayed_pc
);
/* Prepare a conditional delayed branch (bt/s, bf/s): record the branch
 * target in cpu_delayed_pc and, when the T bit matches the branch
 * sense 't' (1 = branch if T set, 0 = branch if T clear), mark the
 * pending branch as taken via DELAY_SLOT_TRUE in cpu_flags.
 */
static inline void gen_branch_slot(uint32_t delayed_pc, int t)
{
    TCGLabel *label = gen_new_label();
    tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
    /* skip the flag update when T does not match the wanted sense */
    tcg_gen_brcondi_i32(t ? TCG_COND_EQ : TCG_COND_NE, cpu_sr_t, 0, label);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    gen_set_label(label);
}
250 /* Immediate conditional jump (bt or bf) */
251 static void gen_conditional_jump(DisasContext
* ctx
,
252 target_ulong ift
, target_ulong ifnott
)
254 TCGLabel
*l1
= gen_new_label();
255 tcg_gen_brcondi_i32(TCG_COND_NE
, cpu_sr_t
, 0, l1
);
256 gen_goto_tb(ctx
, 0, ifnott
);
258 gen_goto_tb(ctx
, 1, ift
);
261 /* Delayed conditional jump (bt or bf) */
262 static void gen_delayed_conditional_jump(DisasContext
* ctx
)
267 l1
= gen_new_label();
269 tcg_gen_andi_i32(ds
, cpu_flags
, DELAY_SLOT_TRUE
);
270 tcg_gen_brcondi_i32(TCG_COND_NE
, ds
, 0, l1
);
271 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
273 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, ~DELAY_SLOT_TRUE
);
/* Replace the translation flags in cpu_flags with 'flags', preserving
 * only the DELAY_SLOT_TRUE bit (the taken-status of a pending
 * conditional delayed branch).
 */
static inline void gen_store_flags(uint32_t flags)
{
    tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
}
/* Assemble the 64-bit value of double-precision register pair 'reg'
 * into 't': fregs[reg] supplies the high word, fregs[reg + 1] the low
 * word (concat places its first argument in the low half).
 */
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}
/* Split the 64-bit value 't' back into the register pair: low word to
 * fregs[reg + 1], high word to fregs[reg] -- the mirror of
 * gen_load_fpr64.  Note that 't' is clobbered by the shift.
 */
static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
    tcg_gen_shri_i64(t, t, 32);
    tcg_gen_extrl_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg], tmp);
    tcg_temp_free_i32(tmp);
}
299 #define B3_0 (ctx->opcode & 0xf)
300 #define B6_4 ((ctx->opcode >> 4) & 0x7)
301 #define B7_4 ((ctx->opcode >> 4) & 0xf)
302 #define B7_0 (ctx->opcode & 0xff)
303 #define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
304 #define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
305 (ctx->opcode & 0xfff))
306 #define B11_8 ((ctx->opcode >> 8) & 0xf)
307 #define B15_12 ((ctx->opcode >> 12) & 0xf)
309 #define REG(x) ((x) < 8 && (ctx->flags & (1u << SR_MD))\
310 && (ctx->flags & (1u << SR_RB))\
311 ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))
313 #define ALTREG(x) ((x) < 8 && (!(ctx->flags & (1u << SR_MD))\
314 || !(ctx->flags & (1u << SR_RB)))\
315 ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))
317 #define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
318 #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
319 #define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
320 #define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
322 #define CHECK_NOT_DELAY_SLOT \
323 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
325 tcg_gen_movi_i32(cpu_pc, ctx->pc); \
326 gen_helper_raise_slot_illegal_instruction(cpu_env); \
327 ctx->bstate = BS_BRANCH; \
331 #define CHECK_PRIVILEGED \
332 if (IS_USER(ctx)) { \
333 tcg_gen_movi_i32(cpu_pc, ctx->pc); \
334 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
335 gen_helper_raise_slot_illegal_instruction(cpu_env); \
337 gen_helper_raise_illegal_instruction(cpu_env); \
339 ctx->bstate = BS_BRANCH; \
343 #define CHECK_FPU_ENABLED \
344 if (ctx->flags & (1u << SR_FD)) { \
345 tcg_gen_movi_i32(cpu_pc, ctx->pc); \
346 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
347 gen_helper_raise_slot_fpu_disable(cpu_env); \
349 gen_helper_raise_fpu_disable(cpu_env); \
351 ctx->bstate = BS_BRANCH; \
355 static void _decode_opc(DisasContext
* ctx
)
357 /* This code tries to make movcal emulation sufficiently
358 accurate for Linux purposes. This instruction writes
359 memory, and prior to that, always allocates a cache line.
360 It is used in two contexts:
361 - in memcpy, where data is copied in blocks, the first write
362 of to a block uses movca.l for performance.
363 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
364 to flush the cache. Here, the data written by movcal.l is never
365 written to memory, and the data written is just bogus.
367 To simulate this, we simulate movcal.l, we store the value to memory,
368 but we also remember the previous content. If we see ocbi, we check
369 if movcal.l for that address was done previously. If so, the write should
370 not have hit the memory, so we restore the previous content.
371 When we see an instruction that is neither movca.l
372 nor ocbi, the previous content is discarded.
374 To optimize, we only try to flush stores when we're at the start of
375 TB, or if we already saw movca.l in this TB and did not flush stores
379 int opcode
= ctx
->opcode
& 0xf0ff;
380 if (opcode
!= 0x0093 /* ocbi */
381 && opcode
!= 0x00c3 /* movca.l */)
383 gen_helper_discard_movcal_backup(cpu_env
);
389 fprintf(stderr
, "Translating opcode 0x%04x\n", ctx
->opcode
);
392 switch (ctx
->opcode
) {
393 case 0x0019: /* div0u */
394 tcg_gen_movi_i32(cpu_sr_m
, 0);
395 tcg_gen_movi_i32(cpu_sr_q
, 0);
396 tcg_gen_movi_i32(cpu_sr_t
, 0);
398 case 0x000b: /* rts */
400 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_pr
);
401 ctx
->flags
|= DELAY_SLOT
;
402 ctx
->delayed_pc
= (uint32_t) - 1;
404 case 0x0028: /* clrmac */
405 tcg_gen_movi_i32(cpu_mach
, 0);
406 tcg_gen_movi_i32(cpu_macl
, 0);
408 case 0x0048: /* clrs */
409 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~(1u << SR_S
));
411 case 0x0008: /* clrt */
412 tcg_gen_movi_i32(cpu_sr_t
, 0);
414 case 0x0038: /* ldtlb */
416 gen_helper_ldtlb(cpu_env
);
418 case 0x002b: /* rte */
421 gen_write_sr(cpu_ssr
);
422 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_spc
);
423 ctx
->flags
|= DELAY_SLOT
;
424 ctx
->delayed_pc
= (uint32_t) - 1;
426 case 0x0058: /* sets */
427 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, (1u << SR_S
));
429 case 0x0018: /* sett */
430 tcg_gen_movi_i32(cpu_sr_t
, 1);
432 case 0xfbfd: /* frchg */
433 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_FR
);
434 ctx
->bstate
= BS_STOP
;
436 case 0xf3fd: /* fschg */
437 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_SZ
);
438 ctx
->bstate
= BS_STOP
;
440 case 0x0009: /* nop */
442 case 0x001b: /* sleep */
444 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
+ 2);
445 gen_helper_sleep(cpu_env
);
449 switch (ctx
->opcode
& 0xf000) {
450 case 0x1000: /* mov.l Rm,@(disp,Rn) */
452 TCGv addr
= tcg_temp_new();
453 tcg_gen_addi_i32(addr
, REG(B11_8
), B3_0
* 4);
454 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
458 case 0x5000: /* mov.l @(disp,Rm),Rn */
460 TCGv addr
= tcg_temp_new();
461 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 4);
462 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
466 case 0xe000: /* mov #imm,Rn */
467 tcg_gen_movi_i32(REG(B11_8
), B7_0s
);
469 case 0x9000: /* mov.w @(disp,PC),Rn */
471 TCGv addr
= tcg_const_i32(ctx
->pc
+ 4 + B7_0
* 2);
472 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESW
);
476 case 0xd000: /* mov.l @(disp,PC),Rn */
478 TCGv addr
= tcg_const_i32((ctx
->pc
+ 4 + B7_0
* 4) & ~3);
479 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
483 case 0x7000: /* add #imm,Rn */
484 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), B7_0s
);
486 case 0xa000: /* bra disp */
488 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
489 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
490 ctx
->flags
|= DELAY_SLOT
;
492 case 0xb000: /* bsr disp */
494 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
495 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
496 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
497 ctx
->flags
|= DELAY_SLOT
;
501 switch (ctx
->opcode
& 0xf00f) {
502 case 0x6003: /* mov Rm,Rn */
503 tcg_gen_mov_i32(REG(B11_8
), REG(B7_4
));
505 case 0x2000: /* mov.b Rm,@Rn */
506 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_UB
);
508 case 0x2001: /* mov.w Rm,@Rn */
509 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_TEUW
);
511 case 0x2002: /* mov.l Rm,@Rn */
512 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
514 case 0x6000: /* mov.b @Rm,Rn */
515 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_SB
);
517 case 0x6001: /* mov.w @Rm,Rn */
518 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESW
);
520 case 0x6002: /* mov.l @Rm,Rn */
521 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESL
);
523 case 0x2004: /* mov.b Rm,@-Rn */
525 TCGv addr
= tcg_temp_new();
526 tcg_gen_subi_i32(addr
, REG(B11_8
), 1);
527 /* might cause re-execution */
528 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_UB
);
529 tcg_gen_mov_i32(REG(B11_8
), addr
); /* modify register status */
533 case 0x2005: /* mov.w Rm,@-Rn */
535 TCGv addr
= tcg_temp_new();
536 tcg_gen_subi_i32(addr
, REG(B11_8
), 2);
537 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUW
);
538 tcg_gen_mov_i32(REG(B11_8
), addr
);
542 case 0x2006: /* mov.l Rm,@-Rn */
544 TCGv addr
= tcg_temp_new();
545 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
546 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
547 tcg_gen_mov_i32(REG(B11_8
), addr
);
550 case 0x6004: /* mov.b @Rm+,Rn */
551 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_SB
);
553 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 1);
555 case 0x6005: /* mov.w @Rm+,Rn */
556 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESW
);
558 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
560 case 0x6006: /* mov.l @Rm+,Rn */
561 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESL
);
563 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
565 case 0x0004: /* mov.b Rm,@(R0,Rn) */
567 TCGv addr
= tcg_temp_new();
568 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
569 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_UB
);
573 case 0x0005: /* mov.w Rm,@(R0,Rn) */
575 TCGv addr
= tcg_temp_new();
576 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
577 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUW
);
581 case 0x0006: /* mov.l Rm,@(R0,Rn) */
583 TCGv addr
= tcg_temp_new();
584 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
585 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
589 case 0x000c: /* mov.b @(R0,Rm),Rn */
591 TCGv addr
= tcg_temp_new();
592 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
593 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_SB
);
597 case 0x000d: /* mov.w @(R0,Rm),Rn */
599 TCGv addr
= tcg_temp_new();
600 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
601 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESW
);
605 case 0x000e: /* mov.l @(R0,Rm),Rn */
607 TCGv addr
= tcg_temp_new();
608 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
609 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
613 case 0x6008: /* swap.b Rm,Rn */
615 TCGv low
= tcg_temp_new();;
616 tcg_gen_ext16u_i32(low
, REG(B7_4
));
617 tcg_gen_bswap16_i32(low
, low
);
618 tcg_gen_deposit_i32(REG(B11_8
), REG(B7_4
), low
, 0, 16);
622 case 0x6009: /* swap.w Rm,Rn */
623 tcg_gen_rotli_i32(REG(B11_8
), REG(B7_4
), 16);
625 case 0x200d: /* xtrct Rm,Rn */
628 high
= tcg_temp_new();
629 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
630 low
= tcg_temp_new();
631 tcg_gen_shri_i32(low
, REG(B11_8
), 16);
632 tcg_gen_or_i32(REG(B11_8
), high
, low
);
637 case 0x300c: /* add Rm,Rn */
638 tcg_gen_add_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
640 case 0x300e: /* addc Rm,Rn */
643 t0
= tcg_const_tl(0);
645 tcg_gen_add2_i32(t1
, cpu_sr_t
, cpu_sr_t
, t0
, REG(B7_4
), t0
);
646 tcg_gen_add2_i32(REG(B11_8
), cpu_sr_t
,
647 REG(B11_8
), t0
, t1
, cpu_sr_t
);
652 case 0x300f: /* addv Rm,Rn */
656 tcg_gen_add_i32(t0
, REG(B7_4
), REG(B11_8
));
658 tcg_gen_xor_i32(t1
, t0
, REG(B11_8
));
660 tcg_gen_xor_i32(t2
, REG(B7_4
), REG(B11_8
));
661 tcg_gen_andc_i32(cpu_sr_t
, t1
, t2
);
663 tcg_gen_shri_i32(cpu_sr_t
, cpu_sr_t
, 31);
665 tcg_gen_mov_i32(REG(B7_4
), t0
);
669 case 0x2009: /* and Rm,Rn */
670 tcg_gen_and_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
672 case 0x3000: /* cmp/eq Rm,Rn */
673 tcg_gen_setcond_i32(TCG_COND_EQ
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
675 case 0x3003: /* cmp/ge Rm,Rn */
676 tcg_gen_setcond_i32(TCG_COND_GE
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
678 case 0x3007: /* cmp/gt Rm,Rn */
679 tcg_gen_setcond_i32(TCG_COND_GT
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
681 case 0x3006: /* cmp/hi Rm,Rn */
682 tcg_gen_setcond_i32(TCG_COND_GTU
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
684 case 0x3002: /* cmp/hs Rm,Rn */
685 tcg_gen_setcond_i32(TCG_COND_GEU
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
687 case 0x200c: /* cmp/str Rm,Rn */
689 TCGv cmp1
= tcg_temp_new();
690 TCGv cmp2
= tcg_temp_new();
691 tcg_gen_xor_i32(cmp2
, REG(B7_4
), REG(B11_8
));
692 tcg_gen_subi_i32(cmp1
, cmp2
, 0x01010101);
693 tcg_gen_andc_i32(cmp1
, cmp1
, cmp2
);
694 tcg_gen_andi_i32(cmp1
, cmp1
, 0x80808080);
695 tcg_gen_setcondi_i32(TCG_COND_NE
, cpu_sr_t
, cmp1
, 0);
700 case 0x2007: /* div0s Rm,Rn */
701 tcg_gen_shri_i32(cpu_sr_q
, REG(B11_8
), 31); /* SR_Q */
702 tcg_gen_shri_i32(cpu_sr_m
, REG(B7_4
), 31); /* SR_M */
703 tcg_gen_xor_i32(cpu_sr_t
, cpu_sr_q
, cpu_sr_m
); /* SR_T */
705 case 0x3004: /* div1 Rm,Rn */
707 TCGv t0
= tcg_temp_new();
708 TCGv t1
= tcg_temp_new();
709 TCGv t2
= tcg_temp_new();
710 TCGv zero
= tcg_const_i32(0);
712 /* shift left arg1, saving the bit being pushed out and inserting
714 tcg_gen_shri_i32(t0
, REG(B11_8
), 31);
715 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
716 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), cpu_sr_t
);
718 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
719 using 64-bit temps, we compute arg0's high part from q ^ m, so
720 that it is 0x00000000 when adding the value or 0xffffffff when
722 tcg_gen_xor_i32(t1
, cpu_sr_q
, cpu_sr_m
);
723 tcg_gen_subi_i32(t1
, t1
, 1);
724 tcg_gen_neg_i32(t2
, REG(B7_4
));
725 tcg_gen_movcond_i32(TCG_COND_EQ
, t2
, t1
, zero
, REG(B7_4
), t2
);
726 tcg_gen_add2_i32(REG(B11_8
), t1
, REG(B11_8
), zero
, t2
, t1
);
728 /* compute T and Q depending on carry */
729 tcg_gen_andi_i32(t1
, t1
, 1);
730 tcg_gen_xor_i32(t1
, t1
, t0
);
731 tcg_gen_xori_i32(cpu_sr_t
, t1
, 1);
732 tcg_gen_xor_i32(cpu_sr_q
, cpu_sr_m
, t1
);
740 case 0x300d: /* dmuls.l Rm,Rn */
741 tcg_gen_muls2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
743 case 0x3005: /* dmulu.l Rm,Rn */
744 tcg_gen_mulu2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
746 case 0x600e: /* exts.b Rm,Rn */
747 tcg_gen_ext8s_i32(REG(B11_8
), REG(B7_4
));
749 case 0x600f: /* exts.w Rm,Rn */
750 tcg_gen_ext16s_i32(REG(B11_8
), REG(B7_4
));
752 case 0x600c: /* extu.b Rm,Rn */
753 tcg_gen_ext8u_i32(REG(B11_8
), REG(B7_4
));
755 case 0x600d: /* extu.w Rm,Rn */
756 tcg_gen_ext16u_i32(REG(B11_8
), REG(B7_4
));
758 case 0x000f: /* mac.l @Rm+,@Rn+ */
761 arg0
= tcg_temp_new();
762 tcg_gen_qemu_ld_i32(arg0
, REG(B7_4
), ctx
->memidx
, MO_TESL
);
763 arg1
= tcg_temp_new();
764 tcg_gen_qemu_ld_i32(arg1
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
765 gen_helper_macl(cpu_env
, arg0
, arg1
);
768 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
769 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
772 case 0x400f: /* mac.w @Rm+,@Rn+ */
775 arg0
= tcg_temp_new();
776 tcg_gen_qemu_ld_i32(arg0
, REG(B7_4
), ctx
->memidx
, MO_TESL
);
777 arg1
= tcg_temp_new();
778 tcg_gen_qemu_ld_i32(arg1
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
779 gen_helper_macw(cpu_env
, arg0
, arg1
);
782 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 2);
783 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
786 case 0x0007: /* mul.l Rm,Rn */
787 tcg_gen_mul_i32(cpu_macl
, REG(B7_4
), REG(B11_8
));
789 case 0x200f: /* muls.w Rm,Rn */
792 arg0
= tcg_temp_new();
793 tcg_gen_ext16s_i32(arg0
, REG(B7_4
));
794 arg1
= tcg_temp_new();
795 tcg_gen_ext16s_i32(arg1
, REG(B11_8
));
796 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
801 case 0x200e: /* mulu.w Rm,Rn */
804 arg0
= tcg_temp_new();
805 tcg_gen_ext16u_i32(arg0
, REG(B7_4
));
806 arg1
= tcg_temp_new();
807 tcg_gen_ext16u_i32(arg1
, REG(B11_8
));
808 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
813 case 0x600b: /* neg Rm,Rn */
814 tcg_gen_neg_i32(REG(B11_8
), REG(B7_4
));
816 case 0x600a: /* negc Rm,Rn */
818 TCGv t0
= tcg_const_i32(0);
819 tcg_gen_add2_i32(REG(B11_8
), cpu_sr_t
,
820 REG(B7_4
), t0
, cpu_sr_t
, t0
);
821 tcg_gen_sub2_i32(REG(B11_8
), cpu_sr_t
,
822 t0
, t0
, REG(B11_8
), cpu_sr_t
);
823 tcg_gen_andi_i32(cpu_sr_t
, cpu_sr_t
, 1);
827 case 0x6007: /* not Rm,Rn */
828 tcg_gen_not_i32(REG(B11_8
), REG(B7_4
));
830 case 0x200b: /* or Rm,Rn */
831 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
833 case 0x400c: /* shad Rm,Rn */
835 TCGv t0
= tcg_temp_new();
836 TCGv t1
= tcg_temp_new();
837 TCGv t2
= tcg_temp_new();
839 tcg_gen_andi_i32(t0
, REG(B7_4
), 0x1f);
841 /* positive case: shift to the left */
842 tcg_gen_shl_i32(t1
, REG(B11_8
), t0
);
844 /* negative case: shift to the right in two steps to
845 correctly handle the -32 case */
846 tcg_gen_xori_i32(t0
, t0
, 0x1f);
847 tcg_gen_sar_i32(t2
, REG(B11_8
), t0
);
848 tcg_gen_sari_i32(t2
, t2
, 1);
850 /* select between the two cases */
851 tcg_gen_movi_i32(t0
, 0);
852 tcg_gen_movcond_i32(TCG_COND_GE
, REG(B11_8
), REG(B7_4
), t0
, t1
, t2
);
859 case 0x400d: /* shld Rm,Rn */
861 TCGv t0
= tcg_temp_new();
862 TCGv t1
= tcg_temp_new();
863 TCGv t2
= tcg_temp_new();
865 tcg_gen_andi_i32(t0
, REG(B7_4
), 0x1f);
867 /* positive case: shift to the left */
868 tcg_gen_shl_i32(t1
, REG(B11_8
), t0
);
870 /* negative case: shift to the right in two steps to
871 correctly handle the -32 case */
872 tcg_gen_xori_i32(t0
, t0
, 0x1f);
873 tcg_gen_shr_i32(t2
, REG(B11_8
), t0
);
874 tcg_gen_shri_i32(t2
, t2
, 1);
876 /* select between the two cases */
877 tcg_gen_movi_i32(t0
, 0);
878 tcg_gen_movcond_i32(TCG_COND_GE
, REG(B11_8
), REG(B7_4
), t0
, t1
, t2
);
885 case 0x3008: /* sub Rm,Rn */
886 tcg_gen_sub_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
888 case 0x300a: /* subc Rm,Rn */
891 t0
= tcg_const_tl(0);
893 tcg_gen_add2_i32(t1
, cpu_sr_t
, cpu_sr_t
, t0
, REG(B7_4
), t0
);
894 tcg_gen_sub2_i32(REG(B11_8
), cpu_sr_t
,
895 REG(B11_8
), t0
, t1
, cpu_sr_t
);
896 tcg_gen_andi_i32(cpu_sr_t
, cpu_sr_t
, 1);
901 case 0x300b: /* subv Rm,Rn */
905 tcg_gen_sub_i32(t0
, REG(B11_8
), REG(B7_4
));
907 tcg_gen_xor_i32(t1
, t0
, REG(B7_4
));
909 tcg_gen_xor_i32(t2
, REG(B11_8
), REG(B7_4
));
910 tcg_gen_and_i32(t1
, t1
, t2
);
912 tcg_gen_shri_i32(cpu_sr_t
, t1
, 31);
914 tcg_gen_mov_i32(REG(B11_8
), t0
);
918 case 0x2008: /* tst Rm,Rn */
920 TCGv val
= tcg_temp_new();
921 tcg_gen_and_i32(val
, REG(B7_4
), REG(B11_8
));
922 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
926 case 0x200a: /* xor Rm,Rn */
927 tcg_gen_xor_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
929 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
931 if (ctx
->flags
& FPSCR_SZ
) {
932 TCGv_i64 fp
= tcg_temp_new_i64();
933 gen_load_fpr64(fp
, XREG(B7_4
));
934 gen_store_fpr64(fp
, XREG(B11_8
));
935 tcg_temp_free_i64(fp
);
937 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
940 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
942 if (ctx
->flags
& FPSCR_SZ
) {
943 TCGv addr_hi
= tcg_temp_new();
945 tcg_gen_addi_i32(addr_hi
, REG(B11_8
), 4);
946 tcg_gen_qemu_st_i32(cpu_fregs
[fr
], REG(B11_8
),
947 ctx
->memidx
, MO_TEUL
);
948 tcg_gen_qemu_st_i32(cpu_fregs
[fr
+1], addr_hi
,
949 ctx
->memidx
, MO_TEUL
);
950 tcg_temp_free(addr_hi
);
952 tcg_gen_qemu_st_i32(cpu_fregs
[FREG(B7_4
)], REG(B11_8
),
953 ctx
->memidx
, MO_TEUL
);
956 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
958 if (ctx
->flags
& FPSCR_SZ
) {
959 TCGv addr_hi
= tcg_temp_new();
960 int fr
= XREG(B11_8
);
961 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
962 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
, MO_TEUL
);
963 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
, MO_TEUL
);
964 tcg_temp_free(addr_hi
);
966 tcg_gen_qemu_ld_i32(cpu_fregs
[FREG(B11_8
)], REG(B7_4
),
967 ctx
->memidx
, MO_TEUL
);
970 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
972 if (ctx
->flags
& FPSCR_SZ
) {
973 TCGv addr_hi
= tcg_temp_new();
974 int fr
= XREG(B11_8
);
975 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
976 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
, MO_TEUL
);
977 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
, MO_TEUL
);
978 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 8);
979 tcg_temp_free(addr_hi
);
981 tcg_gen_qemu_ld_i32(cpu_fregs
[FREG(B11_8
)], REG(B7_4
),
982 ctx
->memidx
, MO_TEUL
);
983 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
986 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
988 TCGv addr
= tcg_temp_new_i32();
989 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
990 if (ctx
->flags
& FPSCR_SZ
) {
992 tcg_gen_qemu_st_i32(cpu_fregs
[fr
+1], addr
, ctx
->memidx
, MO_TEUL
);
993 tcg_gen_subi_i32(addr
, addr
, 4);
994 tcg_gen_qemu_st_i32(cpu_fregs
[fr
], addr
, ctx
->memidx
, MO_TEUL
);
996 tcg_gen_qemu_st_i32(cpu_fregs
[FREG(B7_4
)], addr
,
997 ctx
->memidx
, MO_TEUL
);
999 tcg_gen_mov_i32(REG(B11_8
), addr
);
1000 tcg_temp_free(addr
);
1002 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1005 TCGv addr
= tcg_temp_new_i32();
1006 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
1007 if (ctx
->flags
& FPSCR_SZ
) {
1008 int fr
= XREG(B11_8
);
1009 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], addr
,
1010 ctx
->memidx
, MO_TEUL
);
1011 tcg_gen_addi_i32(addr
, addr
, 4);
1012 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr
,
1013 ctx
->memidx
, MO_TEUL
);
1015 tcg_gen_qemu_ld_i32(cpu_fregs
[FREG(B11_8
)], addr
,
1016 ctx
->memidx
, MO_TEUL
);
1018 tcg_temp_free(addr
);
1021 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1024 TCGv addr
= tcg_temp_new();
1025 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
1026 if (ctx
->flags
& FPSCR_SZ
) {
1027 int fr
= XREG(B7_4
);
1028 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], addr
,
1029 ctx
->memidx
, MO_TEUL
);
1030 tcg_gen_addi_i32(addr
, addr
, 4);
1031 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr
,
1032 ctx
->memidx
, MO_TEUL
);
1034 tcg_gen_qemu_st_i32(cpu_fregs
[FREG(B7_4
)], addr
,
1035 ctx
->memidx
, MO_TEUL
);
1037 tcg_temp_free(addr
);
1040 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1041 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1042 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1043 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1044 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1045 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1048 if (ctx
->flags
& FPSCR_PR
) {
1051 if (ctx
->opcode
& 0x0110)
1052 break; /* illegal instruction */
1053 fp0
= tcg_temp_new_i64();
1054 fp1
= tcg_temp_new_i64();
1055 gen_load_fpr64(fp0
, DREG(B11_8
));
1056 gen_load_fpr64(fp1
, DREG(B7_4
));
1057 switch (ctx
->opcode
& 0xf00f) {
1058 case 0xf000: /* fadd Rm,Rn */
1059 gen_helper_fadd_DT(fp0
, cpu_env
, fp0
, fp1
);
1061 case 0xf001: /* fsub Rm,Rn */
1062 gen_helper_fsub_DT(fp0
, cpu_env
, fp0
, fp1
);
1064 case 0xf002: /* fmul Rm,Rn */
1065 gen_helper_fmul_DT(fp0
, cpu_env
, fp0
, fp1
);
1067 case 0xf003: /* fdiv Rm,Rn */
1068 gen_helper_fdiv_DT(fp0
, cpu_env
, fp0
, fp1
);
1070 case 0xf004: /* fcmp/eq Rm,Rn */
1071 gen_helper_fcmp_eq_DT(cpu_env
, fp0
, fp1
);
1073 case 0xf005: /* fcmp/gt Rm,Rn */
1074 gen_helper_fcmp_gt_DT(cpu_env
, fp0
, fp1
);
1077 gen_store_fpr64(fp0
, DREG(B11_8
));
1078 tcg_temp_free_i64(fp0
);
1079 tcg_temp_free_i64(fp1
);
1081 switch (ctx
->opcode
& 0xf00f) {
1082 case 0xf000: /* fadd Rm,Rn */
1083 gen_helper_fadd_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1084 cpu_fregs
[FREG(B11_8
)],
1085 cpu_fregs
[FREG(B7_4
)]);
1087 case 0xf001: /* fsub Rm,Rn */
1088 gen_helper_fsub_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1089 cpu_fregs
[FREG(B11_8
)],
1090 cpu_fregs
[FREG(B7_4
)]);
1092 case 0xf002: /* fmul Rm,Rn */
1093 gen_helper_fmul_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1094 cpu_fregs
[FREG(B11_8
)],
1095 cpu_fregs
[FREG(B7_4
)]);
1097 case 0xf003: /* fdiv Rm,Rn */
1098 gen_helper_fdiv_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1099 cpu_fregs
[FREG(B11_8
)],
1100 cpu_fregs
[FREG(B7_4
)]);
1102 case 0xf004: /* fcmp/eq Rm,Rn */
1103 gen_helper_fcmp_eq_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1104 cpu_fregs
[FREG(B7_4
)]);
1106 case 0xf005: /* fcmp/gt Rm,Rn */
1107 gen_helper_fcmp_gt_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1108 cpu_fregs
[FREG(B7_4
)]);
1114 case 0xf00e: /* fmac FR0,RM,Rn */
1117 if (ctx
->flags
& FPSCR_PR
) {
1118 break; /* illegal instruction */
1120 gen_helper_fmac_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1121 cpu_fregs
[FREG(0)], cpu_fregs
[FREG(B7_4
)],
1122 cpu_fregs
[FREG(B11_8
)]);
1128 switch (ctx
->opcode
& 0xff00) {
1129 case 0xc900: /* and #imm,R0 */
1130 tcg_gen_andi_i32(REG(0), REG(0), B7_0
);
1132 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1135 addr
= tcg_temp_new();
1136 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1137 val
= tcg_temp_new();
1138 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1139 tcg_gen_andi_i32(val
, val
, B7_0
);
1140 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1142 tcg_temp_free(addr
);
1145 case 0x8b00: /* bf label */
1146 CHECK_NOT_DELAY_SLOT
1147 gen_conditional_jump(ctx
, ctx
->pc
+ 2,
1148 ctx
->pc
+ 4 + B7_0s
* 2);
1149 ctx
->bstate
= BS_BRANCH
;
1151 case 0x8f00: /* bf/s label */
1152 CHECK_NOT_DELAY_SLOT
1153 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 0);
1154 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1156 case 0x8900: /* bt label */
1157 CHECK_NOT_DELAY_SLOT
1158 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2,
1160 ctx
->bstate
= BS_BRANCH
;
1162 case 0x8d00: /* bt/s label */
1163 CHECK_NOT_DELAY_SLOT
1164 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 1);
1165 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1167 case 0x8800: /* cmp/eq #imm,R0 */
1168 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, REG(0), B7_0s
);
1170 case 0xc400: /* mov.b @(disp,GBR),R0 */
1172 TCGv addr
= tcg_temp_new();
1173 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1174 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_SB
);
1175 tcg_temp_free(addr
);
1178 case 0xc500: /* mov.w @(disp,GBR),R0 */
1180 TCGv addr
= tcg_temp_new();
1181 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1182 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESW
);
1183 tcg_temp_free(addr
);
1186 case 0xc600: /* mov.l @(disp,GBR),R0 */
1188 TCGv addr
= tcg_temp_new();
1189 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1190 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESL
);
1191 tcg_temp_free(addr
);
1194 case 0xc000: /* mov.b R0,@(disp,GBR) */
1196 TCGv addr
= tcg_temp_new();
1197 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1198 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_UB
);
1199 tcg_temp_free(addr
);
1202 case 0xc100: /* mov.w R0,@(disp,GBR) */
1204 TCGv addr
= tcg_temp_new();
1205 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1206 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUW
);
1207 tcg_temp_free(addr
);
1210 case 0xc200: /* mov.l R0,@(disp,GBR) */
1212 TCGv addr
= tcg_temp_new();
1213 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1214 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUL
);
1215 tcg_temp_free(addr
);
1218 case 0x8000: /* mov.b R0,@(disp,Rn) */
1220 TCGv addr
= tcg_temp_new();
1221 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1222 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_UB
);
1223 tcg_temp_free(addr
);
1226 case 0x8100: /* mov.w R0,@(disp,Rn) */
1228 TCGv addr
= tcg_temp_new();
1229 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1230 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUW
);
1231 tcg_temp_free(addr
);
1234 case 0x8400: /* mov.b @(disp,Rn),R0 */
1236 TCGv addr
= tcg_temp_new();
1237 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1238 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_SB
);
1239 tcg_temp_free(addr
);
1242 case 0x8500: /* mov.w @(disp,Rn),R0 */
1244 TCGv addr
= tcg_temp_new();
1245 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1246 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESW
);
1247 tcg_temp_free(addr
);
1250 case 0xc700: /* mova @(disp,PC),R0 */
1251 tcg_gen_movi_i32(REG(0), ((ctx
->pc
& 0xfffffffc) + 4 + B7_0
* 4) & ~3);
1253 case 0xcb00: /* or #imm,R0 */
1254 tcg_gen_ori_i32(REG(0), REG(0), B7_0
);
1256 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1259 addr
= tcg_temp_new();
1260 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1261 val
= tcg_temp_new();
1262 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1263 tcg_gen_ori_i32(val
, val
, B7_0
);
1264 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1266 tcg_temp_free(addr
);
1269 case 0xc300: /* trapa #imm */
1272 CHECK_NOT_DELAY_SLOT
1273 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1274 imm
= tcg_const_i32(B7_0
);
1275 gen_helper_trapa(cpu_env
, imm
);
1277 ctx
->bstate
= BS_BRANCH
;
1280 case 0xc800: /* tst #imm,R0 */
1282 TCGv val
= tcg_temp_new();
1283 tcg_gen_andi_i32(val
, REG(0), B7_0
);
1284 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
1288 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1290 TCGv val
= tcg_temp_new();
1291 tcg_gen_add_i32(val
, REG(0), cpu_gbr
);
1292 tcg_gen_qemu_ld_i32(val
, val
, ctx
->memidx
, MO_UB
);
1293 tcg_gen_andi_i32(val
, val
, B7_0
);
1294 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
1298 case 0xca00: /* xor #imm,R0 */
1299 tcg_gen_xori_i32(REG(0), REG(0), B7_0
);
1301 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1304 addr
= tcg_temp_new();
1305 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1306 val
= tcg_temp_new();
1307 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1308 tcg_gen_xori_i32(val
, val
, B7_0
);
1309 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1311 tcg_temp_free(addr
);
1316 switch (ctx
->opcode
& 0xf08f) {
1317 case 0x408e: /* ldc Rm,Rn_BANK */
1319 tcg_gen_mov_i32(ALTREG(B6_4
), REG(B11_8
));
1321 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1323 tcg_gen_qemu_ld_i32(ALTREG(B6_4
), REG(B11_8
), ctx
->memidx
, MO_TESL
);
1324 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1326 case 0x0082: /* stc Rm_BANK,Rn */
1328 tcg_gen_mov_i32(REG(B11_8
), ALTREG(B6_4
));
1330 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1333 TCGv addr
= tcg_temp_new();
1334 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1335 tcg_gen_qemu_st_i32(ALTREG(B6_4
), addr
, ctx
->memidx
, MO_TEUL
);
1336 tcg_gen_mov_i32(REG(B11_8
), addr
);
1337 tcg_temp_free(addr
);
1342 switch (ctx
->opcode
& 0xf0ff) {
1343 case 0x0023: /* braf Rn */
1344 CHECK_NOT_DELAY_SLOT
1345 tcg_gen_addi_i32(cpu_delayed_pc
, REG(B11_8
), ctx
->pc
+ 4);
1346 ctx
->flags
|= DELAY_SLOT
;
1347 ctx
->delayed_pc
= (uint32_t) - 1;
1349 case 0x0003: /* bsrf Rn */
1350 CHECK_NOT_DELAY_SLOT
1351 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1352 tcg_gen_add_i32(cpu_delayed_pc
, REG(B11_8
), cpu_pr
);
1353 ctx
->flags
|= DELAY_SLOT
;
1354 ctx
->delayed_pc
= (uint32_t) - 1;
1356 case 0x4015: /* cmp/pl Rn */
1357 tcg_gen_setcondi_i32(TCG_COND_GT
, cpu_sr_t
, REG(B11_8
), 0);
1359 case 0x4011: /* cmp/pz Rn */
1360 tcg_gen_setcondi_i32(TCG_COND_GE
, cpu_sr_t
, REG(B11_8
), 0);
1362 case 0x4010: /* dt Rn */
1363 tcg_gen_subi_i32(REG(B11_8
), REG(B11_8
), 1);
1364 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, REG(B11_8
), 0);
1366 case 0x402b: /* jmp @Rn */
1367 CHECK_NOT_DELAY_SLOT
1368 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1369 ctx
->flags
|= DELAY_SLOT
;
1370 ctx
->delayed_pc
= (uint32_t) - 1;
1372 case 0x400b: /* jsr @Rn */
1373 CHECK_NOT_DELAY_SLOT
1374 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1375 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1376 ctx
->flags
|= DELAY_SLOT
;
1377 ctx
->delayed_pc
= (uint32_t) - 1;
1379 case 0x400e: /* ldc Rm,SR */
1382 TCGv val
= tcg_temp_new();
1383 tcg_gen_andi_i32(val
, REG(B11_8
), 0x700083f3);
1386 ctx
->bstate
= BS_STOP
;
1389 case 0x4007: /* ldc.l @Rm+,SR */
1392 TCGv val
= tcg_temp_new();
1393 tcg_gen_qemu_ld_i32(val
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
1394 tcg_gen_andi_i32(val
, val
, 0x700083f3);
1397 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1398 ctx
->bstate
= BS_STOP
;
1401 case 0x0002: /* stc SR,Rn */
1403 gen_read_sr(REG(B11_8
));
1405 case 0x4003: /* stc SR,@-Rn */
1408 TCGv addr
= tcg_temp_new();
1409 TCGv val
= tcg_temp_new();
1410 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1412 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_TEUL
);
1413 tcg_gen_mov_i32(REG(B11_8
), addr
);
1415 tcg_temp_free(addr
);
1418 #define LD(reg,ldnum,ldpnum,prechk) \
1421 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1425 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1426 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1428 #define ST(reg,stnum,stpnum,prechk) \
1431 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1436 TCGv addr = tcg_temp_new(); \
1437 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1438 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1439 tcg_gen_mov_i32(REG(B11_8), addr); \
1440 tcg_temp_free(addr); \
1443 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1444 LD(reg,ldnum,ldpnum,prechk) \
1445 ST(reg,stnum,stpnum,prechk)
1446 LDST(gbr
, 0x401e, 0x4017, 0x0012, 0x4013, {})
1447 LDST(vbr
, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED
)
1448 LDST(ssr
, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED
)
1449 LDST(spc
, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED
)
1450 ST(sgr
, 0x003a, 0x4032, CHECK_PRIVILEGED
)
1451 LD(sgr
, 0x403a, 0x4036, CHECK_PRIVILEGED
if (!(ctx
->features
& SH_FEATURE_SH4A
)) break;)
1452 LDST(dbr
, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED
)
1453 LDST(mach
, 0x400a, 0x4006, 0x000a, 0x4002, {})
1454 LDST(macl
, 0x401a, 0x4016, 0x001a, 0x4012, {})
1455 LDST(pr
, 0x402a, 0x4026, 0x002a, 0x4022, {})
1456 LDST(fpul
, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED
})
1457 case 0x406a: /* lds Rm,FPSCR */
1459 gen_helper_ld_fpscr(cpu_env
, REG(B11_8
));
1460 ctx
->bstate
= BS_STOP
;
1462 case 0x4066: /* lds.l @Rm+,FPSCR */
1465 TCGv addr
= tcg_temp_new();
1466 tcg_gen_qemu_ld_i32(addr
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
1467 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1468 gen_helper_ld_fpscr(cpu_env
, addr
);
1469 tcg_temp_free(addr
);
1470 ctx
->bstate
= BS_STOP
;
1473 case 0x006a: /* sts FPSCR,Rn */
1475 tcg_gen_andi_i32(REG(B11_8
), cpu_fpscr
, 0x003fffff);
1477 case 0x4062: /* sts FPSCR,@-Rn */
1481 val
= tcg_temp_new();
1482 tcg_gen_andi_i32(val
, cpu_fpscr
, 0x003fffff);
1483 addr
= tcg_temp_new();
1484 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1485 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_TEUL
);
1486 tcg_gen_mov_i32(REG(B11_8
), addr
);
1487 tcg_temp_free(addr
);
1491 case 0x00c3: /* movca.l R0,@Rm */
1493 TCGv val
= tcg_temp_new();
1494 tcg_gen_qemu_ld_i32(val
, REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1495 gen_helper_movcal(cpu_env
, REG(B11_8
), val
);
1496 tcg_gen_qemu_st_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1498 ctx
->has_movcal
= 1;
1501 /* MOVUA.L @Rm,R0 (Rm) -> R0
1502 Load non-boundary-aligned data */
1503 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1506 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1507 Load non-boundary-aligned data */
1508 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1509 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1511 case 0x0029: /* movt Rn */
1512 tcg_gen_mov_i32(REG(B11_8
), cpu_sr_t
);
1517 If (T == 1) R0 -> (Rn)
1520 if (ctx
->features
& SH_FEATURE_SH4A
) {
1521 TCGLabel
*label
= gen_new_label();
1522 tcg_gen_mov_i32(cpu_sr_t
, cpu_ldst
);
1523 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ldst
, 0, label
);
1524 tcg_gen_qemu_st_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1525 gen_set_label(label
);
1526 tcg_gen_movi_i32(cpu_ldst
, 0);
1534 When interrupt/exception
1537 if (ctx
->features
& SH_FEATURE_SH4A
) {
1538 tcg_gen_movi_i32(cpu_ldst
, 0);
1539 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TESL
);
1540 tcg_gen_movi_i32(cpu_ldst
, 1);
1544 case 0x0093: /* ocbi @Rn */
1546 gen_helper_ocbi(cpu_env
, REG(B11_8
));
1549 case 0x00a3: /* ocbp @Rn */
1550 case 0x00b3: /* ocbwb @Rn */
1551 /* These instructions are supposed to do nothing in case of
1552 a cache miss. Given that we only partially emulate caches
1553 it is safe to simply ignore them. */
1555 case 0x0083: /* pref @Rn */
1557 case 0x00d3: /* prefi @Rn */
1558 if (ctx
->features
& SH_FEATURE_SH4A
)
1562 case 0x00e3: /* icbi @Rn */
1563 if (ctx
->features
& SH_FEATURE_SH4A
)
1567 case 0x00ab: /* synco */
1568 if (ctx
->features
& SH_FEATURE_SH4A
)
1572 case 0x4024: /* rotcl Rn */
1574 TCGv tmp
= tcg_temp_new();
1575 tcg_gen_mov_i32(tmp
, cpu_sr_t
);
1576 tcg_gen_shri_i32(cpu_sr_t
, REG(B11_8
), 31);
1577 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1578 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), tmp
);
1582 case 0x4025: /* rotcr Rn */
1584 TCGv tmp
= tcg_temp_new();
1585 tcg_gen_shli_i32(tmp
, cpu_sr_t
, 31);
1586 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 1);
1587 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1588 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), tmp
);
1592 case 0x4004: /* rotl Rn */
1593 tcg_gen_rotli_i32(REG(B11_8
), REG(B11_8
), 1);
1594 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 0);
1596 case 0x4005: /* rotr Rn */
1597 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 0);
1598 tcg_gen_rotri_i32(REG(B11_8
), REG(B11_8
), 1);
1600 case 0x4000: /* shll Rn */
1601 case 0x4020: /* shal Rn */
1602 tcg_gen_shri_i32(cpu_sr_t
, REG(B11_8
), 31);
1603 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1605 case 0x4021: /* shar Rn */
1606 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 1);
1607 tcg_gen_sari_i32(REG(B11_8
), REG(B11_8
), 1);
1609 case 0x4001: /* shlr Rn */
1610 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 1);
1611 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1613 case 0x4008: /* shll2 Rn */
1614 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 2);
1616 case 0x4018: /* shll8 Rn */
1617 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 8);
1619 case 0x4028: /* shll16 Rn */
1620 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 16);
1622 case 0x4009: /* shlr2 Rn */
1623 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 2);
1625 case 0x4019: /* shlr8 Rn */
1626 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 8);
1628 case 0x4029: /* shlr16 Rn */
1629 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 16);
1631 case 0x401b: /* tas.b @Rn */
1634 addr
= tcg_temp_local_new();
1635 tcg_gen_mov_i32(addr
, REG(B11_8
));
1636 val
= tcg_temp_local_new();
1637 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1638 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
1639 tcg_gen_ori_i32(val
, val
, 0x80);
1640 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1642 tcg_temp_free(addr
);
1645 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1647 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1649 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1651 tcg_gen_mov_i32(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1653 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1655 if (ctx
->flags
& FPSCR_PR
) {
1657 if (ctx
->opcode
& 0x0100)
1658 break; /* illegal instruction */
1659 fp
= tcg_temp_new_i64();
1660 gen_helper_float_DT(fp
, cpu_env
, cpu_fpul
);
1661 gen_store_fpr64(fp
, DREG(B11_8
));
1662 tcg_temp_free_i64(fp
);
1665 gen_helper_float_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
, cpu_fpul
);
1668 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1670 if (ctx
->flags
& FPSCR_PR
) {
1672 if (ctx
->opcode
& 0x0100)
1673 break; /* illegal instruction */
1674 fp
= tcg_temp_new_i64();
1675 gen_load_fpr64(fp
, DREG(B11_8
));
1676 gen_helper_ftrc_DT(cpu_fpul
, cpu_env
, fp
);
1677 tcg_temp_free_i64(fp
);
1680 gen_helper_ftrc_FT(cpu_fpul
, cpu_env
, cpu_fregs
[FREG(B11_8
)]);
1683 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1686 gen_helper_fneg_T(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1689 case 0xf05d: /* fabs FRn/DRn */
1691 if (ctx
->flags
& FPSCR_PR
) {
1692 if (ctx
->opcode
& 0x0100)
1693 break; /* illegal instruction */
1694 TCGv_i64 fp
= tcg_temp_new_i64();
1695 gen_load_fpr64(fp
, DREG(B11_8
));
1696 gen_helper_fabs_DT(fp
, fp
);
1697 gen_store_fpr64(fp
, DREG(B11_8
));
1698 tcg_temp_free_i64(fp
);
1700 gen_helper_fabs_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1703 case 0xf06d: /* fsqrt FRn */
1705 if (ctx
->flags
& FPSCR_PR
) {
1706 if (ctx
->opcode
& 0x0100)
1707 break; /* illegal instruction */
1708 TCGv_i64 fp
= tcg_temp_new_i64();
1709 gen_load_fpr64(fp
, DREG(B11_8
));
1710 gen_helper_fsqrt_DT(fp
, cpu_env
, fp
);
1711 gen_store_fpr64(fp
, DREG(B11_8
));
1712 tcg_temp_free_i64(fp
);
1714 gen_helper_fsqrt_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1715 cpu_fregs
[FREG(B11_8
)]);
1718 case 0xf07d: /* fsrra FRn */
1721 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1723 if (!(ctx
->flags
& FPSCR_PR
)) {
1724 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0);
1727 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1729 if (!(ctx
->flags
& FPSCR_PR
)) {
1730 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0x3f800000);
1733 case 0xf0ad: /* fcnvsd FPUL,DRn */
1736 TCGv_i64 fp
= tcg_temp_new_i64();
1737 gen_helper_fcnvsd_FT_DT(fp
, cpu_env
, cpu_fpul
);
1738 gen_store_fpr64(fp
, DREG(B11_8
));
1739 tcg_temp_free_i64(fp
);
1742 case 0xf0bd: /* fcnvds DRn,FPUL */
1745 TCGv_i64 fp
= tcg_temp_new_i64();
1746 gen_load_fpr64(fp
, DREG(B11_8
));
1747 gen_helper_fcnvds_DT_FT(cpu_fpul
, cpu_env
, fp
);
1748 tcg_temp_free_i64(fp
);
1751 case 0xf0ed: /* fipr FVm,FVn */
1753 if ((ctx
->flags
& FPSCR_PR
) == 0) {
1755 m
= tcg_const_i32((ctx
->opcode
>> 8) & 3);
1756 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1757 gen_helper_fipr(cpu_env
, m
, n
);
1763 case 0xf0fd: /* ftrv XMTRX,FVn */
1765 if ((ctx
->opcode
& 0x0300) == 0x0100 &&
1766 (ctx
->flags
& FPSCR_PR
) == 0) {
1768 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1769 gen_helper_ftrv(cpu_env
, n
);
1776 fprintf(stderr
, "unknown instruction 0x%04x at pc 0x%08x\n",
1777 ctx
->opcode
, ctx
->pc
);
1780 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1781 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1782 gen_helper_raise_slot_illegal_instruction(cpu_env
);
1784 gen_helper_raise_illegal_instruction(cpu_env
);
1786 ctx
->bstate
= BS_BRANCH
;
/* decode_opc: translate one guest SH-4 instruction and perform the
 * delay-slot bookkeeping around it.
 *
 * NOTE(review): this chunk was extracted with interior lines elided — the
 * leading decimal numbers are line numbers from the original file that were
 * left embedded in the text, and several statements/braces between the
 * fragments below (e.g. the opening brace and the call that actually decodes
 * the instruction) are missing from this view.  Only comments are added
 * here; the code text is left byte-identical.
 */
1789 static void decode_opc(DisasContext
* ctx
)
/* Snapshot ctx->flags before decoding: the instruction decoded in between
   (elided here) may set DELAY_SLOT / DELAY_SLOT_CONDITIONAL for the insn
   that follows it. */
1791 uint32_t old_flags
= ctx
->flags
;
/* If the instruction just translated WAS sitting in a delay slot, the
   pending branch must now be emitted. */
1795 if (old_flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
/* DELAY_SLOT_CLEARME: the stored flags should be reset rather than
   rewritten with the slot bits cleared — TODO confirm, the taken branch
   of this `if` is elided from this view. */
1796 if (ctx
->flags
& DELAY_SLOT_CLEARME
) {
1799 /* go out of the delay slot */
/* Persist the current flags minus the two delay-slot bits back to the
   CPU state via gen_store_flags(). */
1800 uint32_t new_flags
= ctx
->flags
;
1801 new_flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
1802 gen_store_flags(new_flags
);
/* The branch terminates this translation block. */
1805 ctx
->bstate
= BS_BRANCH
;
/* Conditional slot: jump only if the condition recorded earlier holds;
   unconditional slot handling follows (its body is elided here). */
1806 if (old_flags
& DELAY_SLOT_CONDITIONAL
) {
1807 gen_delayed_conditional_jump(ctx
);
1808 } else if (old_flags
& DELAY_SLOT
) {
1814 /* go into a delay slot */
/* If the instruction just decoded opened a delay slot, persist the flags
   now so an exception taken inside the slot sees the slot state. */
1815 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))
1816 gen_store_flags(ctx
->flags
);
/* gen_intermediate_code: main translation loop — decode guest instructions
 * starting at the TB's pc and emit TCG ops until a branch/exception ends
 * the block, the op buffer fills, or the per-TB instruction budget is hit.
 *
 * NOTE(review): extraction-mangled text; the embedded decimal numbers are
 * original-file line numbers, and a number of interior lines (local
 * declarations such as `ctx`/`num_insns`, loop increments, several switch
 * arms) are elided from this view.  Only comments are added; code text is
 * byte-identical.
 */
1819 void gen_intermediate_code(CPUSH4State
* env
, struct TranslationBlock
*tb
)
/* Recover the CPU object wrappers from the raw env pointer. */
1821 SuperHCPU
*cpu
= sh_env_get_cpu(env
);
1822 CPUState
*cs
= CPU(cpu
);
1824 target_ulong pc_start
;
/* Seed the per-TB DisasContext from tb->flags (SR bits, delay-slot and
   FPSCR state captured at TB-build time). */
1830 ctx
.flags
= (uint32_t)tb
->flags
;
1831 ctx
.bstate
= BS_NONE
;
/* MMU index: user mode (SR.MD clear) -> 1, privileged -> 0. */
1832 ctx
.memidx
= (ctx
.flags
& (1u << SR_MD
)) == 0 ? 1 : 0;
1833 /* We don't know if the delayed pc came from a dynamic or static branch,
1834 so assume it is a dynamic branch. */
1835 ctx
.delayed_pc
= -1; /* use delayed pc from env pointer */
1837 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
1838 ctx
.features
= env
->features
;
/* Pending movca.l state is carried across TBs via a TB flag bit. */
1839 ctx
.has_movcal
= (ctx
.flags
& TB_FLAG_PENDING_MOVCA
);
/* Per-TB instruction budget from cflags; 0 means "no explicit count",
   and the budget is clamped to TCG_MAX_INSNS either way. */
1842 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
1843 if (max_insns
== 0) {
1844 max_insns
= CF_COUNT_MASK
;
1846 if (max_insns
> TCG_MAX_INSNS
) {
1847 max_insns
= TCG_MAX_INSNS
;
/* Translate until something sets bstate (branch/exception/stop) or the
   TCG op buffer runs out of room. */
1851 while (ctx
.bstate
== BS_NONE
&& !tcg_op_buf_full()) {
/* Record (pc, flags) so the insn can be found again on restore. */
1852 tcg_gen_insn_start(ctx
.pc
, ctx
.flags
);
/* Debugger breakpoint at this pc: emit a debug trap instead of the
   insn and end the block. */
1855 if (unlikely(cpu_breakpoint_test(cs
, ctx
.pc
, BP_ANY
))) {
1856 /* We have hit a breakpoint - make sure PC is up-to-date */
1857 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1858 gen_helper_debug(cpu_env
);
1859 ctx
.bstate
= BS_BRANCH
;
1860 /* The address covered by the breakpoint must be included in
1861 [tb->pc, tb->pc + tb->size) in order to for it to be
1862 properly cleared -- thus we increment the PC here so that
1863 the logic setting tb->size below does the right thing. */
/* Last budgeted insn with CF_LAST_IO set: icount I/O handling — the
   body of this `if` is elided from this view. */
1868 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
/* Fetch the 16-bit opcode and (in elided lines) decode it. */
1872 ctx
.opcode
= cpu_lduw_code(env
, ctx
.pc
);
/* Loop-exit conditions: page boundary crossed, single-stepping, or
   instruction budget exhausted (bodies elided in this view). */
1875 if ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
1877 if (cs
->singlestep_enabled
) {
1880 if (num_insns
>= max_insns
)
1885 if (tb
->cflags
& CF_LAST_IO
)
/* When single-stepping, leave pc up to date and trap to the debugger
   instead of chaining to the next TB. */
1887 if (cs
->singlestep_enabled
) {
1888 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1889 gen_helper_debug(cpu_env
);
/* Otherwise finish the TB according to how translation stopped
   (several case labels/bodies are elided in this view). */
1891 switch (ctx
.bstate
) {
1893 /* gen_op_interrupt_restart(); */
/* Fell off the end without a branch: persist flags (with CLEARME so
   the next TB knows no delay slot is pending) and jump to next pc. */
1897 gen_store_flags(ctx
.flags
| DELAY_SLOT_CLEARME
);
1899 gen_goto_tb(&ctx
, 0, ctx
.pc
);
1902 /* gen_op_interrupt_restart(); */
1911 gen_tb_end(tb
, num_insns
);
/* Record the TB's byte size and retired-instruction count. */
1913 tb
->size
= ctx
.pc
- pc_start
;
1914 tb
->icount
= num_insns
;
/* Optional disassembly dump of the guest code just translated. */
1917 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
1918 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
1919 log_target_disas(cs
, pc_start
, ctx
.pc
- pc_start
, 0);
1925 void restore_state_to_opc(CPUSH4State
*env
, TranslationBlock
*tb
,
1929 env
->flags
= data
[1];