 * Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 //#define SH4_SINGLE_STEP
24 #include "disas/disas.h"
31 typedef struct DisasContext
{
32 struct TranslationBlock
*tb
;
39 int singlestep_enabled
;
44 #if defined(CONFIG_USER_ONLY)
45 #define IS_USER(ctx) 1
47 #define IS_USER(ctx) (!(ctx->flags & SR_MD))
51 BS_NONE
= 0, /* We go out of the TB without reaching a branch or an
54 BS_STOP
= 1, /* We want to stop translation for any reason */
55 BS_BRANCH
= 2, /* We reached a branch condition */
56 BS_EXCP
= 3, /* We reached an exception condition */
59 /* global register indexes */
60 static TCGv_ptr cpu_env
;
61 static TCGv cpu_gregs
[24];
62 static TCGv cpu_pc
, cpu_sr
, cpu_ssr
, cpu_spc
, cpu_gbr
;
63 static TCGv cpu_vbr
, cpu_sgr
, cpu_dbr
, cpu_mach
, cpu_macl
;
64 static TCGv cpu_pr
, cpu_fpscr
, cpu_fpul
, cpu_ldst
;
65 static TCGv cpu_fregs
[32];
67 /* internal register indexes */
68 static TCGv cpu_flags
, cpu_delayed_pc
;
70 static uint32_t gen_opc_hflags
[OPC_BUF_SIZE
];
72 #include "exec/gen-icount.h"
74 void sh4_translate_init(void)
77 static int done_init
= 0;
78 static const char * const gregnames
[24] = {
79 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
80 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
81 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
82 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
83 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
85 static const char * const fregnames
[32] = {
86 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
87 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
88 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
89 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
90 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
91 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
92 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
93 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
99 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
101 for (i
= 0; i
< 24; i
++)
102 cpu_gregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
103 offsetof(CPUSH4State
, gregs
[i
]),
106 cpu_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
107 offsetof(CPUSH4State
, pc
), "PC");
108 cpu_sr
= tcg_global_mem_new_i32(TCG_AREG0
,
109 offsetof(CPUSH4State
, sr
), "SR");
110 cpu_ssr
= tcg_global_mem_new_i32(TCG_AREG0
,
111 offsetof(CPUSH4State
, ssr
), "SSR");
112 cpu_spc
= tcg_global_mem_new_i32(TCG_AREG0
,
113 offsetof(CPUSH4State
, spc
), "SPC");
114 cpu_gbr
= tcg_global_mem_new_i32(TCG_AREG0
,
115 offsetof(CPUSH4State
, gbr
), "GBR");
116 cpu_vbr
= tcg_global_mem_new_i32(TCG_AREG0
,
117 offsetof(CPUSH4State
, vbr
), "VBR");
118 cpu_sgr
= tcg_global_mem_new_i32(TCG_AREG0
,
119 offsetof(CPUSH4State
, sgr
), "SGR");
120 cpu_dbr
= tcg_global_mem_new_i32(TCG_AREG0
,
121 offsetof(CPUSH4State
, dbr
), "DBR");
122 cpu_mach
= tcg_global_mem_new_i32(TCG_AREG0
,
123 offsetof(CPUSH4State
, mach
), "MACH");
124 cpu_macl
= tcg_global_mem_new_i32(TCG_AREG0
,
125 offsetof(CPUSH4State
, macl
), "MACL");
126 cpu_pr
= tcg_global_mem_new_i32(TCG_AREG0
,
127 offsetof(CPUSH4State
, pr
), "PR");
128 cpu_fpscr
= tcg_global_mem_new_i32(TCG_AREG0
,
129 offsetof(CPUSH4State
, fpscr
), "FPSCR");
130 cpu_fpul
= tcg_global_mem_new_i32(TCG_AREG0
,
131 offsetof(CPUSH4State
, fpul
), "FPUL");
133 cpu_flags
= tcg_global_mem_new_i32(TCG_AREG0
,
134 offsetof(CPUSH4State
, flags
), "_flags_");
135 cpu_delayed_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
136 offsetof(CPUSH4State
, delayed_pc
),
138 cpu_ldst
= tcg_global_mem_new_i32(TCG_AREG0
,
139 offsetof(CPUSH4State
, ldst
), "_ldst_");
141 for (i
= 0; i
< 32; i
++)
142 cpu_fregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
143 offsetof(CPUSH4State
, fregs
[i
]),
146 /* register helpers */
153 void cpu_dump_state(CPUSH4State
* env
, FILE * f
,
154 int (*cpu_fprintf
) (FILE * f
, const char *fmt
, ...),
158 cpu_fprintf(f
, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
159 env
->pc
, env
->sr
, env
->pr
, env
->fpscr
);
160 cpu_fprintf(f
, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
161 env
->spc
, env
->ssr
, env
->gbr
, env
->vbr
);
162 cpu_fprintf(f
, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
163 env
->sgr
, env
->dbr
, env
->delayed_pc
, env
->fpul
);
164 for (i
= 0; i
< 24; i
+= 4) {
165 cpu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
166 i
, env
->gregs
[i
], i
+ 1, env
->gregs
[i
+ 1],
167 i
+ 2, env
->gregs
[i
+ 2], i
+ 3, env
->gregs
[i
+ 3]);
169 if (env
->flags
& DELAY_SLOT
) {
170 cpu_fprintf(f
, "in delay slot (delayed_pc=0x%08x)\n",
172 } else if (env
->flags
& DELAY_SLOT_CONDITIONAL
) {
173 cpu_fprintf(f
, "in conditional delay slot (delayed_pc=0x%08x)\n",
178 static void gen_goto_tb(DisasContext
* ctx
, int n
, target_ulong dest
)
180 TranslationBlock
*tb
;
183 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
184 !ctx
->singlestep_enabled
) {
185 /* Use a direct jump if in same page and singlestep not enabled */
187 tcg_gen_movi_i32(cpu_pc
, dest
);
188 tcg_gen_exit_tb((tcg_target_long
)tb
+ n
);
190 tcg_gen_movi_i32(cpu_pc
, dest
);
191 if (ctx
->singlestep_enabled
)
192 gen_helper_debug(cpu_env
);
197 static void gen_jump(DisasContext
* ctx
)
199 if (ctx
->delayed_pc
== (uint32_t) - 1) {
200 /* Target is not statically known, it comes necessarily from a
201 delayed jump as immediate jump are conditinal jumps */
202 tcg_gen_mov_i32(cpu_pc
, cpu_delayed_pc
);
203 if (ctx
->singlestep_enabled
)
204 gen_helper_debug(cpu_env
);
207 gen_goto_tb(ctx
, 0, ctx
->delayed_pc
);
211 static inline void gen_branch_slot(uint32_t delayed_pc
, int t
)
214 int label
= gen_new_label();
215 tcg_gen_movi_i32(cpu_delayed_pc
, delayed_pc
);
217 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
218 tcg_gen_brcondi_i32(t
? TCG_COND_EQ
:TCG_COND_NE
, sr
, 0, label
);
219 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
220 gen_set_label(label
);
223 /* Immediate conditional jump (bt or bf) */
224 static void gen_conditional_jump(DisasContext
* ctx
,
225 target_ulong ift
, target_ulong ifnott
)
230 l1
= gen_new_label();
232 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
233 tcg_gen_brcondi_i32(TCG_COND_NE
, sr
, 0, l1
);
234 gen_goto_tb(ctx
, 0, ifnott
);
236 gen_goto_tb(ctx
, 1, ift
);
239 /* Delayed conditional jump (bt or bf) */
240 static void gen_delayed_conditional_jump(DisasContext
* ctx
)
245 l1
= gen_new_label();
247 tcg_gen_andi_i32(ds
, cpu_flags
, DELAY_SLOT_TRUE
);
248 tcg_gen_brcondi_i32(TCG_COND_NE
, ds
, 0, l1
);
249 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
251 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, ~DELAY_SLOT_TRUE
);
255 static inline void gen_cmp(int cond
, TCGv t0
, TCGv t1
)
260 tcg_gen_setcond_i32(cond
, t
, t1
, t0
);
261 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
262 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
267 static inline void gen_cmp_imm(int cond
, TCGv t0
, int32_t imm
)
272 tcg_gen_setcondi_i32(cond
, t
, t0
, imm
);
273 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
274 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
279 static inline void gen_store_flags(uint32_t flags
)
281 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
282 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, flags
);
285 static inline void gen_copy_bit_i32(TCGv t0
, int p0
, TCGv t1
, int p1
)
287 TCGv tmp
= tcg_temp_new();
292 tcg_gen_andi_i32(tmp
, t1
, (1 << p1
));
293 tcg_gen_andi_i32(t0
, t0
, ~(1 << p0
));
295 tcg_gen_shri_i32(tmp
, tmp
, p1
- p0
);
297 tcg_gen_shli_i32(tmp
, tmp
, p0
- p1
);
298 tcg_gen_or_i32(t0
, t0
, tmp
);
303 static inline void gen_load_fpr64(TCGv_i64 t
, int reg
)
305 tcg_gen_concat_i32_i64(t
, cpu_fregs
[reg
+ 1], cpu_fregs
[reg
]);
308 static inline void gen_store_fpr64 (TCGv_i64 t
, int reg
)
310 TCGv_i32 tmp
= tcg_temp_new_i32();
311 tcg_gen_trunc_i64_i32(tmp
, t
);
312 tcg_gen_mov_i32(cpu_fregs
[reg
+ 1], tmp
);
313 tcg_gen_shri_i64(t
, t
, 32);
314 tcg_gen_trunc_i64_i32(tmp
, t
);
315 tcg_gen_mov_i32(cpu_fregs
[reg
], tmp
);
316 tcg_temp_free_i32(tmp
);
/* Instruction field extraction helpers; all operate on ctx->opcode.  */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* Sign-extended 12-bit branch displacement.  */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* Active general register: R0-R7 come from BANK1 when MD and RB are set.  */
#define REG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) == (SR_MD | SR_RB) \
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* Inactive-bank counterpart of REG() (ldc/stc Rn_BANK).  */
#define ALTREG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) != (SR_MD | SR_RB)\
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register selection: FPSCR.FR swaps the two banks of 16.  */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
/* Map XDn encoding (odd bit selects bank) onto the flat register file.  */
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */

/* NOTE(review): the brace/else/return continuation lines of the CHECK_*
   macros were lost in extraction and are reconstructed following
   upstream QEMU; each macro aborts decoding of the current instruction
   with the matching exception, so they may only be used inside
   _decode_opc.  */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))     \
  {                                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                      \
      gen_helper_raise_slot_illegal_instruction(cpu_env);     \
      ctx->bstate = BS_BRANCH;                                \
      return;                                                 \
  }

#define CHECK_PRIVILEGED                                        \
  if (IS_USER(ctx)) {                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_illegal_instruction(cpu_env);   \
      } else {                                                  \
          gen_helper_raise_illegal_instruction(cpu_env);        \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }

#define CHECK_FPU_ENABLED                                       \
  if (ctx->flags & SR_FD) {                                     \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_fpu_disable(cpu_env);           \
      } else {                                                  \
          gen_helper_raise_fpu_disable(cpu_env);                \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }
373 static void _decode_opc(DisasContext
* ctx
)
375 /* This code tries to make movcal emulation sufficiently
376 accurate for Linux purposes. This instruction writes
377 memory, and prior to that, always allocates a cache line.
378 It is used in two contexts:
379 - in memcpy, where data is copied in blocks, the first write
380 of to a block uses movca.l for performance.
381 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
382 to flush the cache. Here, the data written by movcal.l is never
383 written to memory, and the data written is just bogus.
385 To simulate this, we simulate movcal.l, we store the value to memory,
386 but we also remember the previous content. If we see ocbi, we check
387 if movcal.l for that address was done previously. If so, the write should
388 not have hit the memory, so we restore the previous content.
389 When we see an instruction that is neither movca.l
390 nor ocbi, the previous content is discarded.
392 To optimize, we only try to flush stores when we're at the start of
393 TB, or if we already saw movca.l in this TB and did not flush stores
397 int opcode
= ctx
->opcode
& 0xf0ff;
398 if (opcode
!= 0x0093 /* ocbi */
399 && opcode
!= 0x00c3 /* movca.l */)
401 gen_helper_discard_movcal_backup(cpu_env
);
407 fprintf(stderr
, "Translating opcode 0x%04x\n", ctx
->opcode
);
410 switch (ctx
->opcode
) {
411 case 0x0019: /* div0u */
412 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~(SR_M
| SR_Q
| SR_T
));
414 case 0x000b: /* rts */
416 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_pr
);
417 ctx
->flags
|= DELAY_SLOT
;
418 ctx
->delayed_pc
= (uint32_t) - 1;
420 case 0x0028: /* clrmac */
421 tcg_gen_movi_i32(cpu_mach
, 0);
422 tcg_gen_movi_i32(cpu_macl
, 0);
424 case 0x0048: /* clrs */
425 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_S
);
427 case 0x0008: /* clrt */
428 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
430 case 0x0038: /* ldtlb */
432 gen_helper_ldtlb(cpu_env
);
434 case 0x002b: /* rte */
437 tcg_gen_mov_i32(cpu_sr
, cpu_ssr
);
438 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_spc
);
439 ctx
->flags
|= DELAY_SLOT
;
440 ctx
->delayed_pc
= (uint32_t) - 1;
442 case 0x0058: /* sets */
443 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_S
);
445 case 0x0018: /* sett */
446 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_T
);
448 case 0xfbfd: /* frchg */
449 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_FR
);
450 ctx
->bstate
= BS_STOP
;
452 case 0xf3fd: /* fschg */
453 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_SZ
);
454 ctx
->bstate
= BS_STOP
;
456 case 0x0009: /* nop */
458 case 0x001b: /* sleep */
460 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
+ 2);
461 gen_helper_sleep(cpu_env
);
465 switch (ctx
->opcode
& 0xf000) {
466 case 0x1000: /* mov.l Rm,@(disp,Rn) */
468 TCGv addr
= tcg_temp_new();
469 tcg_gen_addi_i32(addr
, REG(B11_8
), B3_0
* 4);
470 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
474 case 0x5000: /* mov.l @(disp,Rm),Rn */
476 TCGv addr
= tcg_temp_new();
477 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 4);
478 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
482 case 0xe000: /* mov #imm,Rn */
483 tcg_gen_movi_i32(REG(B11_8
), B7_0s
);
485 case 0x9000: /* mov.w @(disp,PC),Rn */
487 TCGv addr
= tcg_const_i32(ctx
->pc
+ 4 + B7_0
* 2);
488 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
492 case 0xd000: /* mov.l @(disp,PC),Rn */
494 TCGv addr
= tcg_const_i32((ctx
->pc
+ 4 + B7_0
* 4) & ~3);
495 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
499 case 0x7000: /* add #imm,Rn */
500 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), B7_0s
);
502 case 0xa000: /* bra disp */
504 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
505 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
506 ctx
->flags
|= DELAY_SLOT
;
508 case 0xb000: /* bsr disp */
510 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
511 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
512 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
513 ctx
->flags
|= DELAY_SLOT
;
517 switch (ctx
->opcode
& 0xf00f) {
518 case 0x6003: /* mov Rm,Rn */
519 tcg_gen_mov_i32(REG(B11_8
), REG(B7_4
));
521 case 0x2000: /* mov.b Rm,@Rn */
522 tcg_gen_qemu_st8(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
524 case 0x2001: /* mov.w Rm,@Rn */
525 tcg_gen_qemu_st16(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
527 case 0x2002: /* mov.l Rm,@Rn */
528 tcg_gen_qemu_st32(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
530 case 0x6000: /* mov.b @Rm,Rn */
531 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
533 case 0x6001: /* mov.w @Rm,Rn */
534 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
536 case 0x6002: /* mov.l @Rm,Rn */
537 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
539 case 0x2004: /* mov.b Rm,@-Rn */
541 TCGv addr
= tcg_temp_new();
542 tcg_gen_subi_i32(addr
, REG(B11_8
), 1);
543 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
); /* might cause re-execution */
544 tcg_gen_mov_i32(REG(B11_8
), addr
); /* modify register status */
548 case 0x2005: /* mov.w Rm,@-Rn */
550 TCGv addr
= tcg_temp_new();
551 tcg_gen_subi_i32(addr
, REG(B11_8
), 2);
552 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
553 tcg_gen_mov_i32(REG(B11_8
), addr
);
557 case 0x2006: /* mov.l Rm,@-Rn */
559 TCGv addr
= tcg_temp_new();
560 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
561 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
562 tcg_gen_mov_i32(REG(B11_8
), addr
);
565 case 0x6004: /* mov.b @Rm+,Rn */
566 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
568 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 1);
570 case 0x6005: /* mov.w @Rm+,Rn */
571 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
573 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
575 case 0x6006: /* mov.l @Rm+,Rn */
576 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
578 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
580 case 0x0004: /* mov.b Rm,@(R0,Rn) */
582 TCGv addr
= tcg_temp_new();
583 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
584 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
);
588 case 0x0005: /* mov.w Rm,@(R0,Rn) */
590 TCGv addr
= tcg_temp_new();
591 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
592 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
596 case 0x0006: /* mov.l Rm,@(R0,Rn) */
598 TCGv addr
= tcg_temp_new();
599 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
600 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
604 case 0x000c: /* mov.b @(R0,Rm),Rn */
606 TCGv addr
= tcg_temp_new();
607 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
608 tcg_gen_qemu_ld8s(REG(B11_8
), addr
, ctx
->memidx
);
612 case 0x000d: /* mov.w @(R0,Rm),Rn */
614 TCGv addr
= tcg_temp_new();
615 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
616 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
620 case 0x000e: /* mov.l @(R0,Rm),Rn */
622 TCGv addr
= tcg_temp_new();
623 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
624 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
628 case 0x6008: /* swap.b Rm,Rn */
631 high
= tcg_temp_new();
632 tcg_gen_andi_i32(high
, REG(B7_4
), 0xffff0000);
633 low
= tcg_temp_new();
634 tcg_gen_ext16u_i32(low
, REG(B7_4
));
635 tcg_gen_bswap16_i32(low
, low
);
636 tcg_gen_or_i32(REG(B11_8
), high
, low
);
641 case 0x6009: /* swap.w Rm,Rn */
642 tcg_gen_rotli_i32(REG(B11_8
), REG(B7_4
), 16);
644 case 0x200d: /* xtrct Rm,Rn */
647 high
= tcg_temp_new();
648 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
649 low
= tcg_temp_new();
650 tcg_gen_shri_i32(low
, REG(B11_8
), 16);
651 tcg_gen_or_i32(REG(B11_8
), high
, low
);
656 case 0x300c: /* add Rm,Rn */
657 tcg_gen_add_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
659 case 0x300e: /* addc Rm,Rn */
663 tcg_gen_andi_i32(t0
, cpu_sr
, SR_T
);
665 tcg_gen_add_i32(t1
, REG(B7_4
), REG(B11_8
));
666 tcg_gen_add_i32(t0
, t0
, t1
);
668 tcg_gen_setcond_i32(TCG_COND_GTU
, t2
, REG(B11_8
), t1
);
669 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, t1
, t0
);
670 tcg_gen_or_i32(t1
, t1
, t2
);
672 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
673 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
675 tcg_gen_mov_i32(REG(B11_8
), t0
);
679 case 0x300f: /* addv Rm,Rn */
683 tcg_gen_add_i32(t0
, REG(B7_4
), REG(B11_8
));
685 tcg_gen_xor_i32(t1
, t0
, REG(B11_8
));
687 tcg_gen_xor_i32(t2
, REG(B7_4
), REG(B11_8
));
688 tcg_gen_andc_i32(t1
, t1
, t2
);
690 tcg_gen_shri_i32(t1
, t1
, 31);
691 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
692 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
694 tcg_gen_mov_i32(REG(B7_4
), t0
);
698 case 0x2009: /* and Rm,Rn */
699 tcg_gen_and_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
701 case 0x3000: /* cmp/eq Rm,Rn */
702 gen_cmp(TCG_COND_EQ
, REG(B7_4
), REG(B11_8
));
704 case 0x3003: /* cmp/ge Rm,Rn */
705 gen_cmp(TCG_COND_GE
, REG(B7_4
), REG(B11_8
));
707 case 0x3007: /* cmp/gt Rm,Rn */
708 gen_cmp(TCG_COND_GT
, REG(B7_4
), REG(B11_8
));
710 case 0x3006: /* cmp/hi Rm,Rn */
711 gen_cmp(TCG_COND_GTU
, REG(B7_4
), REG(B11_8
));
713 case 0x3002: /* cmp/hs Rm,Rn */
714 gen_cmp(TCG_COND_GEU
, REG(B7_4
), REG(B11_8
));
716 case 0x200c: /* cmp/str Rm,Rn */
718 TCGv cmp1
= tcg_temp_new();
719 TCGv cmp2
= tcg_temp_new();
720 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
721 tcg_gen_xor_i32(cmp1
, REG(B7_4
), REG(B11_8
));
722 tcg_gen_andi_i32(cmp2
, cmp1
, 0xff000000);
723 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
724 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
725 tcg_gen_andi_i32(cmp2
, cmp1
, 0x00ff0000);
726 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
727 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
728 tcg_gen_andi_i32(cmp2
, cmp1
, 0x0000ff00);
729 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
730 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
731 tcg_gen_andi_i32(cmp2
, cmp1
, 0x000000ff);
732 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
733 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
738 case 0x2007: /* div0s Rm,Rn */
740 gen_copy_bit_i32(cpu_sr
, 8, REG(B11_8
), 31); /* SR_Q */
741 gen_copy_bit_i32(cpu_sr
, 9, REG(B7_4
), 31); /* SR_M */
742 TCGv val
= tcg_temp_new();
743 tcg_gen_xor_i32(val
, REG(B7_4
), REG(B11_8
));
744 gen_copy_bit_i32(cpu_sr
, 0, val
, 31); /* SR_T */
748 case 0x3004: /* div1 Rm,Rn */
749 gen_helper_div1(REG(B11_8
), cpu_env
, REG(B7_4
), REG(B11_8
));
751 case 0x300d: /* dmuls.l Rm,Rn */
752 tcg_gen_muls2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
754 case 0x3005: /* dmulu.l Rm,Rn */
755 tcg_gen_mulu2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
757 case 0x600e: /* exts.b Rm,Rn */
758 tcg_gen_ext8s_i32(REG(B11_8
), REG(B7_4
));
760 case 0x600f: /* exts.w Rm,Rn */
761 tcg_gen_ext16s_i32(REG(B11_8
), REG(B7_4
));
763 case 0x600c: /* extu.b Rm,Rn */
764 tcg_gen_ext8u_i32(REG(B11_8
), REG(B7_4
));
766 case 0x600d: /* extu.w Rm,Rn */
767 tcg_gen_ext16u_i32(REG(B11_8
), REG(B7_4
));
769 case 0x000f: /* mac.l @Rm+,@Rn+ */
772 arg0
= tcg_temp_new();
773 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
774 arg1
= tcg_temp_new();
775 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
776 gen_helper_macl(cpu_env
, arg0
, arg1
);
779 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
780 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
783 case 0x400f: /* mac.w @Rm+,@Rn+ */
786 arg0
= tcg_temp_new();
787 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
788 arg1
= tcg_temp_new();
789 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
790 gen_helper_macw(cpu_env
, arg0
, arg1
);
793 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 2);
794 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
797 case 0x0007: /* mul.l Rm,Rn */
798 tcg_gen_mul_i32(cpu_macl
, REG(B7_4
), REG(B11_8
));
800 case 0x200f: /* muls.w Rm,Rn */
803 arg0
= tcg_temp_new();
804 tcg_gen_ext16s_i32(arg0
, REG(B7_4
));
805 arg1
= tcg_temp_new();
806 tcg_gen_ext16s_i32(arg1
, REG(B11_8
));
807 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
812 case 0x200e: /* mulu.w Rm,Rn */
815 arg0
= tcg_temp_new();
816 tcg_gen_ext16u_i32(arg0
, REG(B7_4
));
817 arg1
= tcg_temp_new();
818 tcg_gen_ext16u_i32(arg1
, REG(B11_8
));
819 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
824 case 0x600b: /* neg Rm,Rn */
825 tcg_gen_neg_i32(REG(B11_8
), REG(B7_4
));
827 case 0x600a: /* negc Rm,Rn */
831 tcg_gen_neg_i32(t0
, REG(B7_4
));
833 tcg_gen_andi_i32(t1
, cpu_sr
, SR_T
);
834 tcg_gen_sub_i32(REG(B11_8
), t0
, t1
);
835 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
836 tcg_gen_setcondi_i32(TCG_COND_GTU
, t1
, t0
, 0);
837 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
838 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, REG(B11_8
), t0
);
839 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
844 case 0x6007: /* not Rm,Rn */
845 tcg_gen_not_i32(REG(B11_8
), REG(B7_4
));
847 case 0x200b: /* or Rm,Rn */
848 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
850 case 0x400c: /* shad Rm,Rn */
852 int label1
= gen_new_label();
853 int label2
= gen_new_label();
854 int label3
= gen_new_label();
855 int label4
= gen_new_label();
857 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
858 /* Rm positive, shift to the left */
859 shift
= tcg_temp_new();
860 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
861 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
862 tcg_temp_free(shift
);
864 /* Rm negative, shift to the right */
865 gen_set_label(label1
);
866 shift
= tcg_temp_new();
867 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
868 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
869 tcg_gen_not_i32(shift
, REG(B7_4
));
870 tcg_gen_andi_i32(shift
, shift
, 0x1f);
871 tcg_gen_addi_i32(shift
, shift
, 1);
872 tcg_gen_sar_i32(REG(B11_8
), REG(B11_8
), shift
);
873 tcg_temp_free(shift
);
876 gen_set_label(label2
);
877 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B11_8
), 0, label3
);
878 tcg_gen_movi_i32(REG(B11_8
), 0);
880 gen_set_label(label3
);
881 tcg_gen_movi_i32(REG(B11_8
), 0xffffffff);
882 gen_set_label(label4
);
885 case 0x400d: /* shld Rm,Rn */
887 int label1
= gen_new_label();
888 int label2
= gen_new_label();
889 int label3
= gen_new_label();
891 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
892 /* Rm positive, shift to the left */
893 shift
= tcg_temp_new();
894 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
895 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
896 tcg_temp_free(shift
);
898 /* Rm negative, shift to the right */
899 gen_set_label(label1
);
900 shift
= tcg_temp_new();
901 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
902 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
903 tcg_gen_not_i32(shift
, REG(B7_4
));
904 tcg_gen_andi_i32(shift
, shift
, 0x1f);
905 tcg_gen_addi_i32(shift
, shift
, 1);
906 tcg_gen_shr_i32(REG(B11_8
), REG(B11_8
), shift
);
907 tcg_temp_free(shift
);
910 gen_set_label(label2
);
911 tcg_gen_movi_i32(REG(B11_8
), 0);
912 gen_set_label(label3
);
915 case 0x3008: /* sub Rm,Rn */
916 tcg_gen_sub_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
918 case 0x300a: /* subc Rm,Rn */
922 tcg_gen_andi_i32(t0
, cpu_sr
, SR_T
);
924 tcg_gen_sub_i32(t1
, REG(B11_8
), REG(B7_4
));
925 tcg_gen_sub_i32(t0
, t1
, t0
);
927 tcg_gen_setcond_i32(TCG_COND_LTU
, t2
, REG(B11_8
), t1
);
928 tcg_gen_setcond_i32(TCG_COND_LTU
, t1
, t1
, t0
);
929 tcg_gen_or_i32(t1
, t1
, t2
);
931 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
932 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
934 tcg_gen_mov_i32(REG(B11_8
), t0
);
938 case 0x300b: /* subv Rm,Rn */
942 tcg_gen_sub_i32(t0
, REG(B11_8
), REG(B7_4
));
944 tcg_gen_xor_i32(t1
, t0
, REG(B7_4
));
946 tcg_gen_xor_i32(t2
, REG(B11_8
), REG(B7_4
));
947 tcg_gen_and_i32(t1
, t1
, t2
);
949 tcg_gen_shri_i32(t1
, t1
, 31);
950 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
951 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
953 tcg_gen_mov_i32(REG(B11_8
), t0
);
957 case 0x2008: /* tst Rm,Rn */
959 TCGv val
= tcg_temp_new();
960 tcg_gen_and_i32(val
, REG(B7_4
), REG(B11_8
));
961 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
965 case 0x200a: /* xor Rm,Rn */
966 tcg_gen_xor_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
968 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
970 if (ctx
->flags
& FPSCR_SZ
) {
971 TCGv_i64 fp
= tcg_temp_new_i64();
972 gen_load_fpr64(fp
, XREG(B7_4
));
973 gen_store_fpr64(fp
, XREG(B11_8
));
974 tcg_temp_free_i64(fp
);
976 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
979 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
981 if (ctx
->flags
& FPSCR_SZ
) {
982 TCGv addr_hi
= tcg_temp_new();
984 tcg_gen_addi_i32(addr_hi
, REG(B11_8
), 4);
985 tcg_gen_qemu_st32(cpu_fregs
[fr
], REG(B11_8
), ctx
->memidx
);
986 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
987 tcg_temp_free(addr_hi
);
989 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], REG(B11_8
), ctx
->memidx
);
992 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
994 if (ctx
->flags
& FPSCR_SZ
) {
995 TCGv addr_hi
= tcg_temp_new();
996 int fr
= XREG(B11_8
);
997 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
998 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
999 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1000 tcg_temp_free(addr_hi
);
1002 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1005 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1007 if (ctx
->flags
& FPSCR_SZ
) {
1008 TCGv addr_hi
= tcg_temp_new();
1009 int fr
= XREG(B11_8
);
1010 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1011 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
1012 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1013 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 8);
1014 tcg_temp_free(addr_hi
);
1016 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1017 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
1020 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1022 if (ctx
->flags
& FPSCR_SZ
) {
1023 TCGv addr
= tcg_temp_new_i32();
1024 int fr
= XREG(B7_4
);
1025 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1026 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1027 tcg_gen_subi_i32(addr
, addr
, 4);
1028 tcg_gen_qemu_st32(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1029 tcg_gen_mov_i32(REG(B11_8
), addr
);
1030 tcg_temp_free(addr
);
1033 addr
= tcg_temp_new_i32();
1034 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1035 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1036 tcg_gen_mov_i32(REG(B11_8
), addr
);
1037 tcg_temp_free(addr
);
1040 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1043 TCGv addr
= tcg_temp_new_i32();
1044 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
1045 if (ctx
->flags
& FPSCR_SZ
) {
1046 int fr
= XREG(B11_8
);
1047 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1048 tcg_gen_addi_i32(addr
, addr
, 4);
1049 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1051 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], addr
, ctx
->memidx
);
1053 tcg_temp_free(addr
);
1056 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1059 TCGv addr
= tcg_temp_new();
1060 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
1061 if (ctx
->flags
& FPSCR_SZ
) {
1062 int fr
= XREG(B7_4
);
1063 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1064 tcg_gen_addi_i32(addr
, addr
, 4);
1065 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1067 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1069 tcg_temp_free(addr
);
1072 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1073 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1074 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1075 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1076 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1077 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1080 if (ctx
->flags
& FPSCR_PR
) {
1083 if (ctx
->opcode
& 0x0110)
1084 break; /* illegal instruction */
1085 fp0
= tcg_temp_new_i64();
1086 fp1
= tcg_temp_new_i64();
1087 gen_load_fpr64(fp0
, DREG(B11_8
));
1088 gen_load_fpr64(fp1
, DREG(B7_4
));
1089 switch (ctx
->opcode
& 0xf00f) {
1090 case 0xf000: /* fadd Rm,Rn */
1091 gen_helper_fadd_DT(fp0
, cpu_env
, fp0
, fp1
);
1093 case 0xf001: /* fsub Rm,Rn */
1094 gen_helper_fsub_DT(fp0
, cpu_env
, fp0
, fp1
);
1096 case 0xf002: /* fmul Rm,Rn */
1097 gen_helper_fmul_DT(fp0
, cpu_env
, fp0
, fp1
);
1099 case 0xf003: /* fdiv Rm,Rn */
1100 gen_helper_fdiv_DT(fp0
, cpu_env
, fp0
, fp1
);
1102 case 0xf004: /* fcmp/eq Rm,Rn */
1103 gen_helper_fcmp_eq_DT(cpu_env
, fp0
, fp1
);
1105 case 0xf005: /* fcmp/gt Rm,Rn */
1106 gen_helper_fcmp_gt_DT(cpu_env
, fp0
, fp1
);
1109 gen_store_fpr64(fp0
, DREG(B11_8
));
1110 tcg_temp_free_i64(fp0
);
1111 tcg_temp_free_i64(fp1
);
1113 switch (ctx
->opcode
& 0xf00f) {
1114 case 0xf000: /* fadd Rm,Rn */
1115 gen_helper_fadd_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1116 cpu_fregs
[FREG(B11_8
)],
1117 cpu_fregs
[FREG(B7_4
)]);
1119 case 0xf001: /* fsub Rm,Rn */
1120 gen_helper_fsub_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1121 cpu_fregs
[FREG(B11_8
)],
1122 cpu_fregs
[FREG(B7_4
)]);
1124 case 0xf002: /* fmul Rm,Rn */
1125 gen_helper_fmul_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1126 cpu_fregs
[FREG(B11_8
)],
1127 cpu_fregs
[FREG(B7_4
)]);
1129 case 0xf003: /* fdiv Rm,Rn */
1130 gen_helper_fdiv_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1131 cpu_fregs
[FREG(B11_8
)],
1132 cpu_fregs
[FREG(B7_4
)]);
1134 case 0xf004: /* fcmp/eq Rm,Rn */
1135 gen_helper_fcmp_eq_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1136 cpu_fregs
[FREG(B7_4
)]);
1138 case 0xf005: /* fcmp/gt Rm,Rn */
1139 gen_helper_fcmp_gt_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1140 cpu_fregs
[FREG(B7_4
)]);
1146 case 0xf00e: /* fmac FR0,RM,Rn */
1149 if (ctx
->flags
& FPSCR_PR
) {
1150 break; /* illegal instruction */
1152 gen_helper_fmac_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1153 cpu_fregs
[FREG(0)], cpu_fregs
[FREG(B7_4
)],
1154 cpu_fregs
[FREG(B11_8
)]);
1160 switch (ctx
->opcode
& 0xff00) {
1161 case 0xc900: /* and #imm,R0 */
1162 tcg_gen_andi_i32(REG(0), REG(0), B7_0
);
1164 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1167 addr
= tcg_temp_new();
1168 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1169 val
= tcg_temp_new();
1170 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1171 tcg_gen_andi_i32(val
, val
, B7_0
);
1172 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1174 tcg_temp_free(addr
);
1177 case 0x8b00: /* bf label */
1178 CHECK_NOT_DELAY_SLOT
1179 gen_conditional_jump(ctx
, ctx
->pc
+ 2,
1180 ctx
->pc
+ 4 + B7_0s
* 2);
1181 ctx
->bstate
= BS_BRANCH
;
1183 case 0x8f00: /* bf/s label */
1184 CHECK_NOT_DELAY_SLOT
1185 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 0);
1186 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1188 case 0x8900: /* bt label */
1189 CHECK_NOT_DELAY_SLOT
1190 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2,
1192 ctx
->bstate
= BS_BRANCH
;
1194 case 0x8d00: /* bt/s label */
1195 CHECK_NOT_DELAY_SLOT
1196 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 1);
1197 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1199 case 0x8800: /* cmp/eq #imm,R0 */
1200 gen_cmp_imm(TCG_COND_EQ
, REG(0), B7_0s
);
1202 case 0xc400: /* mov.b @(disp,GBR),R0 */
1204 TCGv addr
= tcg_temp_new();
1205 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1206 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1207 tcg_temp_free(addr
);
1210 case 0xc500: /* mov.w @(disp,GBR),R0 */
1212 TCGv addr
= tcg_temp_new();
1213 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1214 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1215 tcg_temp_free(addr
);
1218 case 0xc600: /* mov.l @(disp,GBR),R0 */
1220 TCGv addr
= tcg_temp_new();
1221 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1222 tcg_gen_qemu_ld32s(REG(0), addr
, ctx
->memidx
);
1223 tcg_temp_free(addr
);
1226 case 0xc000: /* mov.b R0,@(disp,GBR) */
1228 TCGv addr
= tcg_temp_new();
1229 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1230 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1231 tcg_temp_free(addr
);
1234 case 0xc100: /* mov.w R0,@(disp,GBR) */
1236 TCGv addr
= tcg_temp_new();
1237 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1238 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1239 tcg_temp_free(addr
);
1242 case 0xc200: /* mov.l R0,@(disp,GBR) */
1244 TCGv addr
= tcg_temp_new();
1245 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1246 tcg_gen_qemu_st32(REG(0), addr
, ctx
->memidx
);
1247 tcg_temp_free(addr
);
1250 case 0x8000: /* mov.b R0,@(disp,Rn) */
1252 TCGv addr
= tcg_temp_new();
1253 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1254 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1255 tcg_temp_free(addr
);
1258 case 0x8100: /* mov.w R0,@(disp,Rn) */
1260 TCGv addr
= tcg_temp_new();
1261 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1262 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1263 tcg_temp_free(addr
);
1266 case 0x8400: /* mov.b @(disp,Rn),R0 */
1268 TCGv addr
= tcg_temp_new();
1269 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1270 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1271 tcg_temp_free(addr
);
1274 case 0x8500: /* mov.w @(disp,Rn),R0 */
1276 TCGv addr
= tcg_temp_new();
1277 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1278 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1279 tcg_temp_free(addr
);
1282 case 0xc700: /* mova @(disp,PC),R0 */
1283 tcg_gen_movi_i32(REG(0), ((ctx
->pc
& 0xfffffffc) + 4 + B7_0
* 4) & ~3);
1285 case 0xcb00: /* or #imm,R0 */
1286 tcg_gen_ori_i32(REG(0), REG(0), B7_0
);
1288 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1291 addr
= tcg_temp_new();
1292 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1293 val
= tcg_temp_new();
1294 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1295 tcg_gen_ori_i32(val
, val
, B7_0
);
1296 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1298 tcg_temp_free(addr
);
1301 case 0xc300: /* trapa #imm */
1304 CHECK_NOT_DELAY_SLOT
1305 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1306 imm
= tcg_const_i32(B7_0
);
1307 gen_helper_trapa(cpu_env
, imm
);
1309 ctx
->bstate
= BS_BRANCH
;
1312 case 0xc800: /* tst #imm,R0 */
1314 TCGv val
= tcg_temp_new();
1315 tcg_gen_andi_i32(val
, REG(0), B7_0
);
1316 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1320 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1322 TCGv val
= tcg_temp_new();
1323 tcg_gen_add_i32(val
, REG(0), cpu_gbr
);
1324 tcg_gen_qemu_ld8u(val
, val
, ctx
->memidx
);
1325 tcg_gen_andi_i32(val
, val
, B7_0
);
1326 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1330 case 0xca00: /* xor #imm,R0 */
1331 tcg_gen_xori_i32(REG(0), REG(0), B7_0
);
1333 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1336 addr
= tcg_temp_new();
1337 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1338 val
= tcg_temp_new();
1339 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1340 tcg_gen_xori_i32(val
, val
, B7_0
);
1341 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1343 tcg_temp_free(addr
);
1348 switch (ctx
->opcode
& 0xf08f) {
1349 case 0x408e: /* ldc Rm,Rn_BANK */
1351 tcg_gen_mov_i32(ALTREG(B6_4
), REG(B11_8
));
1353 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1355 tcg_gen_qemu_ld32s(ALTREG(B6_4
), REG(B11_8
), ctx
->memidx
);
1356 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1358 case 0x0082: /* stc Rm_BANK,Rn */
1360 tcg_gen_mov_i32(REG(B11_8
), ALTREG(B6_4
));
1362 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1365 TCGv addr
= tcg_temp_new();
1366 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1367 tcg_gen_qemu_st32(ALTREG(B6_4
), addr
, ctx
->memidx
);
1368 tcg_gen_mov_i32(REG(B11_8
), addr
);
1369 tcg_temp_free(addr
);
1374 switch (ctx
->opcode
& 0xf0ff) {
1375 case 0x0023: /* braf Rn */
1376 CHECK_NOT_DELAY_SLOT
1377 tcg_gen_addi_i32(cpu_delayed_pc
, REG(B11_8
), ctx
->pc
+ 4);
1378 ctx
->flags
|= DELAY_SLOT
;
1379 ctx
->delayed_pc
= (uint32_t) - 1;
1381 case 0x0003: /* bsrf Rn */
1382 CHECK_NOT_DELAY_SLOT
1383 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1384 tcg_gen_add_i32(cpu_delayed_pc
, REG(B11_8
), cpu_pr
);
1385 ctx
->flags
|= DELAY_SLOT
;
1386 ctx
->delayed_pc
= (uint32_t) - 1;
1388 case 0x4015: /* cmp/pl Rn */
1389 gen_cmp_imm(TCG_COND_GT
, REG(B11_8
), 0);
1391 case 0x4011: /* cmp/pz Rn */
1392 gen_cmp_imm(TCG_COND_GE
, REG(B11_8
), 0);
1394 case 0x4010: /* dt Rn */
1395 tcg_gen_subi_i32(REG(B11_8
), REG(B11_8
), 1);
1396 gen_cmp_imm(TCG_COND_EQ
, REG(B11_8
), 0);
1398 case 0x402b: /* jmp @Rn */
1399 CHECK_NOT_DELAY_SLOT
1400 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1401 ctx
->flags
|= DELAY_SLOT
;
1402 ctx
->delayed_pc
= (uint32_t) - 1;
1404 case 0x400b: /* jsr @Rn */
1405 CHECK_NOT_DELAY_SLOT
1406 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1407 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1408 ctx
->flags
|= DELAY_SLOT
;
1409 ctx
->delayed_pc
= (uint32_t) - 1;
1411 case 0x400e: /* ldc Rm,SR */
1413 tcg_gen_andi_i32(cpu_sr
, REG(B11_8
), 0x700083f3);
1414 ctx
->bstate
= BS_STOP
;
1416 case 0x4007: /* ldc.l @Rm+,SR */
1419 TCGv val
= tcg_temp_new();
1420 tcg_gen_qemu_ld32s(val
, REG(B11_8
), ctx
->memidx
);
1421 tcg_gen_andi_i32(cpu_sr
, val
, 0x700083f3);
1423 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1424 ctx
->bstate
= BS_STOP
;
1427 case 0x0002: /* stc SR,Rn */
1429 tcg_gen_mov_i32(REG(B11_8
), cpu_sr
);
1431 case 0x4003: /* stc SR,@-Rn */
1434 TCGv addr
= tcg_temp_new();
1435 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1436 tcg_gen_qemu_st32(cpu_sr
, addr
, ctx
->memidx
);
1437 tcg_gen_mov_i32(REG(B11_8
), addr
);
1438 tcg_temp_free(addr
);
1441 #define LD(reg,ldnum,ldpnum,prechk) \
1444 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1448 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1449 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1451 #define ST(reg,stnum,stpnum,prechk) \
1454 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1459 TCGv addr = tcg_temp_new(); \
1460 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1461 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1462 tcg_gen_mov_i32(REG(B11_8), addr); \
1463 tcg_temp_free(addr); \
1466 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1467 LD(reg,ldnum,ldpnum,prechk) \
1468 ST(reg,stnum,stpnum,prechk)
1469 LDST(gbr
, 0x401e, 0x4017, 0x0012, 0x4013, {})
1470 LDST(vbr
, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED
)
1471 LDST(ssr
, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED
)
1472 LDST(spc
, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED
)
1473 ST(sgr
, 0x003a, 0x4032, CHECK_PRIVILEGED
)
1474 LD(sgr
, 0x403a, 0x4036, CHECK_PRIVILEGED
if (!(ctx
->features
& SH_FEATURE_SH4A
)) break;)
1475 LDST(dbr
, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED
)
1476 LDST(mach
, 0x400a, 0x4006, 0x000a, 0x4002, {})
1477 LDST(macl
, 0x401a, 0x4016, 0x001a, 0x4012, {})
1478 LDST(pr
, 0x402a, 0x4026, 0x002a, 0x4022, {})
1479 LDST(fpul
, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED
})
1480 case 0x406a: /* lds Rm,FPSCR */
1482 gen_helper_ld_fpscr(cpu_env
, REG(B11_8
));
1483 ctx
->bstate
= BS_STOP
;
1485 case 0x4066: /* lds.l @Rm+,FPSCR */
1488 TCGv addr
= tcg_temp_new();
1489 tcg_gen_qemu_ld32s(addr
, REG(B11_8
), ctx
->memidx
);
1490 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1491 gen_helper_ld_fpscr(cpu_env
, addr
);
1492 tcg_temp_free(addr
);
1493 ctx
->bstate
= BS_STOP
;
1496 case 0x006a: /* sts FPSCR,Rn */
1498 tcg_gen_andi_i32(REG(B11_8
), cpu_fpscr
, 0x003fffff);
1500 case 0x4062: /* sts FPSCR,@-Rn */
1504 val
= tcg_temp_new();
1505 tcg_gen_andi_i32(val
, cpu_fpscr
, 0x003fffff);
1506 addr
= tcg_temp_new();
1507 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1508 tcg_gen_qemu_st32(val
, addr
, ctx
->memidx
);
1509 tcg_gen_mov_i32(REG(B11_8
), addr
);
1510 tcg_temp_free(addr
);
1514 case 0x00c3: /* movca.l R0,@Rm */
1516 TCGv val
= tcg_temp_new();
1517 tcg_gen_qemu_ld32u(val
, REG(B11_8
), ctx
->memidx
);
1518 gen_helper_movcal(cpu_env
, REG(B11_8
), val
);
1519 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1521 ctx
->has_movcal
= 1;
1524 /* MOVUA.L @Rm,R0 (Rm) -> R0
1525 Load non-boundary-aligned data */
1526 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1529 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1530 Load non-boundary-aligned data */
1531 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1532 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1534 case 0x0029: /* movt Rn */
1535 tcg_gen_andi_i32(REG(B11_8
), cpu_sr
, SR_T
);
1540 If (T == 1) R0 -> (Rn)
1543 if (ctx
->features
& SH_FEATURE_SH4A
) {
1544 int label
= gen_new_label();
1545 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
1546 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cpu_ldst
);
1547 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ldst
, 0, label
);
1548 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1549 gen_set_label(label
);
1550 tcg_gen_movi_i32(cpu_ldst
, 0);
1558 When interrupt/exception
1561 if (ctx
->features
& SH_FEATURE_SH4A
) {
1562 tcg_gen_movi_i32(cpu_ldst
, 0);
1563 tcg_gen_qemu_ld32s(REG(0), REG(B11_8
), ctx
->memidx
);
1564 tcg_gen_movi_i32(cpu_ldst
, 1);
1568 case 0x0093: /* ocbi @Rn */
1570 gen_helper_ocbi(cpu_env
, REG(B11_8
));
1573 case 0x00a3: /* ocbp @Rn */
1574 case 0x00b3: /* ocbwb @Rn */
1575 /* These instructions are supposed to do nothing in case of
1576 a cache miss. Given that we only partially emulate caches
1577 it is safe to simply ignore them. */
1579 case 0x0083: /* pref @Rn */
1581 case 0x00d3: /* prefi @Rn */
1582 if (ctx
->features
& SH_FEATURE_SH4A
)
1586 case 0x00e3: /* icbi @Rn */
1587 if (ctx
->features
& SH_FEATURE_SH4A
)
1591 case 0x00ab: /* synco */
1592 if (ctx
->features
& SH_FEATURE_SH4A
)
1596 case 0x4024: /* rotcl Rn */
1598 TCGv tmp
= tcg_temp_new();
1599 tcg_gen_mov_i32(tmp
, cpu_sr
);
1600 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1601 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1602 gen_copy_bit_i32(REG(B11_8
), 0, tmp
, 0);
1606 case 0x4025: /* rotcr Rn */
1608 TCGv tmp
= tcg_temp_new();
1609 tcg_gen_mov_i32(tmp
, cpu_sr
);
1610 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1611 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1612 gen_copy_bit_i32(REG(B11_8
), 31, tmp
, 0);
1616 case 0x4004: /* rotl Rn */
1617 tcg_gen_rotli_i32(REG(B11_8
), REG(B11_8
), 1);
1618 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1620 case 0x4005: /* rotr Rn */
1621 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1622 tcg_gen_rotri_i32(REG(B11_8
), REG(B11_8
), 1);
1624 case 0x4000: /* shll Rn */
1625 case 0x4020: /* shal Rn */
1626 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1627 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1629 case 0x4021: /* shar Rn */
1630 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1631 tcg_gen_sari_i32(REG(B11_8
), REG(B11_8
), 1);
1633 case 0x4001: /* shlr Rn */
1634 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1635 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1637 case 0x4008: /* shll2 Rn */
1638 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 2);
1640 case 0x4018: /* shll8 Rn */
1641 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 8);
1643 case 0x4028: /* shll16 Rn */
1644 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 16);
1646 case 0x4009: /* shlr2 Rn */
1647 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 2);
1649 case 0x4019: /* shlr8 Rn */
1650 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 8);
1652 case 0x4029: /* shlr16 Rn */
1653 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 16);
1655 case 0x401b: /* tas.b @Rn */
1658 addr
= tcg_temp_local_new();
1659 tcg_gen_mov_i32(addr
, REG(B11_8
));
1660 val
= tcg_temp_local_new();
1661 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1662 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1663 tcg_gen_ori_i32(val
, val
, 0x80);
1664 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1666 tcg_temp_free(addr
);
1669 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1671 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1673 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1675 tcg_gen_mov_i32(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1677 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1679 if (ctx
->flags
& FPSCR_PR
) {
1681 if (ctx
->opcode
& 0x0100)
1682 break; /* illegal instruction */
1683 fp
= tcg_temp_new_i64();
1684 gen_helper_float_DT(fp
, cpu_env
, cpu_fpul
);
1685 gen_store_fpr64(fp
, DREG(B11_8
));
1686 tcg_temp_free_i64(fp
);
1689 gen_helper_float_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
, cpu_fpul
);
1692 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1694 if (ctx
->flags
& FPSCR_PR
) {
1696 if (ctx
->opcode
& 0x0100)
1697 break; /* illegal instruction */
1698 fp
= tcg_temp_new_i64();
1699 gen_load_fpr64(fp
, DREG(B11_8
));
1700 gen_helper_ftrc_DT(cpu_fpul
, cpu_env
, fp
);
1701 tcg_temp_free_i64(fp
);
1704 gen_helper_ftrc_FT(cpu_fpul
, cpu_env
, cpu_fregs
[FREG(B11_8
)]);
1707 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1710 gen_helper_fneg_T(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1713 case 0xf05d: /* fabs FRn/DRn */
1715 if (ctx
->flags
& FPSCR_PR
) {
1716 if (ctx
->opcode
& 0x0100)
1717 break; /* illegal instruction */
1718 TCGv_i64 fp
= tcg_temp_new_i64();
1719 gen_load_fpr64(fp
, DREG(B11_8
));
1720 gen_helper_fabs_DT(fp
, fp
);
1721 gen_store_fpr64(fp
, DREG(B11_8
));
1722 tcg_temp_free_i64(fp
);
1724 gen_helper_fabs_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1727 case 0xf06d: /* fsqrt FRn */
1729 if (ctx
->flags
& FPSCR_PR
) {
1730 if (ctx
->opcode
& 0x0100)
1731 break; /* illegal instruction */
1732 TCGv_i64 fp
= tcg_temp_new_i64();
1733 gen_load_fpr64(fp
, DREG(B11_8
));
1734 gen_helper_fsqrt_DT(fp
, cpu_env
, fp
);
1735 gen_store_fpr64(fp
, DREG(B11_8
));
1736 tcg_temp_free_i64(fp
);
1738 gen_helper_fsqrt_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1739 cpu_fregs
[FREG(B11_8
)]);
1742 case 0xf07d: /* fsrra FRn */
1745 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1747 if (!(ctx
->flags
& FPSCR_PR
)) {
1748 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0);
1751 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1753 if (!(ctx
->flags
& FPSCR_PR
)) {
1754 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0x3f800000);
1757 case 0xf0ad: /* fcnvsd FPUL,DRn */
1760 TCGv_i64 fp
= tcg_temp_new_i64();
1761 gen_helper_fcnvsd_FT_DT(fp
, cpu_env
, cpu_fpul
);
1762 gen_store_fpr64(fp
, DREG(B11_8
));
1763 tcg_temp_free_i64(fp
);
1766 case 0xf0bd: /* fcnvds DRn,FPUL */
1769 TCGv_i64 fp
= tcg_temp_new_i64();
1770 gen_load_fpr64(fp
, DREG(B11_8
));
1771 gen_helper_fcnvds_DT_FT(cpu_fpul
, cpu_env
, fp
);
1772 tcg_temp_free_i64(fp
);
1775 case 0xf0ed: /* fipr FVm,FVn */
1777 if ((ctx
->flags
& FPSCR_PR
) == 0) {
1779 m
= tcg_const_i32((ctx
->opcode
>> 8) & 3);
1780 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1781 gen_helper_fipr(cpu_env
, m
, n
);
1787 case 0xf0fd: /* ftrv XMTRX,FVn */
1789 if ((ctx
->opcode
& 0x0300) == 0x0100 &&
1790 (ctx
->flags
& FPSCR_PR
) == 0) {
1792 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1793 gen_helper_ftrv(cpu_env
, n
);
1800 fprintf(stderr
, "unknown instruction 0x%04x at pc 0x%08x\n",
1801 ctx
->opcode
, ctx
->pc
);
1804 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1805 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1806 gen_helper_raise_slot_illegal_instruction(cpu_env
);
1808 gen_helper_raise_illegal_instruction(cpu_env
);
1810 ctx
->bstate
= BS_BRANCH
;
/*
 * decode_opc: translate the single instruction at ctx->pc and perform
 * the delay-slot bookkeeping around it.
 *
 * NOTE(review): this chunk has been line-wrapped by an extraction tool
 * (the leading "18xx" figures are original line numbers, not code) and
 * several original lines -- including the call into the opcode decoder
 * proper and some braces/statements -- are missing from this view.
 * Code is left byte-identical; only comments were added.
 */
1813 static void decode_opc(DisasContext
* ctx
)
/* Snapshot the flags BEFORE decoding: if a delay-slot flag was already
   set, the instruction decoded here is the one in the delay slot. */
1815 uint32_t old_flags
= ctx
->flags
;
/* Emit a per-insn debug marker when TCG op logging is enabled. */
1817 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
1818 tcg_gen_debug_insn_start(ctx
->pc
);
/* If we were in a delay slot, resolve the pending branch now that the
   slot instruction has been translated. */
1823 if (old_flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1824 if (ctx
->flags
& DELAY_SLOT_CLEARME
) {
1827 /* go out of the delay slot */
1828 uint32_t new_flags
= ctx
->flags
;
1829 new_flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
1830 gen_store_flags(new_flags
);
/* A delay slot always terminates the TB: control transfers to the
   branch target (conditionally for bf/s and bt/s). */
1833 ctx
->bstate
= BS_BRANCH
;
1834 if (old_flags
& DELAY_SLOT_CONDITIONAL
) {
1835 gen_delayed_conditional_jump(ctx
);
1836 } else if (old_flags
& DELAY_SLOT
) {
1842 /* go into a delay slot */
1843 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))
1844 gen_store_flags(ctx
->flags
);
/*
 * gen_intermediate_code_internal: main SH4 translation loop.  Fills the
 * current TCG op buffer with the translation of the basic block starting
 * at tb->pc, stopping when a branch/exception ends the block
 * (ctx.bstate != BS_NONE), at a page boundary, on singlestep, or when
 * the op-buffer / icount budget is exhausted.
 *
 * NOTE(review): extractor line-wrapping; the "18xx"/"19xx" figures are
 * original line numbers, and a number of original lines (declarations,
 * braces, break/goto statements, case labels) are absent from this
 * view.  Code left byte-identical; comments only.
 */
1848 gen_intermediate_code_internal(CPUSH4State
* env
, TranslationBlock
* tb
,
1852 target_ulong pc_start
;
1853 static uint16_t *gen_opc_end
;
1860 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
/* Seed the DisasContext from the TB's compile-time flags. */
1862 ctx
.flags
= (uint32_t)tb
->flags
;
1863 ctx
.bstate
= BS_NONE
;
/* memidx: 1 = user, 0 = privileged (SR.MD clear means user mode,
   matching the IS_USER() macro above). */
1864 ctx
.memidx
= (ctx
.flags
& SR_MD
) == 0 ? 1 : 0;
1865 /* We don't know if the delayed pc came from a dynamic or static branch,
1866 so assume it is a dynamic branch. */
1867 ctx
.delayed_pc
= -1; /* use delayed pc from env pointer */
1869 ctx
.singlestep_enabled
= env
->singlestep_enabled
;
1870 ctx
.features
= env
->features
;
1871 ctx
.has_movcal
= (ctx
.flags
& TB_FLAG_PENDING_MOVCA
);
/* Honour an instruction-count request from the TB if present,
   otherwise translate as many instructions as will fit. */
1875 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
1877 max_insns
= CF_COUNT_MASK
;
1879 while (ctx
.bstate
== BS_NONE
&& tcg_ctx
.gen_opc_ptr
< gen_opc_end
) {
/* Stop at a guest breakpoint: sync PC and trap into the debugger. */
1880 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
1881 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1882 if (ctx
.pc
== bp
->pc
) {
1883 /* We have hit a breakpoint - make sure PC is up-to-date */
1884 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1885 gen_helper_debug(cpu_env
);
1886 ctx
.bstate
= BS_BRANCH
;
/* search_pc mode: record per-op guest PC / flags / icount so a host PC
   can later be mapped back to guest state (see restore_state_to_opc). */
1892 i
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
1896 tcg_ctx
.gen_opc_instr_start
[ii
++] = 0;
1898 tcg_ctx
.gen_opc_pc
[ii
] = ctx
.pc
;
1899 gen_opc_hflags
[ii
] = ctx
.flags
;
1900 tcg_ctx
.gen_opc_instr_start
[ii
] = 1;
1901 tcg_ctx
.gen_opc_icount
[ii
] = num_insns
;
1903 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
1906 fprintf(stderr
, "Loading opcode at address 0x%08x\n", ctx
.pc
);
/* Fetch the 16-bit SH4 opcode and translate it. */
1909 ctx
.opcode
= cpu_lduw_code(env
, ctx
.pc
);
/* Loop-exit conditions: page crossing, singlestep, icount budget. */
1913 if ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
1915 if (env
->singlestep_enabled
)
1917 if (num_insns
>= max_insns
)
1922 if (tb
->cflags
& CF_LAST_IO
)
/* Singlestep: sync PC and raise a debug exception after the insn. */
1924 if (env
->singlestep_enabled
) {
1925 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1926 gen_helper_debug(cpu_env
);
/* Close the TB according to how translation stopped. */
1928 switch (ctx
.bstate
) {
1930 /* gen_op_interrupt_restart(); */
1934 gen_store_flags(ctx
.flags
| DELAY_SLOT_CLEARME
);
1936 gen_goto_tb(&ctx
, 0, ctx
.pc
);
1939 /* gen_op_interrupt_restart(); */
1948 gen_tb_end(tb
, num_insns
);
1949 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
/* search_pc: pad the instr_start table out to the last generated op. */
1951 i
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
1954 tcg_ctx
.gen_opc_instr_start
[ii
++] = 0;
1956 tb
->size
= ctx
.pc
- pc_start
;
1957 tb
->icount
= num_insns
;
/* Optionally log the guest disassembly of the translated block. */
1961 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
1962 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
1963 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 0);
1969 void gen_intermediate_code(CPUSH4State
* env
, struct TranslationBlock
*tb
)
1971 gen_intermediate_code_internal(env
, tb
, 0);
1974 void gen_intermediate_code_pc(CPUSH4State
* env
, struct TranslationBlock
*tb
)
1976 gen_intermediate_code_internal(env
, tb
, 1);
1979 void restore_state_to_opc(CPUSH4State
*env
, TranslationBlock
*tb
, int pc_pos
)
1981 env
->pc
= tcg_ctx
.gen_opc_pc
[pc_pos
];
1982 env
->flags
= gen_opc_hflags
[pc_pos
];