1 /*
2 * SH4 translation
4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "tcg/tcg-op.h"
24 #include "exec/helper-proto.h"
25 #include "exec/helper-gen.h"
26 #include "exec/translator.h"
27 #include "exec/log.h"
28 #include "qemu/qemu-print.h"
30 #define HELPER_H "helper.h"
31 #include "exec/helper-info.c.inc"
32 #undef HELPER_H
35 typedef struct DisasContext {
36 DisasContextBase base;
38 uint32_t tbflags; /* should stay unmodified during the TB translation */
39 uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
40 int memidx;
41 int gbank;
42 int fbank;
43 uint32_t delayed_pc;
44 uint32_t features;
46 uint16_t opcode;
48 bool has_movcal;
49 } DisasContext;
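/* A sketch of what the following macros provide (assumption, not upstream text):
   IS_USER() reports whether the translated code runs unprivileged -- always true
   for user-only builds, otherwise derived from the SR.MD bit captured in tbflags.
   UNALIGN() yields the MemOp alignment constraint used by the mov.{w,l} cases. */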
51 #if defined(CONFIG_USER_ONLY)
52 #define IS_USER(ctx) 1
53 #define UNALIGN(C) (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN)
54 #else
55 #define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
56 #define UNALIGN(C) 0
57 #endif
59 /* Target-specific values for ctx->base.is_jmp. */
60 /* We want to exit back to the cpu loop for some reason.
61 Usually this is to recognize interrupts immediately. */
62 #define DISAS_STOP DISAS_TARGET_0
64 /* global register indexes */
65 static TCGv cpu_gregs[32];
66 static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
67 static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
68 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
69 static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
70 static TCGv cpu_lock_addr, cpu_lock_value;
71 static TCGv cpu_fregs[32];
73 /* internal register indexes */
74 static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;
76 void sh4_translate_init(void)
78 int i;
79 static const char * const gregnames[24] = {
80 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
81 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
82 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
83 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
84 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
86 static const char * const fregnames[32] = {
87 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
88 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
89 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
90 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
91 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
92 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
93 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
94 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
97 for (i = 0; i < 24; i++) {
98 cpu_gregs[i] = tcg_global_mem_new_i32(tcg_env,
99 offsetof(CPUSH4State, gregs[i]),
100 gregnames[i]);
102 memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));
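/* Entries 24..31 alias R8..R15 so that the bank-swapping XOR used by
   REG()/ALTREG() (index ^ 0x10) still resolves to the unbanked registers. */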
104 cpu_pc = tcg_global_mem_new_i32(tcg_env,
105 offsetof(CPUSH4State, pc), "PC");
106 cpu_sr = tcg_global_mem_new_i32(tcg_env,
107 offsetof(CPUSH4State, sr), "SR");
108 cpu_sr_m = tcg_global_mem_new_i32(tcg_env,
109 offsetof(CPUSH4State, sr_m), "SR_M");
110 cpu_sr_q = tcg_global_mem_new_i32(tcg_env,
111 offsetof(CPUSH4State, sr_q), "SR_Q");
112 cpu_sr_t = tcg_global_mem_new_i32(tcg_env,
113 offsetof(CPUSH4State, sr_t), "SR_T");
114 cpu_ssr = tcg_global_mem_new_i32(tcg_env,
115 offsetof(CPUSH4State, ssr), "SSR");
116 cpu_spc = tcg_global_mem_new_i32(tcg_env,
117 offsetof(CPUSH4State, spc), "SPC");
118 cpu_gbr = tcg_global_mem_new_i32(tcg_env,
119 offsetof(CPUSH4State, gbr), "GBR");
120 cpu_vbr = tcg_global_mem_new_i32(tcg_env,
121 offsetof(CPUSH4State, vbr), "VBR");
122 cpu_sgr = tcg_global_mem_new_i32(tcg_env,
123 offsetof(CPUSH4State, sgr), "SGR");
124 cpu_dbr = tcg_global_mem_new_i32(tcg_env,
125 offsetof(CPUSH4State, dbr), "DBR");
126 cpu_mach = tcg_global_mem_new_i32(tcg_env,
127 offsetof(CPUSH4State, mach), "MACH");
128 cpu_macl = tcg_global_mem_new_i32(tcg_env,
129 offsetof(CPUSH4State, macl), "MACL");
130 cpu_pr = tcg_global_mem_new_i32(tcg_env,
131 offsetof(CPUSH4State, pr), "PR");
132 cpu_fpscr = tcg_global_mem_new_i32(tcg_env,
133 offsetof(CPUSH4State, fpscr), "FPSCR");
134 cpu_fpul = tcg_global_mem_new_i32(tcg_env,
135 offsetof(CPUSH4State, fpul), "FPUL");
137 cpu_flags = tcg_global_mem_new_i32(tcg_env,
138 offsetof(CPUSH4State, flags), "_flags_");
139 cpu_delayed_pc = tcg_global_mem_new_i32(tcg_env,
140 offsetof(CPUSH4State, delayed_pc),
141 "_delayed_pc_");
142 cpu_delayed_cond = tcg_global_mem_new_i32(tcg_env,
143 offsetof(CPUSH4State,
144 delayed_cond),
145 "_delayed_cond_");
146 cpu_lock_addr = tcg_global_mem_new_i32(tcg_env,
147 offsetof(CPUSH4State, lock_addr),
148 "_lock_addr_");
149 cpu_lock_value = tcg_global_mem_new_i32(tcg_env,
150 offsetof(CPUSH4State, lock_value),
151 "_lock_value_");
153 for (i = 0; i < 32; i++)
154 cpu_fregs[i] = tcg_global_mem_new_i32(tcg_env,
155 offsetof(CPUSH4State, fregs[i]),
156 fregnames[i]);
159 void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags)
161 CPUSH4State *env = cpu_env(cs);
162 int i;
164 qemu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
165 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
166 qemu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
167 env->spc, env->ssr, env->gbr, env->vbr);
168 qemu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
169 env->sgr, env->dbr, env->delayed_pc, env->fpul);
170 for (i = 0; i < 24; i += 4) {
171 qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
172 i, env->gregs[i], i + 1, env->gregs[i + 1],
173 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
175 if (env->flags & TB_FLAG_DELAY_SLOT) {
176 qemu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
177 env->delayed_pc);
178 } else if (env->flags & TB_FLAG_DELAY_SLOT_COND) {
179 qemu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
180 env->delayed_pc);
181 } else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) {
182 qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
183 env->delayed_pc);
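/* SR is kept split across TCG globals: the Q, M and T bits live in
   cpu_sr_q/m/t and cpu_sr holds the remaining bits.  The two helpers
   below reassemble and re-split the architectural SR value. */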
187 static void gen_read_sr(TCGv dst)
189 TCGv t0 = tcg_temp_new();
190 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
191 tcg_gen_or_i32(dst, dst, t0);
192 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
193 tcg_gen_or_i32(dst, dst, t0);
194 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
195 tcg_gen_or_i32(dst, cpu_sr, t0);
198 static void gen_write_sr(TCGv src)
200 tcg_gen_andi_i32(cpu_sr, src,
201 ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
202 tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
203 tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
204 tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
207 static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
209 if (save_pc) {
210 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
212 if (ctx->delayed_pc != (uint32_t) -1) {
213 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
215 if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
216 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
220 static inline bool use_exit_tb(DisasContext *ctx)
222 return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0;
225 static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
227 if (use_exit_tb(ctx)) {
228 return false;
230 return translator_use_goto_tb(&ctx->base, dest);
233 static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
235 if (use_goto_tb(ctx, dest)) {
236 tcg_gen_goto_tb(n);
237 tcg_gen_movi_i32(cpu_pc, dest);
238 tcg_gen_exit_tb(ctx->base.tb, n);
239 } else {
240 tcg_gen_movi_i32(cpu_pc, dest);
241 if (use_exit_tb(ctx)) {
242 tcg_gen_exit_tb(NULL, 0);
243 } else {
244 tcg_gen_lookup_and_goto_ptr();
247 ctx->base.is_jmp = DISAS_NORETURN;
250 static void gen_jump(DisasContext * ctx)
252 if (ctx->delayed_pc == -1) {
253 /* Target is not statically known; it necessarily comes from a
254 delayed jump, as immediate jumps are conditional jumps */
255 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
256 tcg_gen_discard_i32(cpu_delayed_pc);
257 if (use_exit_tb(ctx)) {
258 tcg_gen_exit_tb(NULL, 0);
259 } else {
260 tcg_gen_lookup_and_goto_ptr();
262 ctx->base.is_jmp = DISAS_NORETURN;
263 } else {
264 gen_goto_tb(ctx, 0, ctx->delayed_pc);
268 /* Immediate conditional jump (bt or bf) */
269 static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
270 bool jump_if_true)
272 TCGLabel *l1 = gen_new_label();
273 TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;
275 if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
276 /* When in an exclusive region, we must continue to the end.
277 Therefore, exit the region on a taken branch, but otherwise
278 fall through to the next instruction. */
279 tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
280 tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
281 /* Note that this won't actually use a goto_tb opcode because we
282 disallow it in use_goto_tb, but it handles exit + singlestep. */
283 gen_goto_tb(ctx, 0, dest);
284 gen_set_label(l1);
285 ctx->base.is_jmp = DISAS_NEXT;
286 return;
289 gen_save_cpu_state(ctx, false);
290 tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
291 gen_goto_tb(ctx, 0, dest);
292 gen_set_label(l1);
293 gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
294 ctx->base.is_jmp = DISAS_NORETURN;
297 /* Delayed conditional jump (bt or bf) */
298 static void gen_delayed_conditional_jump(DisasContext * ctx)
300 TCGLabel *l1 = gen_new_label();
301 TCGv ds = tcg_temp_new();
303 tcg_gen_mov_i32(ds, cpu_delayed_cond);
304 tcg_gen_discard_i32(cpu_delayed_cond);
306 if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
307 /* When in an exclusive region, we must continue to the end.
308 Therefore, exit the region on a taken branch, but otherwise
309 fall through to the next instruction. */
310 tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);
312 /* Leave the gUSA region. */
313 tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
314 gen_jump(ctx);
316 gen_set_label(l1);
317 ctx->base.is_jmp = DISAS_NEXT;
318 return;
321 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
322 gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
323 gen_set_label(l1);
324 gen_jump(ctx);
327 static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
329 /* We have already signaled illegal instruction for odd Dr. */
330 tcg_debug_assert((reg & 1) == 0);
331 reg ^= ctx->fbank;
332 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
335 static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
337 /* We have already signaled illegal instruction for odd Dr. */
338 tcg_debug_assert((reg & 1) == 0);
339 reg ^= ctx->fbank;
340 tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
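/* Instruction field extractors: Bx_y pulls bits x..y out of the current
   opcode; the "s"-suffixed variants sign-extend the immediate. */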
343 #define B3_0 (ctx->opcode & 0xf)
344 #define B6_4 ((ctx->opcode >> 4) & 0x7)
345 #define B7_4 ((ctx->opcode >> 4) & 0xf)
346 #define B7_0 (ctx->opcode & 0xff)
347 #define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
348 #define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
349 (ctx->opcode & 0xfff))
350 #define B11_8 ((ctx->opcode >> 8) & 0xf)
351 #define B15_12 ((ctx->opcode >> 12) & 0xf)
353 #define REG(x) cpu_gregs[(x) ^ ctx->gbank]
354 #define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
355 #define FREG(x) cpu_fregs[(x) ^ ctx->fbank]
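/* With FPSCR.SZ set, an odd register field designates the XDn pair in the
   opposite bank: XHACK() moves that low bit up to the bank-select bit and
   keeps the even pair index, to be combined with ctx->fbank by FREG(). */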
357 #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
359 #define CHECK_NOT_DELAY_SLOT \
360 if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) { \
361 goto do_illegal_slot; \
364 #define CHECK_PRIVILEGED \
365 if (IS_USER(ctx)) { \
366 goto do_illegal; \
369 #define CHECK_FPU_ENABLED \
370 if (ctx->tbflags & (1u << SR_FD)) { \
371 goto do_fpu_disabled; \
374 #define CHECK_FPSCR_PR_0 \
375 if (ctx->tbflags & FPSCR_PR) { \
376 goto do_illegal; \
379 #define CHECK_FPSCR_PR_1 \
380 if (!(ctx->tbflags & FPSCR_PR)) { \
381 goto do_illegal; \
384 #define CHECK_SH4A \
385 if (!(ctx->features & SH_FEATURE_SH4A)) { \
386 goto do_illegal; \
389 static void _decode_opc(DisasContext * ctx)
391 /* This code tries to make movca.l emulation sufficiently
392 accurate for Linux purposes. This instruction writes
393 memory, and prior to that, always allocates a cache line.
394 It is used in two contexts:
395 - in memcpy, where data is copied in blocks, the first write
396 to a block uses movca.l for performance.
397 - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is used
398 to flush the cache. Here, the data written by movca.l is never
399 written to memory, and the data written is just bogus.
401 To simulate this, when we emulate movca.l we store the value to memory,
402 but we also remember the previous content. If we see ocbi, we check
403 whether movca.l for that address was done previously. If so, the write should
404 not have hit the memory, so we restore the previous content.
405 When we see an instruction that is neither movca.l
406 nor ocbi, the previous content is discarded.
408 To optimize, we only try to flush stores when we're at the start of
409 TB, or if we already saw movca.l in this TB and did not flush stores
410 yet. */
411 if (ctx->has_movcal)
413 int opcode = ctx->opcode & 0xf0ff;
414 if (opcode != 0x0093 /* ocbi */
415 && opcode != 0x00c3 /* movca.l */)
417 gen_helper_discard_movcal_backup(tcg_env);
418 ctx->has_movcal = 0;
422 #if 0
423 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
424 #endif
426 switch (ctx->opcode) {
427 case 0x0019: /* div0u */
428 tcg_gen_movi_i32(cpu_sr_m, 0);
429 tcg_gen_movi_i32(cpu_sr_q, 0);
430 tcg_gen_movi_i32(cpu_sr_t, 0);
431 return;
432 case 0x000b: /* rts */
433 CHECK_NOT_DELAY_SLOT
434 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
435 ctx->envflags |= TB_FLAG_DELAY_SLOT;
436 ctx->delayed_pc = (uint32_t) - 1;
437 return;
438 case 0x0028: /* clrmac */
439 tcg_gen_movi_i32(cpu_mach, 0);
440 tcg_gen_movi_i32(cpu_macl, 0);
441 return;
442 case 0x0048: /* clrs */
443 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
444 return;
445 case 0x0008: /* clrt */
446 tcg_gen_movi_i32(cpu_sr_t, 0);
447 return;
448 case 0x0038: /* ldtlb */
449 CHECK_PRIVILEGED
450 gen_helper_ldtlb(tcg_env);
451 return;
452 case 0x002b: /* rte */
453 CHECK_PRIVILEGED
454 CHECK_NOT_DELAY_SLOT
455 gen_write_sr(cpu_ssr);
456 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
457 ctx->envflags |= TB_FLAG_DELAY_SLOT_RTE;
458 ctx->delayed_pc = (uint32_t) - 1;
459 ctx->base.is_jmp = DISAS_STOP;
460 return;
461 case 0x0058: /* sets */
462 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
463 return;
464 case 0x0018: /* sett */
465 tcg_gen_movi_i32(cpu_sr_t, 1);
466 return;
467 case 0xfbfd: /* frchg */
468 CHECK_FPSCR_PR_0
469 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
470 ctx->base.is_jmp = DISAS_STOP;
471 return;
472 case 0xf3fd: /* fschg */
473 CHECK_FPSCR_PR_0
474 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
475 ctx->base.is_jmp = DISAS_STOP;
476 return;
477 case 0xf7fd: /* fpchg */
478 CHECK_SH4A
479 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
480 ctx->base.is_jmp = DISAS_STOP;
481 return;
482 case 0x0009: /* nop */
483 return;
484 case 0x001b: /* sleep */
485 CHECK_PRIVILEGED
486 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
487 gen_helper_sleep(tcg_env);
488 return;
491 switch (ctx->opcode & 0xf000) {
492 case 0x1000: /* mov.l Rm,@(disp,Rn) */
494 TCGv addr = tcg_temp_new();
495 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
496 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
497 MO_TEUL | UNALIGN(ctx));
499 return;
500 case 0x5000: /* mov.l @(disp,Rm),Rn */
502 TCGv addr = tcg_temp_new();
503 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
504 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
505 MO_TESL | UNALIGN(ctx));
507 return;
508 case 0xe000: /* mov #imm,Rn */
509 #ifdef CONFIG_USER_ONLY
511 * Detect the start of a gUSA region (mov #-n, r15).
512 * If so, update envflags and end the TB. This will allow us
513 * to see the end of the region (stored in R0) in the next TB.
515 if (B11_8 == 15 && B7_0s < 0 &&
516 (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
517 ctx->envflags =
518 deposit32(ctx->envflags, TB_FLAG_GUSA_SHIFT, 8, B7_0s);
519 ctx->base.is_jmp = DISAS_STOP;
521 #endif
522 tcg_gen_movi_i32(REG(B11_8), B7_0s);
523 return;
524 case 0x9000: /* mov.w @(disp,PC),Rn */
525 CHECK_NOT_DELAY_SLOT
527 TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2);
528 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
529 MO_TESW | MO_ALIGN);
531 return;
532 case 0xd000: /* mov.l @(disp,PC),Rn */
533 CHECK_NOT_DELAY_SLOT
535 TCGv addr = tcg_constant_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
536 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
537 MO_TESL | MO_ALIGN);
539 return;
540 case 0x7000: /* add #imm,Rn */
541 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
542 return;
543 case 0xa000: /* bra disp */
544 CHECK_NOT_DELAY_SLOT
545 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
546 ctx->envflags |= TB_FLAG_DELAY_SLOT;
547 return;
548 case 0xb000: /* bsr disp */
549 CHECK_NOT_DELAY_SLOT
550 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
551 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
552 ctx->envflags |= TB_FLAG_DELAY_SLOT;
553 return;
556 switch (ctx->opcode & 0xf00f) {
557 case 0x6003: /* mov Rm,Rn */
558 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
559 return;
560 case 0x2000: /* mov.b Rm,@Rn */
561 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
562 return;
563 case 0x2001: /* mov.w Rm,@Rn */
564 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
565 MO_TEUW | UNALIGN(ctx));
566 return;
567 case 0x2002: /* mov.l Rm,@Rn */
568 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
569 MO_TEUL | UNALIGN(ctx));
570 return;
571 case 0x6000: /* mov.b @Rm,Rn */
572 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
573 return;
574 case 0x6001: /* mov.w @Rm,Rn */
575 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
576 MO_TESW | UNALIGN(ctx));
577 return;
578 case 0x6002: /* mov.l @Rm,Rn */
579 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
580 MO_TESL | UNALIGN(ctx));
581 return;
582 case 0x2004: /* mov.b Rm,@-Rn */
584 TCGv addr = tcg_temp_new();
585 tcg_gen_subi_i32(addr, REG(B11_8), 1);
586 /* might cause re-execution */
587 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
588 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
590 return;
591 case 0x2005: /* mov.w Rm,@-Rn */
593 TCGv addr = tcg_temp_new();
594 tcg_gen_subi_i32(addr, REG(B11_8), 2);
595 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
596 MO_TEUW | UNALIGN(ctx));
597 tcg_gen_mov_i32(REG(B11_8), addr);
599 return;
600 case 0x2006: /* mov.l Rm,@-Rn */
602 TCGv addr = tcg_temp_new();
603 tcg_gen_subi_i32(addr, REG(B11_8), 4);
604 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
605 MO_TEUL | UNALIGN(ctx));
606 tcg_gen_mov_i32(REG(B11_8), addr);
608 return;
609 case 0x6004: /* mov.b @Rm+,Rn */
610 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
611 if ( B11_8 != B7_4 )
612 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
613 return;
614 case 0x6005: /* mov.w @Rm+,Rn */
615 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
616 MO_TESW | UNALIGN(ctx));
617 if ( B11_8 != B7_4 )
618 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
619 return;
620 case 0x6006: /* mov.l @Rm+,Rn */
621 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
622 MO_TESL | UNALIGN(ctx));
623 if ( B11_8 != B7_4 )
624 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
625 return;
626 case 0x0004: /* mov.b Rm,@(R0,Rn) */
628 TCGv addr = tcg_temp_new();
629 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
630 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
632 return;
633 case 0x0005: /* mov.w Rm,@(R0,Rn) */
635 TCGv addr = tcg_temp_new();
636 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
637 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
638 MO_TEUW | UNALIGN(ctx));
640 return;
641 case 0x0006: /* mov.l Rm,@(R0,Rn) */
643 TCGv addr = tcg_temp_new();
644 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
645 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
646 MO_TEUL | UNALIGN(ctx));
648 return;
649 case 0x000c: /* mov.b @(R0,Rm),Rn */
651 TCGv addr = tcg_temp_new();
652 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
653 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
655 return;
656 case 0x000d: /* mov.w @(R0,Rm),Rn */
658 TCGv addr = tcg_temp_new();
659 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
660 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
661 MO_TESW | UNALIGN(ctx));
663 return;
664 case 0x000e: /* mov.l @(R0,Rm),Rn */
666 TCGv addr = tcg_temp_new();
667 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
668 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
669 MO_TESL | UNALIGN(ctx));
671 return;
672 case 0x6008: /* swap.b Rm,Rn */
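/* Rn = Rm with its two low-order bytes exchanged; the high half of Rm
   is copied through unchanged by the deposit. */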
674 TCGv low = tcg_temp_new();
675 tcg_gen_bswap16_i32(low, REG(B7_4), 0);
676 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
678 return;
679 case 0x6009: /* swap.w Rm,Rn */
680 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
681 return;
682 case 0x200d: /* xtrct Rm,Rn */
684 TCGv high, low;
685 high = tcg_temp_new();
686 tcg_gen_shli_i32(high, REG(B7_4), 16);
687 low = tcg_temp_new();
688 tcg_gen_shri_i32(low, REG(B11_8), 16);
689 tcg_gen_or_i32(REG(B11_8), high, low);
691 return;
692 case 0x300c: /* add Rm,Rn */
693 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
694 return;
695 case 0x300e: /* addc Rm,Rn */
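/* Rn = Rn + Rm + T with T = carry out: the two widening adds leave the
   combined carry in the high half, i.e. in cpu_sr_t. */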
697 TCGv t0, t1;
698 t0 = tcg_constant_tl(0);
699 t1 = tcg_temp_new();
700 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
701 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
702 REG(B11_8), t0, t1, cpu_sr_t);
704 return;
705 case 0x300f: /* addv Rm,Rn */
707 TCGv Rn = REG(B11_8);
708 TCGv Rm = REG(B7_4);
709 TCGv result, t1, t2;
711 result = tcg_temp_new();
712 t1 = tcg_temp_new();
713 t2 = tcg_temp_new();
714 tcg_gen_add_i32(result, Rm, Rn);
715 /* T = ((Rn ^ Rm) & (Result ^ Rn)) >> 31 */
716 tcg_gen_xor_i32(t1, result, Rn);
717 tcg_gen_xor_i32(t2, Rm, Rn);
718 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
719 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
720 tcg_gen_mov_i32(Rn, result);
722 return;
723 case 0x2009: /* and Rm,Rn */
724 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
725 return;
726 case 0x3000: /* cmp/eq Rm,Rn */
727 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
728 return;
729 case 0x3003: /* cmp/ge Rm,Rn */
730 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
731 return;
732 case 0x3007: /* cmp/gt Rm,Rn */
733 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
734 return;
735 case 0x3006: /* cmp/hi Rm,Rn */
736 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
737 return;
738 case 0x3002: /* cmp/hs Rm,Rn */
739 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
740 return;
741 case 0x200c: /* cmp/str Rm,Rn */
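/* T is set if any byte of Rm equals the corresponding byte of Rn:
   xor the operands, then apply the (x - 0x01010101) & ~x & 0x80808080
   zero-byte test to the result. */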
743 TCGv cmp1 = tcg_temp_new();
744 TCGv cmp2 = tcg_temp_new();
745 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
746 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
747 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
748 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
749 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
751 return;
752 case 0x2007: /* div0s Rm,Rn */
753 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
754 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
755 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
756 return;
757 case 0x3004: /* div1 Rm,Rn */
759 TCGv t0 = tcg_temp_new();
760 TCGv t1 = tcg_temp_new();
761 TCGv t2 = tcg_temp_new();
762 TCGv zero = tcg_constant_i32(0);
764 /* shift left arg1, saving the bit being pushed out and inserting
765 T on the right */
766 tcg_gen_shri_i32(t0, REG(B11_8), 31);
767 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
768 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
770 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
771 using 64-bit temps, we compute arg0's high part from q ^ m, so
772 that it is 0x00000000 when adding the value or 0xffffffff when
773 subtracting it. */
774 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
775 tcg_gen_subi_i32(t1, t1, 1);
776 tcg_gen_neg_i32(t2, REG(B7_4));
777 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
778 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
780 /* compute T and Q depending on carry */
781 tcg_gen_andi_i32(t1, t1, 1);
782 tcg_gen_xor_i32(t1, t1, t0);
783 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
784 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
786 return;
787 case 0x300d: /* dmuls.l Rm,Rn */
788 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
789 return;
790 case 0x3005: /* dmulu.l Rm,Rn */
791 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
792 return;
793 case 0x600e: /* exts.b Rm,Rn */
794 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
795 return;
796 case 0x600f: /* exts.w Rm,Rn */
797 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
798 return;
799 case 0x600c: /* extu.b Rm,Rn */
800 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
801 return;
802 case 0x600d: /* extu.w Rm,Rn */
803 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
804 return;
805 case 0x000f: /* mac.l @Rm+,@Rn+ */
807 TCGv arg0, arg1;
808 arg0 = tcg_temp_new();
809 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
810 MO_TESL | MO_ALIGN);
811 arg1 = tcg_temp_new();
812 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
813 MO_TESL | MO_ALIGN);
814 gen_helper_macl(tcg_env, arg0, arg1);
815 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
816 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
818 return;
819 case 0x400f: /* mac.w @Rm+,@Rn+ */
821 TCGv arg0, arg1;
822 arg0 = tcg_temp_new();
823 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
824 MO_TESW | MO_ALIGN);
825 arg1 = tcg_temp_new();
826 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
827 MO_TESW | MO_ALIGN);
828 gen_helper_macw(tcg_env, arg0, arg1);
829 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
830 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
832 return;
833 case 0x0007: /* mul.l Rm,Rn */
834 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
835 return;
836 case 0x200f: /* muls.w Rm,Rn */
838 TCGv arg0, arg1;
839 arg0 = tcg_temp_new();
840 tcg_gen_ext16s_i32(arg0, REG(B7_4));
841 arg1 = tcg_temp_new();
842 tcg_gen_ext16s_i32(arg1, REG(B11_8));
843 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
845 return;
846 case 0x200e: /* mulu.w Rm,Rn */
848 TCGv arg0, arg1;
849 arg0 = tcg_temp_new();
850 tcg_gen_ext16u_i32(arg0, REG(B7_4));
851 arg1 = tcg_temp_new();
852 tcg_gen_ext16u_i32(arg1, REG(B11_8));
853 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
855 return;
856 case 0x600b: /* neg Rm,Rn */
857 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
858 return;
859 case 0x600a: /* negc Rm,Rn */
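/* Rn = 0 - Rm - T with T = borrow: form Rm + T and its carry first, then
   subtract the widened result from zero and keep the borrow bit. */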
861 TCGv t0 = tcg_constant_i32(0);
862 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
863 REG(B7_4), t0, cpu_sr_t, t0);
864 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
865 t0, t0, REG(B11_8), cpu_sr_t);
866 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
868 return;
869 case 0x6007: /* not Rm,Rn */
870 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
871 return;
872 case 0x200b: /* or Rm,Rn */
873 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
874 return;
875 case 0x400c: /* shad Rm,Rn */
877 TCGv t0 = tcg_temp_new();
878 TCGv t1 = tcg_temp_new();
879 TCGv t2 = tcg_temp_new();
881 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
883 /* positive case: shift to the left */
884 tcg_gen_shl_i32(t1, REG(B11_8), t0);
886 /* negative case: shift to the right in two steps to
887 correctly handle the -32 case */
888 tcg_gen_xori_i32(t0, t0, 0x1f);
889 tcg_gen_sar_i32(t2, REG(B11_8), t0);
890 tcg_gen_sari_i32(t2, t2, 1);
892 /* select between the two cases */
893 tcg_gen_movi_i32(t0, 0);
894 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
896 return;
897 case 0x400d: /* shld Rm,Rn */
899 TCGv t0 = tcg_temp_new();
900 TCGv t1 = tcg_temp_new();
901 TCGv t2 = tcg_temp_new();
903 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
905 /* positive case: shift to the left */
906 tcg_gen_shl_i32(t1, REG(B11_8), t0);
908 /* negative case: shift to the right in two steps to
909 correctly handle the -32 case */
910 tcg_gen_xori_i32(t0, t0, 0x1f);
911 tcg_gen_shr_i32(t2, REG(B11_8), t0);
912 tcg_gen_shri_i32(t2, t2, 1);
914 /* select between the two cases */
915 tcg_gen_movi_i32(t0, 0);
916 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
918 return;
919 case 0x3008: /* sub Rm,Rn */
920 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
921 return;
922 case 0x300a: /* subc Rm,Rn */
924 TCGv t0, t1;
925 t0 = tcg_constant_tl(0);
926 t1 = tcg_temp_new();
927 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
928 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
929 REG(B11_8), t0, t1, cpu_sr_t);
930 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
932 return;
933 case 0x300b: /* subv Rm,Rn */
935 TCGv Rn = REG(B11_8);
936 TCGv Rm = REG(B7_4);
937 TCGv result, t1, t2;
939 result = tcg_temp_new();
940 t1 = tcg_temp_new();
941 t2 = tcg_temp_new();
942 tcg_gen_sub_i32(result, Rn, Rm);
943 /* T = ((Rn ^ Rm) & (Result ^ Rn)) >> 31 */
944 tcg_gen_xor_i32(t1, result, Rn);
945 tcg_gen_xor_i32(t2, Rn, Rm);
946 tcg_gen_and_i32(t1, t1, t2);
947 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
948 tcg_gen_mov_i32(Rn, result);
950 return;
951 case 0x2008: /* tst Rm,Rn */
953 TCGv val = tcg_temp_new();
954 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
955 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
957 return;
958 case 0x200a: /* xor Rm,Rn */
959 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
960 return;
961 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
962 CHECK_FPU_ENABLED
963 if (ctx->tbflags & FPSCR_SZ) {
964 int xsrc = XHACK(B7_4);
965 int xdst = XHACK(B11_8);
966 tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
967 tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
968 } else {
969 tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
971 return;
972 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
973 CHECK_FPU_ENABLED
974 if (ctx->tbflags & FPSCR_SZ) {
975 TCGv_i64 fp = tcg_temp_new_i64();
976 gen_load_fpr64(ctx, fp, XHACK(B7_4));
977 tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx,
978 MO_TEUQ | MO_ALIGN);
979 } else {
980 tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx,
981 MO_TEUL | MO_ALIGN);
983 return;
984 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
985 CHECK_FPU_ENABLED
986 if (ctx->tbflags & FPSCR_SZ) {
987 TCGv_i64 fp = tcg_temp_new_i64();
988 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
989 MO_TEUQ | MO_ALIGN);
990 gen_store_fpr64(ctx, fp, XHACK(B11_8));
991 } else {
992 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
993 MO_TEUL | MO_ALIGN);
995 return;
996 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
997 CHECK_FPU_ENABLED
998 if (ctx->tbflags & FPSCR_SZ) {
999 TCGv_i64 fp = tcg_temp_new_i64();
1000 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
1001 MO_TEUQ | MO_ALIGN);
1002 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1003 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1004 } else {
1005 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
1006 MO_TEUL | MO_ALIGN);
1007 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1009 return;
1010 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1011 CHECK_FPU_ENABLED
1013 TCGv addr = tcg_temp_new_i32();
1014 if (ctx->tbflags & FPSCR_SZ) {
1015 TCGv_i64 fp = tcg_temp_new_i64();
1016 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1017 tcg_gen_subi_i32(addr, REG(B11_8), 8);
1018 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
1019 MO_TEUQ | MO_ALIGN);
1020 } else {
1021 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1022 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
1023 MO_TEUL | MO_ALIGN);
1025 tcg_gen_mov_i32(REG(B11_8), addr);
1027 return;
1028 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
1029 CHECK_FPU_ENABLED
1031 TCGv addr = tcg_temp_new_i32();
1032 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1033 if (ctx->tbflags & FPSCR_SZ) {
1034 TCGv_i64 fp = tcg_temp_new_i64();
1035 tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx,
1036 MO_TEUQ | MO_ALIGN);
1037 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1038 } else {
1039 tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx,
1040 MO_TEUL | MO_ALIGN);
1043 return;
1044 case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
1045 CHECK_FPU_ENABLED
1047 TCGv addr = tcg_temp_new();
1048 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1049 if (ctx->tbflags & FPSCR_SZ) {
1050 TCGv_i64 fp = tcg_temp_new_i64();
1051 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1052 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
1053 MO_TEUQ | MO_ALIGN);
1054 } else {
1055 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
1056 MO_TEUL | MO_ALIGN);
1059 return;
1060 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1061 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1062 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1063 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1064 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1065 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1067 CHECK_FPU_ENABLED
1068 if (ctx->tbflags & FPSCR_PR) {
1069 TCGv_i64 fp0, fp1;
1071 if (ctx->opcode & 0x0110) {
1072 goto do_illegal;
1074 fp0 = tcg_temp_new_i64();
1075 fp1 = tcg_temp_new_i64();
1076 gen_load_fpr64(ctx, fp0, B11_8);
1077 gen_load_fpr64(ctx, fp1, B7_4);
1078 switch (ctx->opcode & 0xf00f) {
1079 case 0xf000: /* fadd Rm,Rn */
1080 gen_helper_fadd_DT(fp0, tcg_env, fp0, fp1);
1081 break;
1082 case 0xf001: /* fsub Rm,Rn */
1083 gen_helper_fsub_DT(fp0, tcg_env, fp0, fp1);
1084 break;
1085 case 0xf002: /* fmul Rm,Rn */
1086 gen_helper_fmul_DT(fp0, tcg_env, fp0, fp1);
1087 break;
1088 case 0xf003: /* fdiv Rm,Rn */
1089 gen_helper_fdiv_DT(fp0, tcg_env, fp0, fp1);
1090 break;
1091 case 0xf004: /* fcmp/eq Rm,Rn */
1092 gen_helper_fcmp_eq_DT(cpu_sr_t, tcg_env, fp0, fp1);
1093 return;
1094 case 0xf005: /* fcmp/gt Rm,Rn */
1095 gen_helper_fcmp_gt_DT(cpu_sr_t, tcg_env, fp0, fp1);
1096 return;
1098 gen_store_fpr64(ctx, fp0, B11_8);
1099 } else {
1100 switch (ctx->opcode & 0xf00f) {
1101 case 0xf000: /* fadd Rm,Rn */
1102 gen_helper_fadd_FT(FREG(B11_8), tcg_env,
1103 FREG(B11_8), FREG(B7_4));
1104 break;
1105 case 0xf001: /* fsub Rm,Rn */
1106 gen_helper_fsub_FT(FREG(B11_8), tcg_env,
1107 FREG(B11_8), FREG(B7_4));
1108 break;
1109 case 0xf002: /* fmul Rm,Rn */
1110 gen_helper_fmul_FT(FREG(B11_8), tcg_env,
1111 FREG(B11_8), FREG(B7_4));
1112 break;
1113 case 0xf003: /* fdiv Rm,Rn */
1114 gen_helper_fdiv_FT(FREG(B11_8), tcg_env,
1115 FREG(B11_8), FREG(B7_4));
1116 break;
1117 case 0xf004: /* fcmp/eq Rm,Rn */
1118 gen_helper_fcmp_eq_FT(cpu_sr_t, tcg_env,
1119 FREG(B11_8), FREG(B7_4));
1120 return;
1121 case 0xf005: /* fcmp/gt Rm,Rn */
1122 gen_helper_fcmp_gt_FT(cpu_sr_t, tcg_env,
1123 FREG(B11_8), FREG(B7_4));
1124 return;
1128 return;
1129 case 0xf00e: /* fmac FR0,FRm,FRn */
1130 CHECK_FPU_ENABLED
1131 CHECK_FPSCR_PR_0
1132 gen_helper_fmac_FT(FREG(B11_8), tcg_env,
1133 FREG(0), FREG(B7_4), FREG(B11_8));
1134 return;
1137 switch (ctx->opcode & 0xff00) {
1138 case 0xc900: /* and #imm,R0 */
1139 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1140 return;
1141 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1143 TCGv addr, val;
1144 addr = tcg_temp_new();
1145 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1146 val = tcg_temp_new();
1147 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1148 tcg_gen_andi_i32(val, val, B7_0);
1149 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1151 return;
1152 case 0x8b00: /* bf label */
1153 CHECK_NOT_DELAY_SLOT
1154 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
1155 return;
1156 case 0x8f00: /* bf/s label */
1157 CHECK_NOT_DELAY_SLOT
1158 tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
1159 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
1160 ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
1161 return;
1162 case 0x8900: /* bt label */
1163 CHECK_NOT_DELAY_SLOT
1164 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
1165 return;
1166 case 0x8d00: /* bt/s label */
1167 CHECK_NOT_DELAY_SLOT
1168 tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
1169 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
1170 ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
1171 return;
1172 case 0x8800: /* cmp/eq #imm,R0 */
1173 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
1174 return;
1175 case 0xc400: /* mov.b @(disp,GBR),R0 */
1177 TCGv addr = tcg_temp_new();
1178 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1179 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1181 return;
1182 case 0xc500: /* mov.w @(disp,GBR),R0 */
1184 TCGv addr = tcg_temp_new();
1185 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1186 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW | MO_ALIGN);
1188 return;
1189 case 0xc600: /* mov.l @(disp,GBR),R0 */
1191 TCGv addr = tcg_temp_new();
1192 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1193 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL | MO_ALIGN);
1195 return;
1196 case 0xc000: /* mov.b R0,@(disp,GBR) */
1198 TCGv addr = tcg_temp_new();
1199 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1200 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1202 return;
1203 case 0xc100: /* mov.w R0,@(disp,GBR) */
1205 TCGv addr = tcg_temp_new();
1206 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1207 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW | MO_ALIGN);
1209 return;
1210 case 0xc200: /* mov.l R0,@(disp,GBR) */
1212 TCGv addr = tcg_temp_new();
1213 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1214 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL | MO_ALIGN);
1216 return;
1217 case 0x8000: /* mov.b R0,@(disp,Rn) */
1219 TCGv addr = tcg_temp_new();
1220 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1221 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1223 return;
1224 case 0x8100: /* mov.w R0,@(disp,Rn) */
1226 TCGv addr = tcg_temp_new();
1227 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1228 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx,
1229 MO_TEUW | UNALIGN(ctx));
1231 return;
1232 case 0x8400: /* mov.b @(disp,Rn),R0 */
1234 TCGv addr = tcg_temp_new();
1235 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1236 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1238 return;
1239 case 0x8500: /* mov.w @(disp,Rn),R0 */
1241 TCGv addr = tcg_temp_new();
1242 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1243 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx,
1244 MO_TESW | UNALIGN(ctx));
1246 return;
1247 case 0xc700: /* mova @(disp,PC),R0 */
1248 CHECK_NOT_DELAY_SLOT
1249 tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
1250 4 + B7_0 * 4) & ~3);
1251 return;
1252 case 0xcb00: /* or #imm,R0 */
1253 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1254 return;
1255 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1257 TCGv addr, val;
1258 addr = tcg_temp_new();
1259 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1260 val = tcg_temp_new();
1261 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1262 tcg_gen_ori_i32(val, val, B7_0);
1263 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1265 return;
1266 case 0xc300: /* trapa #imm */
1268 TCGv imm;
1269 CHECK_NOT_DELAY_SLOT
1270 gen_save_cpu_state(ctx, true);
1271 imm = tcg_constant_i32(B7_0);
1272 gen_helper_trapa(tcg_env, imm);
1273 ctx->base.is_jmp = DISAS_NORETURN;
1275 return;
1276 case 0xc800: /* tst #imm,R0 */
1278 TCGv val = tcg_temp_new();
1279 tcg_gen_andi_i32(val, REG(0), B7_0);
1280 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1282 return;
1283 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1285 TCGv val = tcg_temp_new();
1286 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1287 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1288 tcg_gen_andi_i32(val, val, B7_0);
1289 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1291 return;
1292 case 0xca00: /* xor #imm,R0 */
1293 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1294 return;
1295 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1297 TCGv addr, val;
1298 addr = tcg_temp_new();
1299 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1300 val = tcg_temp_new();
1301 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1302 tcg_gen_xori_i32(val, val, B7_0);
1303 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1305 return;
1308 switch (ctx->opcode & 0xf08f) {
1309 case 0x408e: /* ldc Rm,Rn_BANK */
1310 CHECK_PRIVILEGED
1311 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1312 return;
1313 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1314 CHECK_PRIVILEGED
1315 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx,
1316 MO_TESL | MO_ALIGN);
1317 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1318 return;
1319 case 0x0082: /* stc Rm_BANK,Rn */
1320 CHECK_PRIVILEGED
1321 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1322 return;
1323 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1324 CHECK_PRIVILEGED
1326 TCGv addr = tcg_temp_new();
1327 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1328 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx,
1329 MO_TEUL | MO_ALIGN);
1330 tcg_gen_mov_i32(REG(B11_8), addr);
1332 return;
1335 switch (ctx->opcode & 0xf0ff) {
1336 case 0x0023: /* braf Rn */
1337 CHECK_NOT_DELAY_SLOT
1338 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
1339 ctx->envflags |= TB_FLAG_DELAY_SLOT;
1340 ctx->delayed_pc = (uint32_t) - 1;
1341 return;
1342 case 0x0003: /* bsrf Rn */
1343 CHECK_NOT_DELAY_SLOT
1344 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
1345 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1346 ctx->envflags |= TB_FLAG_DELAY_SLOT;
1347 ctx->delayed_pc = (uint32_t) - 1;
1348 return;
1349 case 0x4015: /* cmp/pl Rn */
1350 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
1351 return;
1352 case 0x4011: /* cmp/pz Rn */
1353 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
1354 return;
1355 case 0x4010: /* dt Rn */
1356 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1357 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
1358 return;
1359 case 0x402b: /* jmp @Rn */
1360 CHECK_NOT_DELAY_SLOT
1361 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1362 ctx->envflags |= TB_FLAG_DELAY_SLOT;
1363 ctx->delayed_pc = (uint32_t) - 1;
1364 return;
1365 case 0x400b: /* jsr @Rn */
1366 CHECK_NOT_DELAY_SLOT
1367 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
1368 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1369 ctx->envflags |= TB_FLAG_DELAY_SLOT;
1370 ctx->delayed_pc = (uint32_t) - 1;
1371 return;
1372 case 0x400e: /* ldc Rm,SR */
1373 CHECK_PRIVILEGED
1375 TCGv val = tcg_temp_new();
1376 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1377 gen_write_sr(val);
1378 ctx->base.is_jmp = DISAS_STOP;
1380 return;
1381 case 0x4007: /* ldc.l @Rm+,SR */
1382 CHECK_PRIVILEGED
1384 TCGv val = tcg_temp_new();
1385 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
1386 MO_TESL | MO_ALIGN);
1387 tcg_gen_andi_i32(val, val, 0x700083f3);
1388 gen_write_sr(val);
1389 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1390 ctx->base.is_jmp = DISAS_STOP;
1392 return;
1393 case 0x0002: /* stc SR,Rn */
1394 CHECK_PRIVILEGED
1395 gen_read_sr(REG(B11_8));
1396 return;
1397 case 0x4003: /* stc SR,@-Rn */
1398 CHECK_PRIVILEGED
1400 TCGv addr = tcg_temp_new();
1401 TCGv val = tcg_temp_new();
1402 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1403 gen_read_sr(val);
1404 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
1405 tcg_gen_mov_i32(REG(B11_8), addr);
1407 return;
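/* The LD/ST/LDST macros below expand into the cases that move a control or
   system register to/from Rn, including the post-increment load and
   pre-decrement store forms, each guarded by the given privilege/FPU check. */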
1408 #define LD(reg,ldnum,ldpnum,prechk) \
1409 case ldnum: \
1410 prechk \
1411 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1412 return; \
1413 case ldpnum: \
1414 prechk \
1415 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, \
1416 MO_TESL | MO_ALIGN); \
1417 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1418 return;
1419 #define ST(reg,stnum,stpnum,prechk) \
1420 case stnum: \
1421 prechk \
1422 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1423 return; \
1424 case stpnum: \
1425 prechk \
1427 TCGv addr = tcg_temp_new(); \
1428 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1429 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, \
1430 MO_TEUL | MO_ALIGN); \
1431 tcg_gen_mov_i32(REG(B11_8), addr); \
1433 return;
1434 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1435 LD(reg,ldnum,ldpnum,prechk) \
1436 ST(reg,stnum,stpnum,prechk)
1437 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1438 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1439 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1440 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1441 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1442 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
1443 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1444 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1445 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1446 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1447 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1448 case 0x406a: /* lds Rm,FPSCR */
1449 CHECK_FPU_ENABLED
1450 gen_helper_ld_fpscr(tcg_env, REG(B11_8));
1451 ctx->base.is_jmp = DISAS_STOP;
1452 return;
1453 case 0x4066: /* lds.l @Rm+,FPSCR */
1454 CHECK_FPU_ENABLED
1456 TCGv addr = tcg_temp_new();
1457 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx,
1458 MO_TESL | MO_ALIGN);
1459 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1460 gen_helper_ld_fpscr(tcg_env, addr);
1461 ctx->base.is_jmp = DISAS_STOP;
1463 return;
1464 case 0x006a: /* sts FPSCR,Rn */
1465 CHECK_FPU_ENABLED
1466 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1467 return;
1468 case 0x4062: /* sts FPSCR,@-Rn */
1469 CHECK_FPU_ENABLED
1471 TCGv addr, val;
1472 val = tcg_temp_new();
1473 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1474 addr = tcg_temp_new();
1475 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1476 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
1477 tcg_gen_mov_i32(REG(B11_8), addr);
1479 return;
1480 case 0x00c3: /* movca.l R0,@Rm */
1482 TCGv val = tcg_temp_new();
1483 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
1484 MO_TEUL | MO_ALIGN);
1485 gen_helper_movcal(tcg_env, REG(B11_8), val);
1486 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
1487 MO_TEUL | MO_ALIGN);
1489 ctx->has_movcal = 1;
1490 return;
1491 case 0x40a9: /* movua.l @Rm,R0 */
1492 CHECK_SH4A
1493 /* Load non-boundary-aligned data */
1494 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1495 MO_TEUL | MO_UNALN);
1496 return;
1497 case 0x40e9: /* movua.l @Rm+,R0 */
1498 CHECK_SH4A
1499 /* Load non-boundary-aligned data */
1500 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1501 MO_TEUL | MO_UNALN);
1502 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1503 return;
1504 case 0x0029: /* movt Rn */
1505 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1506 return;
1507 case 0x0073:
1508 /* MOVCO.L
1509 * LDST -> T
1510 * If (T == 1) R0 -> (Rn)
1511 * 0 -> LDST
1513 * The above description doesn't work in a parallel context.
1514 * Since we currently support no smp boards, this implies user-mode.
1515 * But we can still support the official mechanism while user-mode
1516 * is single-threaded. */
1517 CHECK_SH4A
1519 TCGLabel *fail = gen_new_label();
1520 TCGLabel *done = gen_new_label();
1522 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1523 TCGv tmp;
1525 tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
1526 cpu_lock_addr, fail);
1527 tmp = tcg_temp_new();
1528 tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
1529 REG(0), ctx->memidx,
1530 MO_TEUL | MO_ALIGN);
1531 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
1532 } else {
1533 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
1534 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
1535 MO_TEUL | MO_ALIGN);
1536 tcg_gen_movi_i32(cpu_sr_t, 1);
1538 tcg_gen_br(done);
1540 gen_set_label(fail);
1541 tcg_gen_movi_i32(cpu_sr_t, 0);
1543 gen_set_label(done);
1544 tcg_gen_movi_i32(cpu_lock_addr, -1);
1546 return;
1547 case 0x0063:
1548 /* MOVLI.L @Rm,R0
1549 * 1 -> LDST
1550 * (Rm) -> R0
1551 * When interrupt/exception
1552 * occurred 0 -> LDST
1554 * In a parallel context, we must also save the loaded value
1555 * for use with the cmpxchg that we'll use with movco.l. */
1556 CHECK_SH4A
1557 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1558 TCGv tmp = tcg_temp_new();
1559 tcg_gen_mov_i32(tmp, REG(B11_8));
1560 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1561 MO_TESL | MO_ALIGN);
1562 tcg_gen_mov_i32(cpu_lock_value, REG(0));
1563 tcg_gen_mov_i32(cpu_lock_addr, tmp);
1564 } else {
1565 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1566 MO_TESL | MO_ALIGN);
1567 tcg_gen_movi_i32(cpu_lock_addr, 0);
1569 return;
1570 case 0x0093: /* ocbi @Rn */
1572 gen_helper_ocbi(tcg_env, REG(B11_8));
1574 return;
1575 case 0x00a3: /* ocbp @Rn */
1576 case 0x00b3: /* ocbwb @Rn */
1577 /* These instructions are supposed to do nothing in case of
1578 a cache miss. Given that we only partially emulate caches
1579 it is safe to simply ignore them. */
1580 return;
1581 case 0x0083: /* pref @Rn */
1582 return;
1583 case 0x00d3: /* prefi @Rn */
1584 CHECK_SH4A
1585 return;
1586 case 0x00e3: /* icbi @Rn */
1587 CHECK_SH4A
1588 return;
1589 case 0x00ab: /* synco */
1590 CHECK_SH4A
1591 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1592 return;
1593 case 0x4024: /* rotcl Rn */
1595 TCGv tmp = tcg_temp_new();
1596 tcg_gen_mov_i32(tmp, cpu_sr_t);
1597 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1598 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1599 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1601 return;
1602 case 0x4025: /* rotcr Rn */
1604 TCGv tmp = tcg_temp_new();
1605 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1606 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1607 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1608 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1610 return;
1611 case 0x4004: /* rotl Rn */
1612 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1613 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1); /* T = bit rotated out */
1614 return;
1615 case 0x4005: /* rotr Rn */
1616 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1); /* T = bit rotated out */
1617 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1618 return;
1619 case 0x4000: /* shll Rn */
1620 case 0x4020: /* shal Rn */
1621 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1622 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1623 return;
1624 case 0x4021: /* shar Rn */
1625 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1626 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1627 return;
1628 case 0x4001: /* shlr Rn */
1629 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1630 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1631 return;
1632 case 0x4008: /* shll2 Rn */
1633 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1634 return;
1635 case 0x4018: /* shll8 Rn */
1636 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1637 return;
1638 case 0x4028: /* shll16 Rn */
1639 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1640 return;
1641 case 0x4009: /* shlr2 Rn */
1642 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1643 return;
1644 case 0x4019: /* shlr8 Rn */
1645 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1646 return;
1647 case 0x4029: /* shlr16 Rn */
1648 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1649 return;
1650 case 0x401b: /* tas.b @Rn */
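/* Atomically OR 0x80 into the byte at @Rn; T is set if the byte was zero
   before the update. */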
1651 tcg_gen_atomic_fetch_or_i32(cpu_sr_t, REG(B11_8),
1652 tcg_constant_i32(0x80), ctx->memidx, MO_UB);
1653 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, cpu_sr_t, 0);
1654 return;
1655 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1656 CHECK_FPU_ENABLED
1657 tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
1658 return;
1659 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1660 CHECK_FPU_ENABLED
1661 tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
1662 return;
1663 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1664 CHECK_FPU_ENABLED
1665 if (ctx->tbflags & FPSCR_PR) {
1666 TCGv_i64 fp;
1667 if (ctx->opcode & 0x0100) {
1668 goto do_illegal;
1670 fp = tcg_temp_new_i64();
1671 gen_helper_float_DT(fp, tcg_env, cpu_fpul);
1672 gen_store_fpr64(ctx, fp, B11_8);
1674 else {
1675 gen_helper_float_FT(FREG(B11_8), tcg_env, cpu_fpul);
1677 return;
1678 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1679 CHECK_FPU_ENABLED
1680 if (ctx->tbflags & FPSCR_PR) {
1681 TCGv_i64 fp;
1682 if (ctx->opcode & 0x0100) {
1683 goto do_illegal;
1685 fp = tcg_temp_new_i64();
1686 gen_load_fpr64(ctx, fp, B11_8);
1687 gen_helper_ftrc_DT(cpu_fpul, tcg_env, fp);
1689 else {
1690 gen_helper_ftrc_FT(cpu_fpul, tcg_env, FREG(B11_8));
1692 return;
1693 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1694 CHECK_FPU_ENABLED
1695 tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
1696 return;
1697 case 0xf05d: /* fabs FRn/DRn - FPCSR: Nothing */
1698 CHECK_FPU_ENABLED
1699 tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
1700 return;
1701 case 0xf06d: /* fsqrt FRn */
1702 CHECK_FPU_ENABLED
1703 if (ctx->tbflags & FPSCR_PR) {
1704 if (ctx->opcode & 0x0100) {
1705 goto do_illegal;
1707 TCGv_i64 fp = tcg_temp_new_i64();
1708 gen_load_fpr64(ctx, fp, B11_8);
1709 gen_helper_fsqrt_DT(fp, tcg_env, fp);
1710 gen_store_fpr64(ctx, fp, B11_8);
1711 } else {
1712 gen_helper_fsqrt_FT(FREG(B11_8), tcg_env, FREG(B11_8));
1714 return;
1715 case 0xf07d: /* fsrra FRn */
1716 CHECK_FPU_ENABLED
1717 CHECK_FPSCR_PR_0
1718 gen_helper_fsrra_FT(FREG(B11_8), tcg_env, FREG(B11_8));
1719 break;
1720 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1721 CHECK_FPU_ENABLED
1722 CHECK_FPSCR_PR_0
1723 tcg_gen_movi_i32(FREG(B11_8), 0);
1724 return;
1725 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1726 CHECK_FPU_ENABLED
1727 CHECK_FPSCR_PR_0
1728 tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1729 return;
1730 case 0xf0ad: /* fcnvsd FPUL,DRn */
1731 CHECK_FPU_ENABLED
1733 TCGv_i64 fp = tcg_temp_new_i64();
1734 gen_helper_fcnvsd_FT_DT(fp, tcg_env, cpu_fpul);
1735 gen_store_fpr64(ctx, fp, B11_8);
1737 return;
1738 case 0xf0bd: /* fcnvds DRn,FPUL */
1739 CHECK_FPU_ENABLED
1741 TCGv_i64 fp = tcg_temp_new_i64();
1742 gen_load_fpr64(ctx, fp, B11_8);
1743 gen_helper_fcnvds_DT_FT(cpu_fpul, tcg_env, fp);
1745 return;
1746 case 0xf0ed: /* fipr FVm,FVn */
1747 CHECK_FPU_ENABLED
1748 CHECK_FPSCR_PR_1
1750 TCGv m = tcg_constant_i32((ctx->opcode >> 8) & 3);
1751 TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
1752 gen_helper_fipr(tcg_env, m, n);
1753 return;
1755 break;
1756 case 0xf0fd: /* ftrv XMTRX,FVn */
1757 CHECK_FPU_ENABLED
1758 CHECK_FPSCR_PR_1
1760 if ((ctx->opcode & 0x0300) != 0x0100) {
1761 goto do_illegal;
1763 TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
1764 gen_helper_ftrv(tcg_env, n);
1765 return;
1767 break;
1769 #if 0
1770 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1771 ctx->opcode, ctx->base.pc_next);
1772 fflush(stderr);
1773 #endif
1774 do_illegal:
1775 if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
1776 do_illegal_slot:
1777 gen_save_cpu_state(ctx, true);
1778 gen_helper_raise_slot_illegal_instruction(tcg_env);
1779 } else {
1780 gen_save_cpu_state(ctx, true);
1781 gen_helper_raise_illegal_instruction(tcg_env);
1783 ctx->base.is_jmp = DISAS_NORETURN;
1784 return;
1786 do_fpu_disabled:
1787 gen_save_cpu_state(ctx, true);
1788 if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
1789 gen_helper_raise_slot_fpu_disable(tcg_env);
1790 } else {
1791 gen_helper_raise_fpu_disable(tcg_env);
1793 ctx->base.is_jmp = DISAS_NORETURN;
1794 return;
1797 static void decode_opc(DisasContext * ctx)
1799 uint32_t old_flags = ctx->envflags;
1801 _decode_opc(ctx);
1803 if (old_flags & TB_FLAG_DELAY_SLOT_MASK) {
1804 /* go out of the delay slot */
1805 ctx->envflags &= ~TB_FLAG_DELAY_SLOT_MASK;
1807 /* When in an exclusive region, we must continue to the end
1808 for conditional branches. */
1809 if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE
1810 && old_flags & TB_FLAG_DELAY_SLOT_COND) {
1811 gen_delayed_conditional_jump(ctx);
1812 return;
1814 /* Otherwise this is probably an invalid gUSA region.
1815 Drop the GUSA bits so the next TB doesn't see them. */
1816 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
1818 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
1819 if (old_flags & TB_FLAG_DELAY_SLOT_COND) {
1820 gen_delayed_conditional_jump(ctx);
1821 } else {
1822 gen_jump(ctx);
1827 #ifdef CONFIG_USER_ONLY
1829 * Restart with the EXCLUSIVE bit set, within a TB run via
1830 * cpu_exec_step_atomic holding the exclusive lock.
1832 static void gen_restart_exclusive(DisasContext *ctx)
1834 ctx->envflags |= TB_FLAG_GUSA_EXCLUSIVE;
1835 gen_save_cpu_state(ctx, false);
1836 gen_helper_exclusive(tcg_env);
1837 ctx->base.is_jmp = DISAS_NORETURN;
1840 /* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
1841 Upon an interrupt, a real kernel would simply notice magic values in
1842 the registers and reset the PC to the start of the sequence.
1844 For QEMU, we cannot do this in quite the same way. Instead, we notice
1845 the normal start of such a sequence (mov #-x,r15). While we can handle
1846 any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1847 sequences and transform them into atomic operations as seen by the host.
1848 */
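/*
 * Illustrative sketch (not taken from this file; register numbers and
 * the exact prologue are arbitrary): a gUSA atomic-add region as laid
 * out by the toolchain looks roughly like
 *
 *        mova   1f,r0       ! r0 = address just past the region
 *        mov    r15,r1      ! save the stack pointer
 *        mov    #-6,r15     ! enter gUSA: r15 = -(region length)
 *        mov.l  @r2,r3      ! load       <-- pc
 *        add    r4,r3       ! operate
 *        mov.l  r3,@r2      ! store
 *     1: mov    r1,r15      ! leave gUSA <-- pc_end
 *
 * decode_gusa() parses only the load/op/store body between pc and
 * pc_end and, when it matches one of the shapes below, emits a single
 * host atomic operation in its place.
 */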
1849 static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
1850 {
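/*
 * Pattern-matching state: ld_* describe the opening load, mv_src an
 * optional register copy, op_* the ALU operation, mt_dst an optional
 * movt destination, and st_* the closing store; -1 (NULL for op_arg)
 * means "not seen yet".
 */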
1851 uint16_t insns[5];
1852 int ld_adr, ld_dst, ld_mop;
1853 int op_dst, op_src, op_opc;
1854 int mv_src, mt_dst, st_src, st_mop;
1855 TCGv op_arg;
1856 uint32_t pc = ctx->base.pc_next;
1857 uint32_t pc_end = ctx->base.tb->cs_base;
1858 int max_insns = (pc_end - pc) / 2;
1859 int i;
1861 /* The state machine below will consume only a few insns.
1862 If there are more than that in a region, fail now. */
1863 if (max_insns > ARRAY_SIZE(insns)) {
1864 goto fail;
1865 }
1867 /* Read all of the insns for the region. */
1868 for (i = 0; i < max_insns; ++i) {
1869 insns[i] = translator_lduw(env, &ctx->base, pc + i * 2);
1870 }
1872 ld_adr = ld_dst = ld_mop = -1;
1873 mv_src = -1;
1874 op_dst = op_src = op_opc = -1;
1875 mt_dst = -1;
1876 st_src = st_mop = -1;
1877 op_arg = NULL;
1878 i = 0;
1880 #define NEXT_INSN \
1881 do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
1883 /*
1884 * Expect a load to begin the region.
1885 */
1886 NEXT_INSN;
1887 switch (ctx->opcode & 0xf00f) {
1888 case 0x6000: /* mov.b @Rm,Rn */
1889 ld_mop = MO_SB;
1890 break;
1891 case 0x6001: /* mov.w @Rm,Rn */
1892 ld_mop = MO_TESW;
1893 break;
1894 case 0x6002: /* mov.l @Rm,Rn */
1895 ld_mop = MO_TESL;
1896 break;
1897 default:
1898 goto fail;
1899 }
1900 ld_adr = B7_4;
1901 ld_dst = B11_8;
1902 if (ld_adr == ld_dst) {
1903 goto fail;
1904 }
1905 /* Unless we see a mov, any two-operand operation must use ld_dst. */
1906 op_dst = ld_dst;
1908 /*
1909 * Expect an optional register move.
1910 */
1911 NEXT_INSN;
1912 switch (ctx->opcode & 0xf00f) {
1913 case 0x6003: /* mov Rm,Rn */
1914 /*
1915 * Here we want to recognize ld_dst being saved for later consumption,
1916 * or for another input register being copied so that ld_dst need not
1917 * be clobbered during the operation.
1918 */
1919 op_dst = B11_8;
1920 mv_src = B7_4;
1921 if (op_dst == ld_dst) {
1922 /* Overwriting the load output. */
1923 goto fail;
1924 }
1925 if (mv_src != ld_dst) {
1926 /* Copying a new input; constrain op_src to match the load. */
1927 op_src = ld_dst;
1928 }
1929 break;
1931 default:
1932 /* Put back and re-examine as operation. */
1933 --i;
1934 }
1936 /*
1937 * Expect the operation.
1938 */
1939 NEXT_INSN;
1940 switch (ctx->opcode & 0xf00f) {
1941 case 0x300c: /* add Rm,Rn */
1942 op_opc = INDEX_op_add_i32;
1943 goto do_reg_op;
1944 case 0x2009: /* and Rm,Rn */
1945 op_opc = INDEX_op_and_i32;
1946 goto do_reg_op;
1947 case 0x200a: /* xor Rm,Rn */
1948 op_opc = INDEX_op_xor_i32;
1949 goto do_reg_op;
1950 case 0x200b: /* or Rm,Rn */
1951 op_opc = INDEX_op_or_i32;
1952 do_reg_op:
1953 /* The operation register should be as expected, and the
1954 other input cannot depend on the load. */
1955 if (op_dst != B11_8) {
1956 goto fail;
1957 }
1958 if (op_src < 0) {
1959 /* Unconstrained input. */
1960 op_src = B7_4;
1961 } else if (op_src == B7_4) {
1962 /* Constrained input matched load. All operations are
1963 commutative; "swap" them by "moving" the load output
1964 to the (implicit) first argument and the move source
1965 to the (explicit) second argument. */
1966 op_src = mv_src;
1967 } else {
1968 goto fail;
1969 }
1970 op_arg = REG(op_src);
1971 break;
1973 case 0x6007: /* not Rm,Rn */
1974 if (ld_dst != B7_4 || mv_src >= 0) {
1975 goto fail;
1976 }
1977 op_dst = B11_8;
1978 op_opc = INDEX_op_xor_i32;
1979 op_arg = tcg_constant_i32(-1);
1980 break;
1982 case 0x7000 ... 0x700f: /* add #imm,Rn */
1983 if (op_dst != B11_8 || mv_src >= 0) {
1984 goto fail;
1985 }
1986 op_opc = INDEX_op_add_i32;
1987 op_arg = tcg_constant_i32(B7_0s);
1988 break;
1990 case 0x3000: /* cmp/eq Rm,Rn */
1991 /* Looking for the middle of a compare-and-swap sequence,
1992 beginning with the compare. Operands can be either order,
1993 but with only one overlapping the load. */
1994 if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
1995 goto fail;
1996 }
1997 op_opc = INDEX_op_setcond_i32; /* placeholder */
1998 op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
1999 op_arg = REG(op_src);
2001 NEXT_INSN;
2002 switch (ctx->opcode & 0xff00) {
2003 case 0x8b00: /* bf label */
2004 case 0x8f00: /* bf/s label */
2005 if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
2006 goto fail;
2007 }
2008 if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
2009 break;
2010 }
2011 /* We're looking to unconditionally modify Rn with the
2012 result of the comparison, within the delay slot of
2013 the branch. This is used by older gcc. */
2014 NEXT_INSN;
2015 if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
2016 mt_dst = B11_8;
2017 } else {
2018 goto fail;
2019 }
2020 break;
2022 default:
2023 goto fail;
2024 }
2025 break;
2027 case 0x2008: /* tst Rm,Rn */
2028 /* Looking for a compare-and-swap against zero. */
2029 if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
2030 goto fail;
2031 }
2032 op_opc = INDEX_op_setcond_i32;
2033 op_arg = tcg_constant_i32(0);
2035 NEXT_INSN;
2036 if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
2037 || pc + (i + 1 + B7_0s) * 2 != pc_end) {
2038 goto fail;
2039 }
2040 break;
2042 default:
2043 /* Put back and re-examine as store. */
2044 --i;
2045 }
2047 /*
2048 * Expect the store.
2049 */
2050 /* The store must be the last insn. */
2051 if (i != max_insns - 1) {
2052 goto fail;
2053 }
2054 NEXT_INSN;
2055 switch (ctx->opcode & 0xf00f) {
2056 case 0x2000: /* mov.b Rm,@Rn */
2057 st_mop = MO_UB;
2058 break;
2059 case 0x2001: /* mov.w Rm,@Rn */
2060 st_mop = MO_UW;
2061 break;
2062 case 0x2002: /* mov.l Rm,@Rn */
2063 st_mop = MO_UL;
2064 break;
2065 default:
2066 goto fail;
2067 }
2068 /* The store must match the load. */
2069 if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
2070 goto fail;
2071 }
2072 st_src = B7_4;
2074 #undef NEXT_INSN
2076 /*
2077 * Emit the operation.
2078 */
2079 switch (op_opc) {
2080 case -1:
2081 /* No operation found. Look for exchange pattern. */
2082 if (st_src == ld_dst || mv_src >= 0) {
2083 goto fail;
2084 }
2085 tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
2086 ctx->memidx, ld_mop);
2087 break;
2089 case INDEX_op_add_i32:
2090 if (op_dst != st_src) {
2091 goto fail;
2092 }
2093 if (op_dst == ld_dst && st_mop == MO_UL) {
2094 tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
2095 op_arg, ctx->memidx, ld_mop);
2096 } else {
2097 tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
2098 op_arg, ctx->memidx, ld_mop);
2099 if (op_dst != ld_dst) {
2100 /* Note that mop sizes < 4 cannot use add_fetch
2101 because it won't carry into the higher bits. */
2102 tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
2103 }
2104 }
2105 break;
2107 case INDEX_op_and_i32:
2108 if (op_dst != st_src) {
2109 goto fail;
2110 }
2111 if (op_dst == ld_dst) {
2112 tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
2113 op_arg, ctx->memidx, ld_mop);
2114 } else {
2115 tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
2116 op_arg, ctx->memidx, ld_mop);
2117 tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
2118 }
2119 break;
2121 case INDEX_op_or_i32:
2122 if (op_dst != st_src) {
2123 goto fail;
2124 }
2125 if (op_dst == ld_dst) {
2126 tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
2127 op_arg, ctx->memidx, ld_mop);
2128 } else {
2129 tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
2130 op_arg, ctx->memidx, ld_mop);
2131 tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
2132 }
2133 break;
2135 case INDEX_op_xor_i32:
2136 if (op_dst != st_src) {
2137 goto fail;
2138 }
2139 if (op_dst == ld_dst) {
2140 tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
2141 op_arg, ctx->memidx, ld_mop);
2142 } else {
2143 tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
2144 op_arg, ctx->memidx, ld_mop);
2145 tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
2146 }
2147 break;
2149 case INDEX_op_setcond_i32:
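/*
 * Compare-and-swap: cmpxchg leaves the old memory value in ld_dst, so
 * comparing it with op_arg afterwards recomputes the T bit exactly as
 * the original cmp/eq (or tst against zero) would have; movt, when
 * present, then copies T into its destination.
 */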
2150 if (st_src == ld_dst) {
2151 goto fail;
2152 }
2153 tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
2154 REG(st_src), ctx->memidx, ld_mop);
2155 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
2156 if (mt_dst >= 0) {
2157 tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
2158 }
2159 break;
2161 default:
2162 g_assert_not_reached();
2163 }
2165 /* The entire region has been translated. */
2166 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2167 goto done;
2169 fail:
2170 qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
2171 pc, pc_end);
2173 gen_restart_exclusive(ctx);
2175 /* We're not executing an instruction, but we must report one for the
2176 purposes of accounting within the TB. We might as well report the
2177 entire region consumed via ctx->base.pc_next so that it's immediately
2178 available in the disassembly dump. */
2180 done:
2181 ctx->base.pc_next = pc_end;
2182 ctx->base.num_insns += max_insns - 1;
2184 /*
2185 * Emit insn_start to cover each of the insns in the region.
2186 * This matches an assert in tcg.c making sure that we have
2187 * tb->icount * insn_start.
2188 */
2189 for (i = 1; i < max_insns; ++i) {
2190 tcg_gen_insn_start(pc + i * 2, ctx->envflags);
2191 ctx->base.insn_start = tcg_last_op();
2192 }
2193 }
2194 #endif
2196 static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
2197 {
2198 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2199 uint32_t tbflags;
2200 int bound;
2202 ctx->tbflags = tbflags = ctx->base.tb->flags;
2203 ctx->envflags = tbflags & TB_FLAG_ENVFLAGS_MASK;
2204 ctx->memidx = (tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
2205 /* We don't know if the delayed pc came from a dynamic or static branch,
2206 so assume it is a dynamic branch. */
2207 ctx->delayed_pc = -1; /* use delayed pc from env pointer */
2208 ctx->features = cpu_env(cs)->features;
2209 ctx->has_movcal = (tbflags & TB_FLAG_PENDING_MOVCA);
2210 ctx->gbank = ((tbflags & (1 << SR_MD)) &&
2211 (tbflags & (1 << SR_RB))) * 0x10;
2212 ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;
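/*
 * A non-zero gbank/fbank (0x10) makes REG()/FREG() resolve R0-R7 and
 * FPR0-FPR15 to their BANK1 globals: gbank when both SR.MD and SR.RB
 * are set, fbank when FPSCR.FR is set.
 */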
2214 #ifdef CONFIG_USER_ONLY
2215 if (tbflags & TB_FLAG_GUSA_MASK) {
2216 /* In gUSA exclusive region. */
2217 uint32_t pc = ctx->base.pc_next;
2218 uint32_t pc_end = ctx->base.tb->cs_base;
2219 int backup = sextract32(ctx->tbflags, TB_FLAG_GUSA_SHIFT, 8);
2220 int max_insns = (pc_end - pc) / 2;
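/*
 * backup holds the signed 8-bit immediate recorded when the region was
 * opened with "mov #imm,r15" (imm < 0), so a well-formed region ends at
 * pc - imm.
 */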
2222 if (pc != pc_end + backup || max_insns < 2) {
2223 /* This is a malformed gUSA region. Don't do anything special,
2224 since the interpreter is likely to get confused. */
2225 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2226 } else if (tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
2227 /* Regardless of single-stepping or the end of the page,
2228 we must complete execution of the gUSA region while
2229 holding the exclusive lock. */
2230 ctx->base.max_insns = max_insns;
2231 return;
2232 }
2233 }
2234 #endif
2236 /* Since the ISA is fixed-width, we can bound by the number
2237 of instructions remaining on the page. */
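/*
 * ORing pc_next with TARGET_PAGE_MASK sets every bit above the page
 * offset, so the negation is the number of bytes left on this page;
 * dividing by 2 converts that into a count of 16-bit insns.
 */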
2238 bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
2239 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2240 }
2242 static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
2243 {
2244 }
2246 static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
2247 {
2248 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2250 tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
2251 }
2253 static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
2254 {
2255 CPUSH4State *env = cpu_env(cs);
2256 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2258 #ifdef CONFIG_USER_ONLY
2259 if (unlikely(ctx->envflags & TB_FLAG_GUSA_MASK)
2260 && !(ctx->envflags & TB_FLAG_GUSA_EXCLUSIVE)) {
2261 /*
2262 * We're in a gUSA region, and we have not already fallen
2263 * back on using an exclusive region. Attempt to parse the
2264 * region into a single supported atomic operation. Failure
2265 * is handled within the parser by raising an exception to
2266 * retry using an exclusive region.
2267 *
2268 * Parsing the region in one block conflicts with plugins,
2269 * so always use exclusive mode if plugins are enabled.
2270 */
2271 if (ctx->base.plugin_enabled) {
2272 gen_restart_exclusive(ctx);
2273 ctx->base.pc_next += 2;
2274 } else {
2275 decode_gusa(ctx, env);
2276 }
2277 return;
2278 }
2279 #endif
2281 ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
2282 decode_opc(ctx);
2283 ctx->base.pc_next += 2;
2284 }
2286 static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
2287 {
2288 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2290 if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
2291 /* Ending the region of exclusivity. Clear the bits. */
2292 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2293 }
2295 switch (ctx->base.is_jmp) {
2296 case DISAS_STOP:
2297 gen_save_cpu_state(ctx, true);
2298 tcg_gen_exit_tb(NULL, 0);
2299 break;
2300 case DISAS_NEXT:
2301 case DISAS_TOO_MANY:
2302 gen_save_cpu_state(ctx, false);
2303 gen_goto_tb(ctx, 0, ctx->base.pc_next);
2304 break;
2305 case DISAS_NORETURN:
2306 break;
2307 default:
2308 g_assert_not_reached();
2309 }
2310 }
2312 static const TranslatorOps sh4_tr_ops = {
2313 .init_disas_context = sh4_tr_init_disas_context,
2314 .tb_start = sh4_tr_tb_start,
2315 .insn_start = sh4_tr_insn_start,
2316 .translate_insn = sh4_tr_translate_insn,
2317 .tb_stop = sh4_tr_tb_stop,
2318 };
2320 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
2321 vaddr pc, void *host_pc)
2322 {
2323 DisasContext ctx;
2325 translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base);
2326 }