/*
 *  SH4 translation
 *
 *  Copyright (c) 2005 Samuel Tardieu
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#define DEBUG_DISAS

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc;
    uint16_t opcode;
    uint32_t tbflags;  /* should stay unmodified during the TB translation */
    uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
    int bstate;
    int memidx;
    int gbank;
    int fbank;
    uint32_t delayed_pc;
    int singlestep_enabled;
    uint32_t features;
    int has_movcal;
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
#endif

enum {
    BS_NONE   = 0, /* We go out of the TB without reaching a branch or an
                    * exception condition */
    BS_STOP   = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP   = 3, /* We reached an exception condition */
};

/* global register indexes */
static TCGv cpu_gregs[32];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;

#include "exec/gen-icount.h"

void sh4_translate_init(void)
{
    int i;
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
        "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
        "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
        "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
        "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
        "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
        "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    for (i = 0; i < 24; i++) {
        cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);
    }
    memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));

    cpu_pc = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_m), "SR_M");
    cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_q), "SR_Q");
    cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_t), "SR_T");
    cpu_ssr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    cpu_flags = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State,
                                                       delayed_cond),
                                              "_delayed_cond_");
    cpu_ldst = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, ldst), "_ldst_");

    for (i = 0; i < 32; i++) {
        cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);
    }
}

void superh_cpu_dump_state(CPUState *cs, FILE *f,
                           fprintf_function cpu_fprintf, int flags)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int i;

    cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                env->spc, env->ssr, env->gbr, env->vbr);
    cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                env->sgr, env->dbr, env->delayed_pc, env->fpul);
    for (i = 0; i < 24; i += 4) {
        cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                    i, env->gregs[i], i + 1, env->gregs[i + 1],
                    i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & DELAY_SLOT) {
        cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
        cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_RTE) {
        cpu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    }
}

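/* SR is kept split across four TCG globals: cpu_sr_q, cpu_sr_m and cpu_sr_t
   hold the Q, M and T bits as plain 0/1 values, while cpu_sr holds all the
   remaining bits.  gen_read_sr() reassembles the architectural value,
   SR = cpu_sr | (Q << SR_Q) | (M << SR_M) | (T << SR_T), and gen_write_sr()
   performs the inverse split.  Keeping T (and Q/M for the division insns)
   in dedicated registers spares the many T-consuming instructions the
   repeated mask/extract operations. */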
static void gen_read_sr(TCGv dst)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
    tcg_gen_or_i32(dst, cpu_sr, t0);
    tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_temp_free_i32(t0);
}

static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
    tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
    tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
}

static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
    if (save_pc) {
        tcg_gen_movi_i32(cpu_pc, ctx->pc);
    }
    if (ctx->delayed_pc != (uint32_t) -1) {
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
    }
    if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }
}

static inline bool use_exit_tb(DisasContext *ctx)
{
    return (ctx->tbflags & GUSA_EXCLUSIVE) != 0;
}

static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    /* Use a direct jump if in same page and singlestep not enabled */
    if (unlikely(ctx->singlestep_enabled || use_exit_tb(ctx))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

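/* gen_goto_tb() prefers direct TB chaining (goto_tb/exit_tb) whenever
   use_goto_tb() allows it; otherwise it stores the new PC and leaves via
   gen_helper_debug (single-stepping), a plain exit_tb (gUSA exclusive
   regions), or tcg_gen_lookup_and_goto_ptr(), which re-looks the target
   TB up at run time. */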
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (ctx->singlestep_enabled) {
            gen_helper_debug(cpu_env);
        } else if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}

static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == -1) {
        /* Target is not statically known; it necessarily comes from a
           delayed jump, as immediate jumps are conditional jumps */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        tcg_gen_discard_i32(cpu_delayed_pc);
        if (ctx->singlestep_enabled) {
            gen_helper_debug(cpu_env);
        } else if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}

/* Immediate conditional jump (bt or bf) */
static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
                                 bool jump_if_true)
{
    TCGLabel *l1 = gen_new_label();
    TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction. */
        tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
        /* Note that this won't actually use a goto_tb opcode because we
           disallow it in use_goto_tb, but it handles exit + singlestep. */
        gen_goto_tb(ctx, 0, dest);
        gen_set_label(l1);
        return;
    }

    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, dest);
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ctx->pc + 2);
    ctx->bstate = BS_BRANCH;
}

/* Delayed conditional jump (bt or bf) */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    TCGLabel *l1 = gen_new_label();
    TCGv ds = tcg_temp_new();

    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction. */
        tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);

        /* Leave the gUSA region. */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
        gen_jump(ctx);

        gen_set_label(l1);
        return;
    }

    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->pc + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}

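/* A double-precision DRn value lives in an even/odd FR pair, with FRn
   holding the high 32 bits and FRn+1 the low 32 bits; hence the
   (reg + 1, reg) operand order in the two helpers below. */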
static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr. */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}

static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr. */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
}

#define B3_0   (ctx->opcode & 0xf)
#define B6_4   ((ctx->opcode >> 4) & 0x7)
#define B7_4   ((ctx->opcode >> 4) & 0xf)
#define B7_0   (ctx->opcode & 0xff)
#define B7_0s  ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
                (ctx->opcode & 0xfff))
#define B11_8  ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

#define REG(x)    cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
#define FREG(x)   cpu_fregs[(x) ^ ctx->fbank]

#define XHACK(x) ((((x) & 1) << 4) | ((x) & 0xe))
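/* REG() resolves an Rn index against the current register bank by XOR-ing
   it with ctx->gbank (0 or 0x10); ALTREG() flips to the other bank, and
   FREG() does the same for FP registers via ctx->fbank.  XHACK() turns an
   XDn specifier into an FRn index by moving the bank bit from bit 0 up to
   bit 4, e.g. XHACK(1) == 0x10. */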
#define CHECK_NOT_DELAY_SLOT                \
    if (ctx->envflags & DELAY_SLOT_MASK) {  \
        goto do_illegal_slot;               \
    }

#define CHECK_PRIVILEGED     \
    if (IS_USER(ctx)) {      \
        goto do_illegal;     \
    }

#define CHECK_FPU_ENABLED                   \
    if (ctx->tbflags & (1u << SR_FD)) {     \
        goto do_fpu_disabled;               \
    }

#define CHECK_FPSCR_PR_0               \
    if (ctx->tbflags & FPSCR_PR) {     \
        goto do_illegal;               \
    }

#define CHECK_FPSCR_PR_1                  \
    if (!(ctx->tbflags & FPSCR_PR)) {     \
        goto do_illegal;                  \
    }

#define CHECK_SH4A                                    \
    if (!(ctx->features & SH_FEATURE_SH4A)) {         \
        goto do_illegal;                              \
    }

static void _decode_opc(DisasContext * ctx)
{
    /* This code tries to make movca.l emulation sufficiently
       accurate for Linux purposes.  This instruction writes
       memory, and prior to that, always allocates a cache line.
       It is used in two contexts:
       - in memcpy, where data is copied in blocks, the first write
         to a block uses movca.l for performance.
       - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is used
         to flush the cache.  Here, the data written by movca.l is never
         written to memory, and the data written is just bogus.

       To simulate this, when we emulate movca.l we store the value to
       memory, but we also remember the previous content.  If we see ocbi,
       we check whether movca.l for that address was done previously.  If
       so, the write should not have hit the memory, so we restore the
       previous content.  When we see an instruction that is neither
       movca.l nor ocbi, the previous content is discarded.

       To optimize, we only try to flush stores when we're at the start of
       TB, or if we already saw movca.l in this TB and did not flush stores
       yet. */
    if (ctx->has_movcal) {
        int opcode = ctx->opcode & 0xf0ff;
        if (opcode != 0x0093   /* ocbi */
            && opcode != 0x00c3 /* movca.l */) {
            gen_helper_discard_movcal_backup(cpu_env);
            ctx->has_movcal = 0;
        }
    }

#if 0
    fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
#endif

    switch (ctx->opcode) {
    case 0x0019: /* div0u */
        tcg_gen_movi_i32(cpu_sr_m, 0);
        tcg_gen_movi_i32(cpu_sr_q, 0);
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x000b: /* rts */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x0028: /* clrmac */
        tcg_gen_movi_i32(cpu_mach, 0);
        tcg_gen_movi_i32(cpu_macl, 0);
        return;
    case 0x0048: /* clrs */
        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
        return;
    case 0x0008: /* clrt */
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x0038: /* ldtlb */
        CHECK_PRIVILEGED
        gen_helper_ldtlb(cpu_env);
        return;
    case 0x002b: /* rte */
        CHECK_PRIVILEGED
        CHECK_NOT_DELAY_SLOT
        gen_write_sr(cpu_ssr);
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
        ctx->envflags |= DELAY_SLOT_RTE;
        ctx->delayed_pc = (uint32_t) - 1;
        ctx->bstate = BS_STOP;
        return;
    case 0x0058: /* sets */
        tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
        return;
    case 0x0018: /* sett */
        tcg_gen_movi_i32(cpu_sr_t, 1);
        return;
    case 0xfbfd: /* frchg */
        CHECK_FPSCR_PR_0
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
        ctx->bstate = BS_STOP;
        return;
    case 0xf3fd: /* fschg */
        CHECK_FPSCR_PR_0
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
        ctx->bstate = BS_STOP;
        return;
    case 0xf7fd: /* fpchg */
        CHECK_SH4A
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
        ctx->bstate = BS_STOP;
        return;
    case 0x0009: /* nop */
        return;
    case 0x001b: /* sleep */
        CHECK_PRIVILEGED
        tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
        gen_helper_sleep(cpu_env);
        return;
    }

    switch (ctx->opcode & 0xf000) {
    case 0x1000: /* mov.l Rm,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x5000: /* mov.l @(disp,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0xe000: /* mov #imm,Rn */
#ifdef CONFIG_USER_ONLY
        /* Detect the start of a gUSA region.  If so, update envflags
           and end the TB.  This will allow us to see the end of the
           region (stored in R0) in the next TB. */
        if (B11_8 == 15 && B7_0s < 0 && (tb_cflags(ctx->tb) & CF_PARALLEL)) {
            ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
            ctx->bstate = BS_STOP;
        }
#endif
        tcg_gen_movi_i32(REG(B11_8), B7_0s);
        return;
    case 0x9000: /* mov.w @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xd000: /* mov.l @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0x7000: /* add #imm,Rn */
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
        return;
    case 0xa000: /* bra disp */
        CHECK_NOT_DELAY_SLOT
        ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
        ctx->envflags |= DELAY_SLOT;
        return;
    case 0xb000: /* bsr disp */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
        ctx->envflags |= DELAY_SLOT;
        return;
    }

    switch (ctx->opcode & 0xf00f) {
    case 0x6003: /* mov Rm,Rn */
        tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x2000: /* mov.b Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
        return;
    case 0x2001: /* mov.w Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
        return;
    case 0x2002: /* mov.l Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
        return;
    case 0x6000: /* mov.b @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        return;
    case 0x6001: /* mov.w @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
        return;
    case 0x6002: /* mov.l @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
        return;
    case 0x2004: /* mov.b Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 1);
            /* might cause re-execution */
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
            tcg_temp_free(addr);
        }
        return;
    case 0x2005: /* mov.w Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 2);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x2006: /* mov.l Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x6004: /* mov.b @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
        }
        return;
    case 0x6005: /* mov.w @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x6006: /* mov.l @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0x0004: /* mov.b Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x0005: /* mov.w Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0x0006: /* mov.l Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x000c: /* mov.b @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x000d: /* mov.w @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0x000e: /* mov.l @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0x6008: /* swap.b Rm,Rn */
        {
            TCGv low = tcg_temp_new();
            tcg_gen_ext16u_i32(low, REG(B7_4));
            tcg_gen_bswap16_i32(low, low);
            tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
            tcg_temp_free(low);
        }
        return;
    case 0x6009: /* swap.w Rm,Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
        return;
    case 0x200d: /* xtrct Rm,Rn */
        {
            TCGv high, low;
            high = tcg_temp_new();
            tcg_gen_shli_i32(high, REG(B7_4), 16);
            low = tcg_temp_new();
            tcg_gen_shri_i32(low, REG(B11_8), 16);
            tcg_gen_or_i32(REG(B11_8), high, low);
            tcg_temp_free(low);
            tcg_temp_free(high);
        }
        return;
    case 0x300c: /* add Rm,Rn */
        tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300e: /* addc Rm,Rn */
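        /* addc computes Rn + Rm + T with the carry out landing in T,
           using two double-word adds: (carry1:t1) = Rm + T, then
           (T:Rn) = (0:Rn) + (carry1:t1). */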
        {
            TCGv t0, t1;
            t0 = tcg_const_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        return;
    case 0x300f: /* addv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
            tcg_gen_andc_i32(cpu_sr_t, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2009: /* and Rm,Rn */
        tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x3000: /* cmp/eq Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3003: /* cmp/ge Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3007: /* cmp/gt Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3006: /* cmp/hi Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3002: /* cmp/hs Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x200c: /* cmp/str Rm,Rn */
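        /* T is set if any byte of Rm equals the corresponding byte of Rn.
           XOR turns equal bytes into zero bytes, and the classic
           (x - 0x01010101) & ~x & 0x80808080 trick flags any zero byte
           in x. */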
        {
            TCGv cmp1 = tcg_temp_new();
            TCGv cmp2 = tcg_temp_new();
            tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
            tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
            tcg_gen_andc_i32(cmp1, cmp1, cmp2);
            tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
            tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
            tcg_temp_free(cmp2);
            tcg_temp_free(cmp1);
        }
        return;
    case 0x2007: /* div0s Rm,Rn */
        tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31);    /* SR_Q */
        tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31);     /* SR_M */
        tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
        return;
    case 0x3004: /* div1 Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv zero = tcg_const_i32(0);

            /* shift left arg1, saving the bit being pushed out and inserting
               T on the right */
            tcg_gen_shri_i32(t0, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);

            /* Add or subtract arg0 from arg1 depending on whether Q == M.
               To avoid using 64-bit temps, we compute arg0's high part from
               q ^ m, so that it is 0x00000000 when adding the value or
               0xffffffff when subtracting it. */
            tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
            tcg_gen_subi_i32(t1, t1, 1);
            tcg_gen_neg_i32(t2, REG(B7_4));
            tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
            tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);

            /* compute T and Q depending on carry */
            tcg_gen_andi_i32(t1, t1, 1);
            tcg_gen_xor_i32(t1, t1, t0);
            tcg_gen_xori_i32(cpu_sr_t, t1, 1);
            tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);

            tcg_temp_free(zero);
            tcg_temp_free(t2);
            tcg_temp_free(t1);
            tcg_temp_free(t0);
        }
        return;
    case 0x300d: /* dmuls.l Rm,Rn */
        tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x3005: /* dmulu.l Rm,Rn */
        tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x600e: /* exts.b Rm,Rn */
        tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600f: /* exts.w Rm,Rn */
        tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600c: /* extu.b Rm,Rn */
        tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600d: /* extu.w Rm,Rn */
        tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x000f: /* mac.l @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
            gen_helper_macl(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        }
        return;
    case 0x400f: /* mac.w @Rm+,@Rn+ */
        {
            /* mac.w operands are 16-bit words, so load them as such. */
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESW);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESW);
            gen_helper_macw(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x0007: /* mul.l Rm,Rn */
        tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
        return;
    case 0x200f: /* muls.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x200e: /* mulu.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x600b: /* neg Rm,Rn */
        tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600a: /* negc Rm,Rn */
        {
            TCGv t0 = tcg_const_i32(0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B7_4), t0, cpu_sr_t, t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             t0, t0, REG(B11_8), cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
            tcg_temp_free(t0);
        }
        return;
    case 0x6007: /* not Rm,Rn */
        tcg_gen_not_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x200b: /* or Rm,Rn */
        tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x400c: /* shad Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_sar_i32(t2, REG(B11_8), t0);
            tcg_gen_sari_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        return;
    case 0x400d: /* shld Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_shr_i32(t2, REG(B11_8), t0);
            tcg_gen_shri_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        return;
    case 0x3008: /* sub Rm,Rn */
        tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300a: /* subc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_const_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        return;
    case 0x300b: /* subv Rm,Rn */
        {
            /* Underflow iff Rn and Rm have different signs and the result's
               sign differs from Rn's: T = ((res ^ Rn) & (Rn ^ Rm)) >> 31. */
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
            tcg_gen_and_i32(t1, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(cpu_sr_t, t1, 31);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2008: /* tst Rm,Rn */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0x200a: /* xor Rm,Rn */
        tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
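    /* For the fmov variants below, FPSCR.SZ selects the operand size:
       with SZ set, a 64-bit DR/XD pair is moved (two 32-bit halves or
       one 64-bit MO_TEQ access); otherwise a single 32-bit FR register
       is moved. */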
    case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            int xsrc = XHACK(B7_4);
            int xdst = XHACK(B11_8);
            tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
            tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
        } else {
            tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
        }
        return;
    case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp, XHACK(B7_4));
            tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEQ);
            tcg_temp_free_i64(fp);
        } else {
            tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
            gen_store_fpr64(ctx, fp, XHACK(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
            gen_store_fpr64(ctx, fp, XHACK(B11_8));
            tcg_temp_free_i64(fp);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp, XHACK(B7_4));
                tcg_gen_subi_i32(addr, REG(B11_8), 8);
                tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
                tcg_temp_free_i64(fp);
            } else {
                tcg_gen_subi_i32(addr, REG(B11_8), 4);
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
            }
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEQ);
                gen_store_fpr64(ctx, fp, XHACK(B11_8));
                tcg_temp_free_i64(fp);
            } else {
                tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp, XHACK(B7_4));
                tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
                tcg_temp_free_i64(fp);
            } else {
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
    case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        {
            CHECK_FPU_ENABLED
            if (ctx->tbflags & FPSCR_PR) {
                TCGv_i64 fp0, fp1;

                if (ctx->opcode & 0x0110) {
                    goto do_illegal;
                }
                fp0 = tcg_temp_new_i64();
                fp1 = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp0, B11_8);
                gen_load_fpr64(ctx, fp1, B7_4);
                switch (ctx->opcode & 0xf00f) {
                case 0xf000: /* fadd Rm,Rn */
                    gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf001: /* fsub Rm,Rn */
                    gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf002: /* fmul Rm,Rn */
                    gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf003: /* fdiv Rm,Rn */
                    gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf004: /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
                    return;
                case 0xf005: /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
                    return;
                }
                gen_store_fpr64(ctx, fp0, B11_8);
                tcg_temp_free_i64(fp0);
                tcg_temp_free_i64(fp1);
            } else {
                switch (ctx->opcode & 0xf00f) {
                case 0xf000: /* fadd Rm,Rn */
                    gen_helper_fadd_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf001: /* fsub Rm,Rn */
                    gen_helper_fsub_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf002: /* fmul Rm,Rn */
                    gen_helper_fmul_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf003: /* fdiv Rm,Rn */
                    gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf004: /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                case 0xf005: /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                }
            }
        }
        return;
    case 0xf00e: /* fmac FR0,FRm,FRn */
        CHECK_FPU_ENABLED
        CHECK_FPSCR_PR_0
        gen_helper_fmac_FT(FREG(B11_8), cpu_env,
                           FREG(0), FREG(B7_4), FREG(B11_8));
        return;
    }

    switch (ctx->opcode & 0xff00) {
    case 0xc900: /* and #imm,R0 */
        tcg_gen_andi_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcd00: /* and.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0x8b00: /* bf label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, false);
        return;
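    /* For the delayed variants bf/s and bt/s, the branch condition is
       latched into cpu_delayed_cond before the delay slot runs;
       gen_delayed_conditional_jump() consumes and discards it once the
       slot instruction has been translated. */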
    case 0x8f00: /* bf/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
        ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
        ctx->envflags |= DELAY_SLOT_CONDITIONAL;
        return;
    case 0x8900: /* bt label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, true);
        return;
    case 0x8d00: /* bt/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
        ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
        ctx->envflags |= DELAY_SLOT_CONDITIONAL;
        return;
    case 0x8800: /* cmp/eq #imm,R0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
        return;
    case 0xc400: /* mov.b @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0xc500: /* mov.w @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc600: /* mov.l @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0xc000: /* mov.b R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0xc100: /* mov.w R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc200: /* mov.l R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x8000: /* mov.b R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x8100: /* mov.w R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0x8400: /* mov.b @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x8500: /* mov.w @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc700: /* mova @(disp,PC),R0 */
        tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
        return;
    case 0xcb00: /* or #imm,R0 */
        tcg_gen_ori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcf00: /* or.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_ori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0xc300: /* trapa #imm */
        {
            TCGv imm;
            CHECK_NOT_DELAY_SLOT
            gen_save_cpu_state(ctx, true);
            imm = tcg_const_i32(B7_0);
            gen_helper_trapa(cpu_env, imm);
            tcg_temp_free(imm);
            ctx->bstate = BS_EXCP;
        }
        return;
    case 0xc800: /* tst #imm,R0 */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(0), B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xcc00: /* tst.b #imm,@(R0,GBR) */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_add_i32(val, REG(0), cpu_gbr);
            tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xca00: /* xor #imm,R0 */
        tcg_gen_xori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xce00: /* xor.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_xori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    }

    switch (ctx->opcode & 0xf08f) {
    case 0x408e: /* ldc Rm,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
        return;
    case 0x4087: /* ldc.l @Rm+,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        return;
    case 0x0082: /* stc Rm_BANK,Rn */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
        return;
    case 0x4083: /* stc.l Rm_BANK,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    }

    switch (ctx->opcode & 0xf0ff) {
    case 0x0023: /* braf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x0003: /* bsrf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x4015: /* cmp/pl Rn */
        tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4011: /* cmp/pz Rn */
        tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4010: /* dt Rn */
        tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x402b: /* jmp @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x400b: /* jsr @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x400e: /* ldc Rm,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
            gen_write_sr(val);
            tcg_temp_free(val);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x4007: /* ldc.l @Rm+,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_andi_i32(val, val, 0x700083f3);
            gen_write_sr(val);
            tcg_temp_free(val);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x0002: /* stc SR,Rn */
        CHECK_PRIVILEGED
        gen_read_sr(REG(B11_8));
        return;
    case 0x4003: /* stc SR,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            TCGv val = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            gen_read_sr(val);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
#define LD(reg,ldnum,ldpnum,prechk)                                     \
    case ldnum:                                                         \
        prechk                                                          \
        tcg_gen_mov_i32(cpu_##reg, REG(B11_8));                         \
        return;                                                         \
    case ldpnum:                                                        \
        prechk                                                          \
        tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);                    \
        return;
#define ST(reg,stnum,stpnum,prechk)                                     \
    case stnum:                                                         \
        prechk                                                          \
        tcg_gen_mov_i32(REG(B11_8), cpu_##reg);                         \
        return;                                                         \
    case stpnum:                                                        \
        prechk                                                          \
        {                                                               \
            TCGv addr = tcg_temp_new();                                 \
            tcg_gen_subi_i32(addr, REG(B11_8), 4);                      \
            tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
            tcg_gen_mov_i32(REG(B11_8), addr);                          \
            tcg_temp_free(addr);                                        \
        }                                                               \
        return;
#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
    LD(reg,ldnum,ldpnum,prechk)                    \
    ST(reg,stnum,stpnum,prechk)
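    /* Each LDST() expansion contributes four cases: a register-to-register
       ldc/lds, a post-increment ldc.l/lds.l @Rm+, a register-to-register
       stc/sts, and a pre-decrement stc.l/sts.l @-Rn store, each guarded by
       the given prechk. */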
    LDST(gbr,  0x401e, 0x4017, 0x0012, 0x4013, {})
    LDST(vbr,  0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
    LDST(ssr,  0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
    LDST(spc,  0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
    ST(sgr,  0x003a, 0x4032, CHECK_PRIVILEGED)
    LD(sgr,  0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
    LDST(dbr,  0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
    LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
    LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
    LDST(pr,   0x402a, 0x4026, 0x002a, 0x4022, {})
    LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
    case 0x406a: /* lds Rm,FPSCR */
        CHECK_FPU_ENABLED
        gen_helper_ld_fpscr(cpu_env, REG(B11_8));
        ctx->bstate = BS_STOP;
        return;
    case 0x4066: /* lds.l @Rm+,FPSCR */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            gen_helper_ld_fpscr(cpu_env, addr);
            tcg_temp_free(addr);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x006a: /* sts FPSCR,Rn */
        CHECK_FPU_ENABLED
        tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
        return;
    case 0x4062: /* sts FPSCR,@-Rn */
        CHECK_FPU_ENABLED
        {
            TCGv addr, val;
            val = tcg_temp_new();
            tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
            addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
            tcg_temp_free(val);
        }
        return;
    case 0x00c3: /* movca.l R0,@Rm */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
            gen_helper_movcal(cpu_env, REG(B11_8), val);
            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
            tcg_temp_free(val);
        }
        ctx->has_movcal = 1;
        return;
    case 0x40a9: /* movua.l @Rm,R0 */
        CHECK_SH4A
        /* Load non-boundary-aligned data */
        tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
                            MO_TEUL | MO_UNALN);
        return;
    case 0x40e9: /* movua.l @Rm+,R0 */
        CHECK_SH4A
        /* Load non-boundary-aligned data */
        tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
                            MO_TEUL | MO_UNALN);
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        return;
    case 0x0029: /* movt Rn */
        tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
        return;
    case 0x0073:
        /* MOVCO.L
         *     LDST -> T
         *     If (T == 1) R0 -> (Rn)
         *     0 -> LDST
         */
        CHECK_SH4A
        {
            TCGLabel *label = gen_new_label();
            tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
            tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
            gen_set_label(label);
            tcg_gen_movi_i32(cpu_ldst, 0);
            return;
        }
    case 0x0063:
        /* MOVLI.L @Rm,R0
         *     1 -> LDST
         *     (Rm) -> R0
         *     When interrupt/exception
         *     occurred, 0 -> LDST
         */
        CHECK_SH4A
        tcg_gen_movi_i32(cpu_ldst, 0);
        tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
        tcg_gen_movi_i32(cpu_ldst, 1);
        return;
    case 0x0093: /* ocbi @Rn */
        gen_helper_ocbi(cpu_env, REG(B11_8));
        return;
    case 0x00a3: /* ocbp @Rn */
    case 0x00b3: /* ocbwb @Rn */
        /* These instructions are supposed to do nothing in case of
           a cache miss.  Given that we only partially emulate caches
           it is safe to simply ignore them. */
        return;
    case 0x0083: /* pref @Rn */
        return;
    case 0x00d3: /* prefi @Rn */
        CHECK_SH4A
        return;
    case 0x00e3: /* icbi @Rn */
        CHECK_SH4A
        return;
    case 0x00ab: /* synco */
        CHECK_SH4A
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        return;
    case 0x4024: /* rotcl Rn */
        {
            TCGv tmp = tcg_temp_new();
            tcg_gen_mov_i32(tmp, cpu_sr_t);
            tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
            tcg_temp_free(tmp);
        }
        return;
    case 0x4025: /* rotcr Rn */
        {
            TCGv tmp = tcg_temp_new();
            tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
            tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
            tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
            tcg_temp_free(tmp);
        }
        return;
    case 0x4004: /* rotl Rn */
        /* T receives the bit rotated out, i.e. bit 0 of the result. */
        tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        return;
    case 0x4005: /* rotr Rn */
        /* T receives the bit rotated out, i.e. bit 0 of the old value. */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4000: /* shll Rn */
    case 0x4020: /* shal Rn */
        tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4021: /* shar Rn */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4001: /* shlr Rn */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4008: /* shll2 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
        return;
    case 0x4018: /* shll8 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
        return;
    case 0x4028: /* shll16 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
        return;
    case 0x4009: /* shlr2 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
        return;
    case 0x4019: /* shlr8 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
        return;
    case 0x4029: /* shlr16 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
        return;
    case 0x401b: /* tas.b @Rn */
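        /* tas.b: atomically OR 0x80 into the byte at @Rn; T = 1 iff the
           old value was zero. */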
        {
            TCGv val = tcg_const_i32(0x80);
            tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
                                        ctx->memidx, MO_UB);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
        return;
    case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
        return;
    case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            TCGv_i64 fp;
            if (ctx->opcode & 0x0100) {
                goto do_illegal;
            }
            fp = tcg_temp_new_i64();
            gen_helper_float_DT(fp, cpu_env, cpu_fpul);
            gen_store_fpr64(ctx, fp, B11_8);
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
        }
        return;
    case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            TCGv_i64 fp;
            if (ctx->opcode & 0x0100) {
                goto do_illegal;
            }
            fp = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp, B11_8);
            gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
        }
        return;
    case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
        return;
    case 0xf05d: /* fabs FRn/DRn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
        return;
    case 0xf06d: /* fsqrt FRn */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            if (ctx->opcode & 0x0100) {
                goto do_illegal;
            }
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp, B11_8);
            gen_helper_fsqrt_DT(fp, cpu_env, fp);
            gen_store_fpr64(ctx, fp, B11_8);
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
        }
        return;
    case 0xf07d: /* fsrra FRn */
        CHECK_FPU_ENABLED
        CHECK_FPSCR_PR_0
        gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8));
        return;
    case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
        CHECK_FPU_ENABLED
        CHECK_FPSCR_PR_0
        tcg_gen_movi_i32(FREG(B11_8), 0);
        return;
    case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
        CHECK_FPU_ENABLED
        CHECK_FPSCR_PR_0
        tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
        return;
    case 0xf0ad: /* fcnvsd FPUL,DRn */
        CHECK_FPU_ENABLED
        {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
            gen_store_fpr64(ctx, fp, B11_8);
            tcg_temp_free_i64(fp);
        }
        return;
    case 0xf0bd: /* fcnvds DRn,FPUL */
        CHECK_FPU_ENABLED
        {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp, B11_8);
            gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
            tcg_temp_free_i64(fp);
        }
        return;
    case 0xf0ed: /* fipr FVm,FVn */
        CHECK_FPU_ENABLED
        CHECK_FPSCR_PR_1
        {
            TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3);
            TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
            gen_helper_fipr(cpu_env, m, n);
            tcg_temp_free(m);
            tcg_temp_free(n);
            return;
        }
    case 0xf0fd: /* ftrv XMTRX,FVn */
        CHECK_FPU_ENABLED
        CHECK_FPSCR_PR_1
        {
            if ((ctx->opcode & 0x0300) != 0x0100) {
                goto do_illegal;
            }
            TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
            gen_helper_ftrv(cpu_env, n);
            tcg_temp_free(n);
            return;
        }
    }
#if 0
    fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
            ctx->opcode, ctx->pc);
    fflush(stderr);
#endif

 do_illegal:
    if (ctx->envflags & DELAY_SLOT_MASK) {
 do_illegal_slot:
        gen_save_cpu_state(ctx, true);
        gen_helper_raise_slot_illegal_instruction(cpu_env);
    } else {
        gen_save_cpu_state(ctx, true);
        gen_helper_raise_illegal_instruction(cpu_env);
    }
    ctx->bstate = BS_EXCP;
    return;

 do_fpu_disabled:
    gen_save_cpu_state(ctx, true);
    if (ctx->envflags & DELAY_SLOT_MASK) {
        gen_helper_raise_slot_fpu_disable(cpu_env);
    } else {
        gen_helper_raise_fpu_disable(cpu_env);
    }
    ctx->bstate = BS_EXCP;
    return;
}

static void decode_opc(DisasContext * ctx)
{
    uint32_t old_flags = ctx->envflags;

    _decode_opc(ctx);

    if (old_flags & DELAY_SLOT_MASK) {
        /* go out of the delay slot */
        ctx->envflags &= ~DELAY_SLOT_MASK;

        /* When in an exclusive region, we must continue to the end
           for conditional branches. */
        if (ctx->tbflags & GUSA_EXCLUSIVE
            && old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
            return;
        }
        /* Otherwise this is probably an invalid gUSA region.
           Drop the GUSA bits so the next TB doesn't see them. */
        ctx->envflags &= ~GUSA_MASK;

        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
        ctx->bstate = BS_BRANCH;
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
        } else {
            gen_jump(ctx);
        }
    }
}

#ifdef CONFIG_USER_ONLY
/* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
   Upon an interrupt, a real kernel would simply notice magic values in
   the registers and reset the PC to the start of the sequence.

   For QEMU, we cannot do this in quite the same way.  Instead, we notice
   the normal start of such a sequence (mov #-x,r15).  While we can handle
   any sequence via cpu_exec_step_atomic, we can recognize the "normal"
   sequences and transform them into atomic operations as seen by the host.
*/
static int decode_gusa(DisasContext *ctx, CPUSH4State *env, int *pmax_insns)
{
    uint16_t insns[5];
    int ld_adr, ld_dst, ld_mop;
    int op_dst, op_src, op_opc;
    int mv_src, mt_dst, st_src, st_mop;
    TCGv op_arg;

    uint32_t pc = ctx->pc;
    uint32_t pc_end = ctx->tb->cs_base;
    int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
    int max_insns = (pc_end - pc) / 2;
    int i;
    if (pc != pc_end + backup || max_insns < 2) {
        /* This is a malformed gUSA region.  Don't do anything special,
           since the interpreter is likely to get confused.  */
        ctx->envflags &= ~GUSA_MASK;
        return 0;
    }

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* Regardless of single-stepping or the end of the page,
           we must complete execution of the gUSA region while
           holding the exclusive lock.  */
        *pmax_insns = max_insns;
        return 0;
    }

    /* The state machine below will consume only a few insns.
       If there are more than that in a region, fail now.  */
    if (max_insns > ARRAY_SIZE(insns)) {
        goto fail;
    }

    /* Read all of the insns for the region.  */
    for (i = 0; i < max_insns; ++i) {
        insns[i] = cpu_lduw_code(env, pc + i * 2);
    }
    ld_adr = ld_dst = ld_mop = -1;
    mv_src = -1;
    op_dst = op_src = op_opc = -1;
    mt_dst = -1;
    st_src = st_mop = -1;
    TCGV_UNUSED(op_arg);
    i = 0;

#define NEXT_INSN \
    do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
    /*
     * Expect a load to begin the region.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x6000: /* mov.b @Rm,Rn */
        ld_mop = MO_SB;
        break;
    case 0x6001: /* mov.w @Rm,Rn */
        ld_mop = MO_TESW;
        break;
    case 0x6002: /* mov.l @Rm,Rn */
        ld_mop = MO_TESL;
        break;
    default:
        goto fail;
    }
    ld_adr = B7_4;
    ld_dst = B11_8;
    if (ld_adr == ld_dst) {
        goto fail;
    }
    /* Unless we see a mov, any two-operand operation must use ld_dst.  */
    op_dst = ld_dst;

    /*
     * Expect an optional register move.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x6003: /* mov Rm,Rn */
        /* Here we want to recognize ld_dst being saved for later consumption,
           or for another input register being copied so that ld_dst need not
           be clobbered during the operation.  */
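        /* Hedged illustration (register numbers invented for the example):
               mov.l @r1,r0 ; mov r0,r2 ; add r3,r2 ; mov.l r2,@r1
           saves the loaded value in r0 and operates on the copy, while
               mov.l @r1,r0 ; mov r3,r2 ; add r0,r2 ; mov.l r2,@r1
           copies the second input so that r0 keeps the loaded value.  */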
        op_dst = B11_8;
        mv_src = B7_4;
        if (op_dst == ld_dst) {
            /* Overwriting the load output.  */
            goto fail;
        }
        if (mv_src != ld_dst) {
            /* Copying a new input; constrain op_src to match the load.  */
            op_src = ld_dst;
        }
        break;

    default:
        /* Put back and re-examine as operation.  */
        --i;
    }
    /*
     * Expect the operation.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x300c: /* add Rm,Rn */
        op_opc = INDEX_op_add_i32;
        goto do_reg_op;
    case 0x2009: /* and Rm,Rn */
        op_opc = INDEX_op_and_i32;
        goto do_reg_op;
    case 0x200a: /* xor Rm,Rn */
        op_opc = INDEX_op_xor_i32;
        goto do_reg_op;
    case 0x200b: /* or Rm,Rn */
        op_opc = INDEX_op_or_i32;
    do_reg_op:
        /* The operation register should be as expected, and the
           other input cannot depend on the load.  */
        if (op_dst != B11_8) {
            goto fail;
        }
        if (op_src < 0) {
            /* Unconstrained input.  */
            op_src = B7_4;
        } else if (op_src == B7_4) {
            /* Constrained input matched load.  All operations are
               commutative; "swap" them by "moving" the load output
               to the (implicit) first argument and the move source
               to the (explicit) second argument.  */
            op_src = mv_src;
        } else {
            goto fail;
        }
        op_arg = REG(op_src);
        break;
    case 0x6007: /* not Rm,Rn */
        if (ld_dst != B7_4 || mv_src >= 0) {
            goto fail;
        }
        op_dst = B11_8;
        op_opc = INDEX_op_xor_i32;
        op_arg = tcg_const_i32(-1);
        break;

    case 0x7000 ... 0x700f: /* add #imm,Rn */
        if (op_dst != B11_8 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_add_i32;
        op_arg = tcg_const_i32(B7_0s);
        break;
    case 0x3000: /* cmp/eq Rm,Rn */
        /* Looking for the middle of a compare-and-swap sequence,
           beginning with the compare.  Operands can be either order,
           but with only one overlapping the load.  */
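        /* A hedged sketch of the full sequence being matched here
           (register numbers are illustrative only):
               mov.l  @r1,r0      ! load current value
               cmp/eq r2,r0       ! compare with the expected value
               bf     1f          ! give up if it differs
               mov.l  r3,@r1      ! store the new value
            1:
           where label 1 must land exactly on pc_end.  */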
        if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_setcond_i32;  /* placeholder */
        op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
        op_arg = REG(op_src);

        NEXT_INSN;
        switch (ctx->opcode & 0xff00) {
        case 0x8b00: /* bf label */
        case 0x8f00: /* bf/s label */
            if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
                goto fail;
            }
            if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
                break;
            }
            /* We're looking to unconditionally modify Rn with the
               result of the comparison, within the delay slot of
               the branch.  This is used by older gcc.  */
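            /* E.g. (hedged sketch):
                   bf/s   1f
                    movt  r3      ! r3 = T, executed in the delay slot
               so that r3 reflects the comparison result on both paths.  */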
            NEXT_INSN;
            if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
                mt_dst = B11_8;
            } else {
                goto fail;
            }
            break;

        default:
            goto fail;
        }
        break;

    case 0x2008: /* tst Rm,Rn */
        /* Looking for a compare-and-swap against zero.  */
        if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_setcond_i32;
        op_arg = tcg_const_i32(0);

        NEXT_INSN;
        if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
            || pc + (i + 1 + B7_0s) * 2 != pc_end) {
            goto fail;
        }
        break;

    default:
        /* Put back and re-examine as store.  */
        --i;
    }
    /*
     * Expect the store.
     */
    /* The store must be the last insn.  */
    if (i != max_insns - 1) {
        goto fail;
    }
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x2000: /* mov.b Rm,@Rn */
        st_mop = MO_UB;
        break;
    case 0x2001: /* mov.w Rm,@Rn */
        st_mop = MO_UW;
        break;
    case 0x2002: /* mov.l Rm,@Rn */
        st_mop = MO_UL;
        break;
    default:
        goto fail;
    }
    /* The store must match the load.  */
    if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
        goto fail;
    }
    st_src = B7_4;

#undef NEXT_INSN
    /*
     * Emit the operation.
     */
    tcg_gen_insn_start(pc, ctx->envflags);
    switch (op_opc) {
    case -1:
        /* No operation found.  Look for exchange pattern.  */
        if (st_src == ld_dst || mv_src >= 0) {
            goto fail;
        }
        tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
                                ctx->memidx, ld_mop);
        break;
    case INDEX_op_add_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst && st_mop == MO_UL) {
            tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            if (op_dst != ld_dst) {
                /* Note that mop sizes < 4 cannot use add_fetch
                   because it won't carry into the higher bits.  */
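                /* Hedged worked example: with ld_mop == MO_SB, loading 0x7f
                   and adding 1 stores 0x80; an add_fetch would hand back that
                   byte re-extended as 0xffffff80, not the 0x00000080 the
                   guest register expects, hence the separate 32-bit add.  */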
                tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
            }
        }
        break;
    case INDEX_op_and_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_or_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
                                        op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
                                        op_arg, ctx->memidx, ld_mop);
            tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_xor_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;
    case INDEX_op_setcond_i32:
        if (st_src == ld_dst) {
            goto fail;
        }
        tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
                                   REG(st_src), ctx->memidx, ld_mop);
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
        if (mt_dst >= 0) {
            tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
        }
        break;

    default:
        g_assert_not_reached();
    }
    /* If op_src is not a valid register, then op_arg was a constant.  */
    if (op_src < 0) {
        tcg_temp_free_i32(op_arg);
    }

    /* The entire region has been translated.  */
    ctx->envflags &= ~GUSA_MASK;
    ctx->pc = pc_end;
    return max_insns;

 fail:
    qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
                  pc, pc_end);
    /* Restart with the EXCLUSIVE bit set, within a TB run via
       cpu_exec_step_atomic holding the exclusive lock.  */
    tcg_gen_insn_start(pc, ctx->envflags);
    ctx->envflags |= GUSA_EXCLUSIVE;
    gen_save_cpu_state(ctx, false);
    gen_helper_exclusive(cpu_env);
    ctx->bstate = BS_EXCP;

    /* We're not executing an instruction, but we must report one for the
       purposes of accounting within the TB.  We might as well report the
       entire region consumed via ctx->pc so that it's immediately available
       in the disassembly dump.  */
    ctx->pc = pc_end;
    return 1;
}
#endif
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUSH4State *env = cs->env_ptr;
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns;
    int max_insns;
    pc_start = tb->pc;
    ctx.pc = pc_start;
    ctx.tbflags = (uint32_t)tb->flags;
    ctx.envflags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
    ctx.bstate = BS_NONE;
    ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch.  */
    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.features = env->features;
    ctx.has_movcal = (ctx.tbflags & TB_FLAG_PENDING_MOVCA);
    ctx.gbank = ((ctx.tbflags & (1 << SR_MD)) &&
                 (ctx.tbflags & (1 << SR_RB))) * 0x10;
    ctx.fbank = ctx.tbflags & FPSCR_FR ? 0x10 : 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    max_insns = MIN(max_insns, TCG_MAX_INSNS);
    /* Since the ISA is fixed-width, we can bound by the number
       of instructions remaining on the page.  */
    num_insns = -(ctx.pc | TARGET_PAGE_MASK) / 2;
    max_insns = MIN(max_insns, num_insns);
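    /* Worked example (hedged): with 4 KiB pages, TARGET_PAGE_MASK is the
       signed value 0xfffff000, so for ctx.pc == 0x8c000ffc the expression
       (ctx.pc | TARGET_PAGE_MASK) is -4; negating and halving leaves room
       for the 2 remaining two-byte insns before the page boundary.  */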
    /* Single stepping means just that.  */
    if (ctx.singlestep_enabled || singlestep) {
        max_insns = 1;
    }

    gen_tb_start(tb);
    num_insns = 0;

#ifdef CONFIG_USER_ONLY
    if (ctx.tbflags & GUSA_MASK) {
        num_insns = decode_gusa(&ctx, env, &max_insns);
    }
#endif
    while (ctx.bstate == BS_NONE
           && num_insns < max_insns
           && !tcg_op_buf_full()) {
        tcg_gen_insn_start(ctx.pc, ctx.envflags);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            /* We have hit a breakpoint - make sure PC is up-to-date.  */
            gen_save_cpu_state(&ctx, true);
            gen_helper_debug(cpu_env);
            ctx.bstate = BS_EXCP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing.  */
            ctx.pc += 2;
            break;
        }
        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        ctx.opcode = cpu_lduw_code(env, ctx.pc);
        decode_opc(&ctx);
        ctx.pc += 2;
    }
    if (tb_cflags(tb) & CF_LAST_IO) {
        gen_io_end();
    }

    if (ctx.tbflags & GUSA_EXCLUSIVE) {
        /* Ending the region of exclusivity.  Clear the bits.  */
        ctx.envflags &= ~GUSA_MASK;
    }
    if (cs->singlestep_enabled) {
        gen_save_cpu_state(&ctx, true);
        gen_helper_debug(cpu_env);
    } else {
        switch (ctx.bstate) {
        case BS_STOP:
            gen_save_cpu_state(&ctx, true);
            tcg_gen_exit_tb(0);
            break;
        case BS_NONE:
            gen_save_cpu_state(&ctx, false);
            gen_goto_tb(&ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            /* fall through */
        case BS_BRANCH:
        default:
            break;
        }
    }

    gen_tb_end(tb, num_insns);
    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
        log_target_disas(cs, pc_start, ctx.pc - pc_start);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif
}
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->flags = data[1];
    /* Theoretically delayed_pc should also be restored.  In practice the
       branch instruction is re-executed after exception, so the delayed
       branch target will be recomputed.  */
}