target/m68k: call gen_raise_exception() directly if single-stepping in gen_jmp_tb()
[qemu/ar7.git] / target / m68k / translate.c
blobf14ecab5a502aba2e63d91782784c36757930497
1 /*
2 * m68k translation
4 * Copyright (c) 2005-2007 CodeSourcery
5 * Written by Paul Brook
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "qemu/log.h"
27 #include "qemu/qemu-print.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/translator.h"
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
34 #include "trace-tcg.h"
35 #include "exec/log.h"
36 #include "fpu/softfloat.h"
//#define DEBUG_DISPATCH 1

/* Instantiate one static TCG global per entry in qregs.def. */
#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#include "qregs.def"
#undef DEFO32
#undef DEFO64

static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;

/*
 * Backing storage for register names: 8 D-regs + 8 A-regs at 3 bytes
 * each ("Dn\0"/"An\0") plus 4 accumulators at 5 bytes each ("ACCn\0").
 */
static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
static TCGv_i64 cpu_macc[4];

/* Extract the 3-bit register field of INSN starting at bit POS. */
#define REG(insn, pos) (((insn) >> (pos)) & 7)
#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
/* Address registers go through get_areg() to honour pending writebacks. */
#define AREG(insn, pos) get_areg(s, REG(insn, pos))
#define MACREG(acc) cpu_macc[acc]
#define QREG_SP get_areg(s, 7)

/* Sentinel returned when no effective address could be generated. */
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (t == NULL_QREG)
/* Used to distinguish stores from bad addressing modes. */
static TCGv store_dummy;

#include "exec/gen-icount.h"
/*
 * Allocate the fixed TCG globals that mirror CPUM68KState fields.
 * Called once at translator start-up.
 */
void m68k_tcg_init(void)
{
    char *p;
    int i;

#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#include "qregs.def"
#undef DEFO32
#undef DEFO64

    /*
     * halted and exception_index live in CPUState, which precedes the
     * env member inside M68kCPU — hence the negative offset from env.
     */
    cpu_halted = tcg_global_mem_new_i32(cpu_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),
                                                 "EXCEPTION");

    /* Carve the register-name strings out of cpu_reg_names[]. */
    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    /* Sentinels: deliberately bogus negative env offsets, never accessed. */
    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
}
/* internal defines */
typedef struct DisasContext {
    DisasContextBase base;
    CPUM68KState *env;
    target_ulong pc;        /* fetch pointer: advanced by read_im16() & co. */
    CCOp cc_op;             /* Current CC operation */
    int cc_op_synced;       /* nonzero if env's CC_OP matches cc_op */
    TCGv_i64 mactmp;        /* scratch temp (MAC insns — see done_mac users) */
    int done_mac;
    int writeback_mask;     /* bit n set: aregs[n] has a pending writeback */
    TCGv writeback[8];      /* pending new values for address registers */
#define MAX_TO_RELEASE 8
    int release_count;
    TCGv release[MAX_TO_RELEASE]; /* temps queued for freeing (do_release) */
} DisasContext;
129 static void init_release_array(DisasContext *s)
131 #ifdef CONFIG_DEBUG_TCG
132 memset(s->release, 0, sizeof(s->release));
133 #endif
134 s->release_count = 0;
137 static void do_release(DisasContext *s)
139 int i;
140 for (i = 0; i < s->release_count; i++) {
141 tcg_temp_free(s->release[i]);
143 init_release_array(s);
146 static TCGv mark_to_release(DisasContext *s, TCGv tmp)
148 g_assert(s->release_count < MAX_TO_RELEASE);
149 return s->release[s->release_count++] = tmp;
152 static TCGv get_areg(DisasContext *s, unsigned regno)
154 if (s->writeback_mask & (1 << regno)) {
155 return s->writeback[regno];
156 } else {
157 return cpu_aregs[regno];
161 static void delay_set_areg(DisasContext *s, unsigned regno,
162 TCGv val, bool give_temp)
164 if (s->writeback_mask & (1 << regno)) {
165 if (give_temp) {
166 tcg_temp_free(s->writeback[regno]);
167 s->writeback[regno] = val;
168 } else {
169 tcg_gen_mov_i32(s->writeback[regno], val);
171 } else {
172 s->writeback_mask |= 1 << regno;
173 if (give_temp) {
174 s->writeback[regno] = val;
175 } else {
176 TCGv tmp = tcg_temp_new();
177 s->writeback[regno] = tmp;
178 tcg_gen_mov_i32(tmp, val);
183 static void do_writebacks(DisasContext *s)
185 unsigned mask = s->writeback_mask;
186 if (mask) {
187 s->writeback_mask = 0;
188 do {
189 unsigned regno = ctz32(mask);
190 tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
191 tcg_temp_free(s->writeback[regno]);
192 mask &= mask - 1;
193 } while (mask);
197 static bool is_singlestepping(DisasContext *s)
200 * Return true if we are singlestepping either because of QEMU gdbstub
201 * singlestep. This does not include the command line '-singlestep' mode
202 * which is rather misnamed as it only means "one instruction per TB" and
203 * doesn't affect the code we generate.
205 return s->base.singlestep_enabled;
/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT      DISAS_TARGET_1 /* cpu state was modified dynamically */

#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: everything runs unprivileged. */
#define IS_USER(s) 1
#else
/* Supervisor state and SFC/DFC space selection are encoded in tb->flags. */
#define IS_USER(s)   (!(s->base.tb->flags & TB_FLAGS_MSR_S))
#define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#endif
/* Signature shared by every instruction-decode handler. */
typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);

#ifdef DEBUG_DISPATCH
/* Debug variant: wrap each handler so its dispatch is logged first. */
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn);                       \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)                             \
    {                                                                   \
        qemu_log("Dispatch " #name "\n");                               \
        real_disas_##name(env, s, insn);                                \
    }                                                                   \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn)
#else
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)
#endif
/*
 * For each CC_OP, which CC flags are kept live in the QREG_CC_*
 * registers.  Used by set_cc_op() to discard dead computations.
 */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};
251 static void set_cc_op(DisasContext *s, CCOp op)
253 CCOp old_op = s->cc_op;
254 int dead;
256 if (old_op == op) {
257 return;
259 s->cc_op = op;
260 s->cc_op_synced = 0;
263 * Discard CC computation that will no longer be used.
264 * Note that X and N are never dead.
266 dead = cc_op_live[old_op] & ~cc_op_live[op];
267 if (dead & CCF_C) {
268 tcg_gen_discard_i32(QREG_CC_C);
270 if (dead & CCF_Z) {
271 tcg_gen_discard_i32(QREG_CC_Z);
273 if (dead & CCF_V) {
274 tcg_gen_discard_i32(QREG_CC_V);
278 /* Update the CPU env CC_OP state. */
279 static void update_cc_op(DisasContext *s)
281 if (!s->cc_op_synced) {
282 s->cc_op_synced = 1;
283 tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
287 /* Generate a jump to an immediate address. */
288 static void gen_jmp_im(DisasContext *s, uint32_t dest)
290 update_cc_op(s);
291 tcg_gen_movi_i32(QREG_PC, dest);
292 s->base.is_jmp = DISAS_JUMP;
295 /* Generate a jump to the address in qreg DEST. */
296 static void gen_jmp(DisasContext *s, TCGv dest)
298 update_cc_op(s);
299 tcg_gen_mov_i32(QREG_PC, dest);
300 s->base.is_jmp = DISAS_JUMP;
303 static void gen_raise_exception(int nr)
305 TCGv_i32 tmp;
307 tmp = tcg_const_i32(nr);
308 gen_helper_raise_exception(cpu_env, tmp);
309 tcg_temp_free_i32(tmp);
312 static void gen_exception(DisasContext *s, uint32_t dest, int nr)
314 update_cc_op(s);
315 tcg_gen_movi_i32(QREG_PC, dest);
317 gen_raise_exception(nr);
319 s->base.is_jmp = DISAS_NORETURN;
322 static inline void gen_addr_fault(DisasContext *s)
324 gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
328 * Generate a load from the specified address. Narrow values are
329 * sign extended to full register width.
331 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
332 int sign, int index)
334 TCGv tmp;
335 tmp = tcg_temp_new_i32();
336 switch(opsize) {
337 case OS_BYTE:
338 if (sign)
339 tcg_gen_qemu_ld8s(tmp, addr, index);
340 else
341 tcg_gen_qemu_ld8u(tmp, addr, index);
342 break;
343 case OS_WORD:
344 if (sign)
345 tcg_gen_qemu_ld16s(tmp, addr, index);
346 else
347 tcg_gen_qemu_ld16u(tmp, addr, index);
348 break;
349 case OS_LONG:
350 tcg_gen_qemu_ld32u(tmp, addr, index);
351 break;
352 default:
353 g_assert_not_reached();
355 return tmp;
358 /* Generate a store. */
359 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
360 int index)
362 switch(opsize) {
363 case OS_BYTE:
364 tcg_gen_qemu_st8(val, addr, index);
365 break;
366 case OS_WORD:
367 tcg_gen_qemu_st16(val, addr, index);
368 break;
369 case OS_LONG:
370 tcg_gen_qemu_st32(val, addr, index);
371 break;
372 default:
373 g_assert_not_reached();
/* What an effective-address access should do. */
typedef enum {
    EA_STORE,   /* write VAL to the EA */
    EA_LOADU,   /* read from the EA, zero-extended */
    EA_LOADS    /* read from the EA, sign-extended */
} ea_what;
384 * Generate an unsigned load if VAL is 0 a signed load if val is -1,
385 * otherwise generate a store.
387 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
388 ea_what what, int index)
390 if (what == EA_STORE) {
391 gen_store(s, opsize, addr, val, index);
392 return store_dummy;
393 } else {
394 return mark_to_release(s, gen_load(s, opsize, addr,
395 what == EA_LOADS, index));
399 /* Read a 16-bit immediate constant */
400 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
402 uint16_t im;
403 im = translator_lduw(env, s->pc);
404 s->pc += 2;
405 return im;
408 /* Read an 8-bit immediate constant */
409 static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
411 return read_im16(env, s);
414 /* Read a 32-bit immediate constant. */
415 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
417 uint32_t im;
418 im = read_im16(env, s) << 16;
419 im |= 0xffff & read_im16(env, s);
420 return im;
423 /* Read a 64-bit immediate constant. */
424 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
426 uint64_t im;
427 im = (uint64_t)read_im32(env, s) << 32;
428 im |= (uint64_t)read_im32(env, s);
429 return im;
432 /* Calculate and address index. */
433 static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
435 TCGv add;
436 int scale;
438 add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
439 if ((ext & 0x800) == 0) {
440 tcg_gen_ext16s_i32(tmp, add);
441 add = tmp;
443 scale = (ext >> 9) & 3;
444 if (scale != 0) {
445 tcg_gen_shli_i32(tmp, add, scale);
446 add = tmp;
448 return add;
/*
 * Handle a base + index + displacement effective address.
 * A NULL_QREG base means pc-relative.
 */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    /* Remember the pc of the extension word for pc-relative modes. */
    offset = s->pc;
    ext = read_im16(env, s);

    /* Word-sized index is only legal if the CPU supports it. */
    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
        /* No scaled index: force the scale field to zero. */
        ext &= ~(3 << 9);
    }

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)read_im16(env, s);
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = mark_to_release(s, tcg_temp_new());
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(s, ext, tmp);
        } else {
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                /* pc-relative: fold bd into the constant base. */
                base = mark_to_release(s, tcg_const_i32(offset + bd));
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            /* Everything suppressed: address is just the displacement. */
            add = mark_to_release(s, tcg_const_i32(bd));
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = mark_to_release(s, gen_load(s, OS_LONG, add, 0, IS_USER(s)));
            if ((ext & 0x44) == 4) {
                /* post-index: apply the index after the indirection */
                add = gen_addr_index(s, ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement */
                if ((ext & 3) == 2) {
                    od = (int16_t)read_im16(env, s);
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format */
        tmp = mark_to_release(s, tcg_temp_new());
        add = gen_addr_index(s, ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            /* pc-relative with 8-bit displacement */
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}
558 /* Sign or zero extend a value. */
560 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
562 switch (opsize) {
563 case OS_BYTE:
564 if (sign) {
565 tcg_gen_ext8s_i32(res, val);
566 } else {
567 tcg_gen_ext8u_i32(res, val);
569 break;
570 case OS_WORD:
571 if (sign) {
572 tcg_gen_ext16s_i32(res, val);
573 } else {
574 tcg_gen_ext16u_i32(res, val);
576 break;
577 case OS_LONG:
578 tcg_gen_mov_i32(res, val);
579 break;
580 default:
581 g_assert_not_reached();
/* Evaluate all the CC flags into canonical CC_OP_FLAGS form. */
static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        /* Already canonical. */
        return;

    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition. */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        /* Recover the second operand: N holds the result, V the addend. */
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        tcg_temp_free(t1);
        break;

    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction. */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        /* Recover the minuend: N holds the result, V the subtrahend. */
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        tcg_temp_free(t1);
        break;

    case CC_OP_CMPB:
    case CC_OP_CMPW:
    case CC_OP_CMPL:
        /* N holds the first operand, V the second; Z gets the difference. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
        /* Compute signed overflow for subtraction. */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        /* Logic ops clear C and V; Z tracks N. */
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
        s->cc_op_synced = 1;
        break;

    default:
        t0 = tcg_const_i32(s->cc_op);
        gen_helper_flush_flags(cpu_env, t0);
        tcg_temp_free(t0);
        s->cc_op_synced = 1;
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op. */
    s->cc_op = CC_OP_FLAGS;
}
667 static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
669 TCGv tmp;
671 if (opsize == OS_LONG) {
672 tmp = val;
673 } else {
674 tmp = mark_to_release(s, tcg_temp_new());
675 gen_ext(tmp, val, opsize, sign);
678 return tmp;
681 static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
683 gen_ext(QREG_CC_N, val, opsize, 1);
684 set_cc_op(s, CC_OP_LOGIC);
687 static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
689 tcg_gen_mov_i32(QREG_CC_N, dest);
690 tcg_gen_mov_i32(QREG_CC_V, src);
691 set_cc_op(s, CC_OP_CMPB + opsize);
694 static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
696 gen_ext(QREG_CC_N, dest, opsize, 1);
697 tcg_gen_mov_i32(QREG_CC_V, src);
700 static inline int opsize_bytes(int opsize)
702 switch (opsize) {
703 case OS_BYTE: return 1;
704 case OS_WORD: return 2;
705 case OS_LONG: return 4;
706 case OS_SINGLE: return 4;
707 case OS_DOUBLE: return 8;
708 case OS_EXTENDED: return 12;
709 case OS_PACKED: return 12;
710 default:
711 g_assert_not_reached();
715 static inline int insn_opsize(int insn)
717 switch ((insn >> 6) & 3) {
718 case 0: return OS_BYTE;
719 case 1: return OS_WORD;
720 case 2: return OS_LONG;
721 default:
722 g_assert_not_reached();
726 static inline int ext_opsize(int ext, int pos)
728 switch ((ext >> pos) & 7) {
729 case 0: return OS_LONG;
730 case 1: return OS_SINGLE;
731 case 2: return OS_EXTENDED;
732 case 3: return OS_PACKED;
733 case 4: return OS_WORD;
734 case 5: return OS_DOUBLE;
735 case 6: return OS_BYTE;
736 default:
737 g_assert_not_reached();
742 * Assign value to a register. If the width is less than the register width
743 * only the low part of the register is set.
745 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
747 TCGv tmp;
748 switch (opsize) {
749 case OS_BYTE:
750 tcg_gen_andi_i32(reg, reg, 0xffffff00);
751 tmp = tcg_temp_new();
752 tcg_gen_ext8u_i32(tmp, val);
753 tcg_gen_or_i32(reg, reg, tmp);
754 tcg_temp_free(tmp);
755 break;
756 case OS_WORD:
757 tcg_gen_andi_i32(reg, reg, 0xffff0000);
758 tmp = tcg_temp_new();
759 tcg_gen_ext16u_i32(tmp, val);
760 tcg_gen_or_i32(reg, reg, tmp);
761 tcg_temp_free(tmp);
762 break;
763 case OS_LONG:
764 case OS_SINGLE:
765 tcg_gen_mov_i32(reg, val);
766 break;
767 default:
768 g_assert_not_reached();
/*
 * Generate code for an "effective address".  Does not adjust the base
 * register for autoincrement addressing modes.
 */
static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
                         int mode, int reg0, int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch (mode) {
    case 0: /* Data register direct. */
    case 1: /* Address register direct. */
        /* Register-direct modes have no memory address. */
        return NULL_QREG;
    case 3: /* Indirect postincrement. */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        /* fallthru */
    case 2: /* Indirect register */
        return get_areg(s, reg0);
    case 4: /* Indirect predecrememnt. */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        reg = get_areg(s, reg0);
        tmp = mark_to_release(s, tcg_temp_new());
        if (reg0 == 7 && opsize == OS_BYTE &&
            m68k_feature(s->env, M68K_FEATURE_M68000)) {
            /* Byte pushes through SP keep the stack word-aligned. */
            tcg_gen_subi_i32(tmp, reg, 2);
        } else {
            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        }
        return tmp;
    case 5: /* Indirect displacement. */
        reg = get_areg(s, reg0);
        tmp = mark_to_release(s, tcg_temp_new());
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement. */
        reg = get_areg(s, reg0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
            offset = (int16_t)read_im16(env, s);
            return mark_to_release(s, tcg_const_i32(offset));
        case 1: /* Absolute long. */
            offset = read_im32(env, s);
            return mark_to_release(s, tcg_const_i32(offset));
        case 2: /* pc displacement */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            return mark_to_release(s, tcg_const_i32(offset));
        case 3: /* pc index+displacement. */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate. */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen. */
    return NULL_QREG;
}
840 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
841 int opsize)
843 int mode = extract32(insn, 3, 3);
844 int reg0 = REG(insn, 0);
845 return gen_lea_mode(env, s, mode, reg0, opsize);
/*
 * Generate code to load/store a value from/into an EA.  If WHAT > 0 this is
 * a write otherwise it is a read (0 == sign extend, -1 == zero extend).
 * ADDRP is non-null for readwrite operands.
 */
static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
                        int opsize, TCGv val, TCGv *addrp, ea_what what,
                        int index)
{
    TCGv reg, tmp, result;
    int32_t offset;

    switch (mode) {
    case 0: /* Data register direct. */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct. */
        reg = get_areg(s, reg0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = get_areg(s, reg0);
        return gen_ldst(s, opsize, reg, val, what, index);
    case 3: /* Indirect postincrement. */
        reg = get_areg(s, reg0);
        result = gen_ldst(s, opsize, reg, val, what, index);
        /* Only increment on the final (store or read-only) access. */
        if (what == EA_STORE || !addrp) {
            TCGv tmp = tcg_temp_new();
            if (reg0 == 7 && opsize == OS_BYTE &&
                m68k_feature(s->env, M68K_FEATURE_M68000)) {
                /* Byte accesses through SP keep it word-aligned. */
                tcg_gen_addi_i32(tmp, reg, 2);
            } else {
                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
            }
            delay_set_areg(s, reg0, tmp, true);
        }
        return result;
    case 4: /* Indirect predecrememnt. */
        if (addrp && what == EA_STORE) {
            /* Reuse the address computed by the earlier read. */
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        result = gen_ldst(s, opsize, tmp, val, what, index);
        if (what == EA_STORE || !addrp) {
            delay_set_areg(s, reg0, tmp, false);
        }
        return result;
    case 5: /* Indirect displacement. */
    case 6: /* Indirect index + displacement. */
    do_indirect:
        if (addrp && what == EA_STORE) {
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        return gen_ldst(s, opsize, tmp, val, what, index);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
        case 1: /* Absolute long. */
        case 2: /* pc displacement */
        case 3: /* pc index+displacement. */
            goto do_indirect;
        case 4: /* Immediate. */
            /* Sign extend values for consistency. */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return mark_to_release(s, tcg_const_i32(offset));
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen. */
    return NULL_QREG;
}
965 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
966 int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
968 int mode = extract32(insn, 3, 3);
969 int reg0 = REG(insn, 0);
970 return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
973 static TCGv_ptr gen_fp_ptr(int freg)
975 TCGv_ptr fp = tcg_temp_new_ptr();
976 tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
977 return fp;
980 static TCGv_ptr gen_fp_result_ptr(void)
982 TCGv_ptr fp = tcg_temp_new_ptr();
983 tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
984 return fp;
987 static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
989 TCGv t32;
990 TCGv_i64 t64;
992 t32 = tcg_temp_new();
993 tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
994 tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
995 tcg_temp_free(t32);
997 t64 = tcg_temp_new_i64();
998 tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
999 tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
1000 tcg_temp_free_i64(t64);
/*
 * Load a value of size OPSIZE from ADDR into FP register FP,
 * converting to the internal extended format via the ext*/red* helpers.
 */
static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                        int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
        tcg_gen_qemu_ld8s(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_WORD:
        tcg_gen_qemu_ld16s(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_LONG:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_SINGLE:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        gen_helper_extf32(cpu_env, fp, tmp);
        break;
    case OS_DOUBLE:
        tcg_gen_qemu_ld64(t64, addr, index);
        gen_helper_extf64(cpu_env, fp, t64);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* ColdFire FPU has no extended format. */
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* Exponent word in the high half of the first longword. */
        tcg_gen_qemu_ld32u(tmp, addr, index);
        tcg_gen_shri_i32(tmp, tmp, 16);
        tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
        /* Mantissa in the following 64 bits. */
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_qemu_ld64(t64, tmp, index);
        tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(tmp);
    tcg_temp_free_i64(t64);
}
/*
 * Store FP register FP to memory at ADDR as a value of size OPSIZE,
 * converting from the internal format via the red* helpers.
 */
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                         int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st8(tmp, addr, index);
        break;
    case OS_WORD:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st16(tmp, addr, index);
        break;
    case OS_LONG:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st32(tmp, addr, index);
        break;
    case OS_SINGLE:
        gen_helper_redf32(tmp, cpu_env, fp);
        tcg_gen_qemu_st32(tmp, addr, index);
        break;
    case OS_DOUBLE:
        gen_helper_redf64(t64, cpu_env, fp);
        tcg_gen_qemu_st64(t64, addr, index);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* ColdFire FPU has no extended format. */
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* Exponent word goes in the high half of the first longword. */
        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_shli_i32(tmp, tmp, 16);
        tcg_gen_qemu_st32(tmp, addr, index);
        /* Mantissa follows in the next 64 bits. */
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
        tcg_gen_qemu_st64(t64, tmp, index);
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(tmp);
    tcg_temp_free_i64(t64);
}
1113 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1114 TCGv_ptr fp, ea_what what, int index)
1116 if (what == EA_STORE) {
1117 gen_store_fp(s, opsize, addr, fp, index);
1118 } else {
1119 gen_load_fp(s, opsize, addr, fp, index);
/*
 * Load/store FP register FP from/to the EA described by MODE/REG0.
 * Returns 0 on success, -1 if the addressing mode is invalid for the
 * requested access.
 */
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
                          int reg0, int opsize, TCGv_ptr fp, ea_what what,
                          int index)
{
    TCGv reg, addr, tmp;
    TCGv_i64 t64;

    switch (mode) {
    case 0: /* Data register direct. */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                gen_helper_reds32(reg, cpu_env, fp);
                break;
            case OS_SINGLE:
                gen_helper_redf32(reg, cpu_env, fp);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tmp = tcg_temp_new();
            switch (opsize) {
            case OS_BYTE:
                tcg_gen_ext8s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tcg_gen_ext16s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                gen_helper_exts32(cpu_env, fp, reg);
                break;
            case OS_SINGLE:
                gen_helper_extf32(cpu_env, fp, reg);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free(tmp);
        }
        return 0;
    case 1: /* Address register direct. */
        /* Address registers cannot hold FP operands. */
        return -1;
    case 2: /* Indirect register */
        addr = get_areg(s, reg0);
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 3: /* Indirect postincrement. */
        addr = cpu_aregs[reg0];
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
        return 0;
    case 4: /* Indirect predecrememnt. */
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        /* Commit the decremented address back to the register. */
        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        return 0;
    case 5: /* Indirect displacement. */
    case 6: /* Indirect index + displacement. */
    do_indirect:
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
        case 1: /* Absolute long. */
        case 2: /* pc displacement */
        case 3: /* pc index+displacement. */
            goto do_indirect;
        case 4: /* Immediate. */
            if (what == EA_STORE) {
                /* Cannot store to an immediate. */
                return -1;
            }
            switch (opsize) {
            case OS_BYTE:
                tmp = tcg_const_i32((int8_t)read_im8(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_WORD:
                tmp = tcg_const_i32((int16_t)read_im16(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_LONG:
                tmp = tcg_const_i32(read_im32(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_SINGLE:
                tmp = tcg_const_i32(read_im32(env, s));
                gen_helper_extf32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_DOUBLE:
                t64 = tcg_const_i64(read_im64(env, s));
                gen_helper_extf64(cpu_env, fp, t64);
                tcg_temp_free_i64(t64);
                break;
            case OS_EXTENDED:
                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
                    /* ColdFire FPU has no extended format. */
                    gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                    break;
                }
                tmp = tcg_const_i32(read_im32(env, s) >> 16);
                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
                tcg_temp_free(tmp);
                t64 = tcg_const_i64(read_im64(env, s));
                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
                tcg_temp_free_i64(t64);
                break;
            case OS_PACKED:
                /*
                 * unimplemented data type on 68040/ColdFire
                 * FIXME if needed for another FPU
                 */
                gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                break;
            default:
                g_assert_not_reached();
            }
            return 0;
        default:
            return -1;
        }
    }
    return -1;
}
1264 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1265 int opsize, TCGv_ptr fp, ea_what what, int index)
1267 int mode = extract32(insn, 3, 3);
1268 int reg0 = REG(insn, 0);
1269 return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
/*
 * A comparison lowered to TCG form: the guest condition holds iff
 * "v1 tcond v2".  g1/g2 mark v1/v2 as global TCGvs that must not be
 * freed by free_cond().
 */
1272 typedef struct {
1273 TCGCond tcond;
1274 bool g1;
1275 bool g2;
1276 TCGv v1;
1277 TCGv v2;
1278 } DisasCompare;
/*
 * Lower m68k condition code COND (0..15) into a DisasCompare, using the
 * current lazy cc_op state where possible to avoid materializing all of
 * the flags.  Odd condition numbers are the negations of the even ones
 * and are handled by inverting TCOND at the end.
 */
1280 static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
1282 TCGv tmp, tmp2;
1283 TCGCond tcond;
1284 CCOp op = s->cc_op;
1286 /* The CC_OP_CMP form can handle most normal comparisons directly. */
1287 if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
1288 c->g1 = c->g2 = 1;
1289 c->v1 = QREG_CC_N;
1290 c->v2 = QREG_CC_V;
1291 switch (cond) {
1292 case 2: /* HI */
1293 case 3: /* LS */
1294 tcond = TCG_COND_LEU;
1295 goto done;
1296 case 4: /* CC */
1297 case 5: /* CS */
1298 tcond = TCG_COND_LTU;
1299 goto done;
1300 case 6: /* NE */
1301 case 7: /* EQ */
1302 tcond = TCG_COND_EQ;
1303 goto done;
1304 case 10: /* PL */
1305 case 11: /* MI */
1306 c->g1 = c->g2 = 0;
1307 c->v2 = tcg_const_i32(0);
      /* Recompute the sign of the result (N - V == dest - src),
         re-extended to the width of the original comparison.  */
1308 c->v1 = tmp = tcg_temp_new();
1309 tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
1310 gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
1311 /* fallthru */
1312 case 12: /* GE */
1313 case 13: /* LT */
1314 tcond = TCG_COND_LT;
1315 goto done;
1316 case 14: /* GT */
1317 case 15: /* LE */
1318 tcond = TCG_COND_LE;
1319 goto done;
1323 c->g1 = 1;
1324 c->g2 = 0;
1325 c->v2 = tcg_const_i32(0);
1327 switch (cond) {
1328 case 0: /* T */
1329 case 1: /* F */
1330 c->v1 = c->v2;
1331 tcond = TCG_COND_NEVER;
1332 goto done;
1333 case 14: /* GT (!(Z || (N ^ V))) */
1334 case 15: /* LE (Z || (N ^ V)) */
1336 * Logic operations clear V, which simplifies LE to (Z || N),
1337 * and since Z and N are co-located, this becomes a normal
1338 * comparison vs N.
1340 if (op == CC_OP_LOGIC) {
1341 c->v1 = QREG_CC_N;
1342 tcond = TCG_COND_LE;
1343 goto done;
1345 break;
1346 case 12: /* GE (!(N ^ V)) */
1347 case 13: /* LT (N ^ V) */
1348 /* Logic operations clear V, which simplifies this to N. */
1349 if (op != CC_OP_LOGIC) {
1350 break;
1352 /* fallthru */
1353 case 10: /* PL (!N) */
1354 case 11: /* MI (N) */
1355 /* Several cases represent N normally. */
1356 if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1357 op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
1358 op == CC_OP_LOGIC) {
1359 c->v1 = QREG_CC_N;
1360 tcond = TCG_COND_LT;
1361 goto done;
1363 break;
1364 case 6: /* NE (!Z) */
1365 case 7: /* EQ (Z) */
1366 /* Some cases fold Z into N. */
1367 if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1368 op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
1369 op == CC_OP_LOGIC) {
1370 tcond = TCG_COND_EQ;
1371 c->v1 = QREG_CC_N;
1372 goto done;
1374 break;
1375 case 4: /* CC (!C) */
1376 case 5: /* CS (C) */
1377 /* Some cases fold C into X. */
1378 if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1379 op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
1380 tcond = TCG_COND_NE;
1381 c->v1 = QREG_CC_X;
1382 goto done;
1384 /* fallthru */
1385 case 8: /* VC (!V) */
1386 case 9: /* VS (V) */
1387 /* Logic operations clear V and C. */
1388 if (op == CC_OP_LOGIC) {
1389 tcond = TCG_COND_NEVER;
1390 c->v1 = c->v2;
1391 goto done;
1393 break;
1396 /* Otherwise, flush flag state to CC_OP_FLAGS. */
1397 gen_flush_flags(s);
1399 switch (cond) {
1400 case 0: /* T */
1401 case 1: /* F */
1402 default:
1403 /* Invalid, or handled above. */
1404 abort();
1405 case 2: /* HI (!C && !Z) -> !(C || Z)*/
1406 case 3: /* LS (C || Z) */
1407 c->v1 = tmp = tcg_temp_new();
1408 c->g1 = 0;
1409 tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1410 tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
1411 tcond = TCG_COND_NE;
1412 break;
1413 case 4: /* CC (!C) */
1414 case 5: /* CS (C) */
1415 c->v1 = QREG_CC_C;
1416 tcond = TCG_COND_NE;
1417 break;
1418 case 6: /* NE (!Z) */
1419 case 7: /* EQ (Z) */
1420 c->v1 = QREG_CC_Z;
1421 tcond = TCG_COND_EQ;
1422 break;
1423 case 8: /* VC (!V) */
1424 case 9: /* VS (V) */
1425 c->v1 = QREG_CC_V;
1426 tcond = TCG_COND_LT;
1427 break;
1428 case 10: /* PL (!N) */
1429 case 11: /* MI (N) */
1430 c->v1 = QREG_CC_N;
1431 tcond = TCG_COND_LT;
1432 break;
1433 case 12: /* GE (!(N ^ V)) */
1434 case 13: /* LT (N ^ V) */
1435 c->v1 = tmp = tcg_temp_new();
1436 c->g1 = 0;
1437 tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
1438 tcond = TCG_COND_LT;
1439 break;
1440 case 14: /* GT (!(Z || (N ^ V))) */
1441 case 15: /* LE (Z || (N ^ V)) */
      /* tmp = -(Z) | (N ^ V): all-ones when Z, so LT covers both terms. */
1442 c->v1 = tmp = tcg_temp_new();
1443 c->g1 = 0;
1444 tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1445 tcg_gen_neg_i32(tmp, tmp);
1446 tmp2 = tcg_temp_new();
1447 tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
1448 tcg_gen_or_i32(tmp, tmp, tmp2);
1449 tcg_temp_free(tmp2);
1450 tcond = TCG_COND_LT;
1451 break;
1454 done:
    /* Even condition numbers are the negated forms. */
1455 if ((cond & 1) == 0) {
1456 tcond = tcg_invert_cond(tcond);
1458 c->tcond = tcond;
1461 static void free_cond(DisasCompare *c)
1463 if (!c->g1) {
1464 tcg_temp_free(c->v1);
1466 if (!c->g2) {
1467 tcg_temp_free(c->v2);
/* Emit a conditional branch to L1 when m68k condition COND holds. */
1471 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1473 DisasCompare c;
1475 gen_cc_cond(&c, s, cond);
    /* Sync cc_op before branching so both paths see consistent state. */
1476 update_cc_op(s);
1477 tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1478 free_cond(&c);
1481 /* Force a TB lookup after an instruction that changes the CPU state. */
1482 static void gen_exit_tb(DisasContext *s)
1484 update_cc_op(s);
    /* s->pc is already past the current insn; resume there. */
1485 tcg_gen_movi_i32(QREG_PC, s->pc);
1486 s->base.is_jmp = DISAS_EXIT;
/*
 * Load the source operand of INSN into RESULT, sign- or zero-extending
 * per OP_SIGN; on an invalid addressing mode raise an address fault and
 * return from the *calling* function.
 */
1489 #define SRC_EA(env, result, opsize, op_sign, addrp) do { \
1490 result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp, \
1491 op_sign ? EA_LOADS : EA_LOADU, IS_USER(s)); \
1492 if (IS_NULL_QREG(result)) { \
1493 gen_addr_fault(s); \
1494 return; \
1496 } while (0)
/*
 * Store VAL to the destination operand of INSN; on an invalid
 * addressing mode raise an address fault and return from the caller.
 */
1498 #define DEST_EA(env, insn, opsize, val, addrp) do { \
1499 TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, \
1500 EA_STORE, IS_USER(s)); \
1501 if (IS_NULL_QREG(ea_result)) { \
1502 gen_addr_fault(s); \
1503 return; \
1505 } while (0)
/*
 * Whether direct TB chaining to DEST is permitted: in softmmu the
 * target must share a guest page with the TB start or the current
 * insn; user-mode can always chain.
 */
1507 static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
1509 #ifndef CONFIG_USER_ONLY
1510 return (s->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)
1511 || (s->base.pc_next & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
1512 #else
1513 return true;
1514 #endif
1517 /* Generate a jump to an immediate address. */
1518 static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
1520 if (unlikely(is_singlestepping(s))) {
1521 update_cc_op(s);
1522 tcg_gen_movi_i32(QREG_PC, dest);
      /* Single-stepping: raise the debug trap directly, never chain. */
1523 gen_raise_exception(EXCP_DEBUG);
1524 } else if (use_goto_tb(s, dest)) {
      /* Direct chaining through TB slot N. */
1525 tcg_gen_goto_tb(n);
1526 tcg_gen_movi_i32(QREG_PC, dest);
1527 tcg_gen_exit_tb(s->base.tb, n);
1528 } else {
      /* Cross-page target: fall back to a full TB lookup. */
1529 gen_jmp_im(s, dest);
1530 tcg_gen_exit_tb(NULL, 0);
1532 s->base.is_jmp = DISAS_NORETURN;
/* scc <EA>: set the destination byte to 0xff if COND holds, else 0. */
1535 DISAS_INSN(scc)
1537 DisasCompare c;
1538 int cond;
1539 TCGv tmp;
1541 cond = (insn >> 8) & 0xf;
1542 gen_cc_cond(&c, s, cond);
1544 tmp = tcg_temp_new();
1545 tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
1546 free_cond(&c);
    /* setcond yields 0/1; negate to get the 0/0xff byte result. */
1548 tcg_gen_neg_i32(tmp, tmp);
1549 DEST_EA(env, insn, OS_BYTE, tmp, NULL);
1550 tcg_temp_free(tmp);
/*
 * dbcc Dn,<disp>: if COND holds fall through; otherwise decrement the
 * low word of Dn and branch back unless the counter hit -1.
 */
1553 DISAS_INSN(dbcc)
1555 TCGLabel *l1;
1556 TCGv reg;
1557 TCGv tmp;
1558 int16_t offset;
1559 uint32_t base;
1561 reg = DREG(insn, 0);
    /* Displacement is relative to the address of the extension word. */
1562 base = s->pc;
1563 offset = (int16_t)read_im16(env, s);
1564 l1 = gen_new_label();
1565 gen_jmpcc(s, (insn >> 8) & 0xf, l1);
1567 tmp = tcg_temp_new();
1568 tcg_gen_ext16s_i32(tmp, reg);
1569 tcg_gen_addi_i32(tmp, tmp, -1);
1570 gen_partset_reg(OS_WORD, reg, tmp);
1571 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
1572 gen_jmp_tb(s, 1, base + offset);
1573 gen_set_label(l1);
1574 gen_jmp_tb(s, 0, s->pc);
1577 DISAS_INSN(undef_mac)
1579 gen_exception(s, s->base.pc_next, EXCP_LINEA);
1582 DISAS_INSN(undef_fpu)
1584 gen_exception(s, s->base.pc_next, EXCP_LINEF);
/* Raise an illegal-instruction exception, logging the opcode. */
1587 DISAS_INSN(undef)
1590 * ??? This is both instructions that are as yet unimplemented
1591 * for the 680x0 series, as well as those that are implemented
1592 * but actually illegal for CPU32 or pre-68020.
1594 qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
1595 insn, s->base.pc_next);
1596 gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
/* muls.w / mulu.w <EA>,Dn: 16x16 -> 32-bit multiply into Dn. */
1599 DISAS_INSN(mulw)
1601 TCGv reg;
1602 TCGv tmp;
1603 TCGv src;
1604 int sign;
    /* Bit 8 selects the signed form. */
1606 sign = (insn & 0x100) != 0;
1607 reg = DREG(insn, 9);
1608 tmp = tcg_temp_new();
1609 if (sign)
1610 tcg_gen_ext16s_i32(tmp, reg);
1611 else
1612 tcg_gen_ext16u_i32(tmp, reg);
1613 SRC_EA(env, src, OS_WORD, sign, NULL);
1614 tcg_gen_mul_i32(tmp, tmp, src);
1615 tcg_gen_mov_i32(reg, tmp);
1616 gen_logic_cc(s, tmp, OS_LONG);
1617 tcg_temp_free(tmp);
/* divs.w / divu.w: 32/16 divide via helper, which also sets flags. */
1620 DISAS_INSN(divw)
1622 int sign;
1623 TCGv src;
1624 TCGv destr;
1626 /* divX.w <EA>,Dn 32/16 -> 16r:16q */
1628 sign = (insn & 0x100) != 0;
1630 /* dest.l / src.w */
1632 SRC_EA(env, src, OS_WORD, sign, NULL);
    /* The helper takes the destination register *number*, not its value. */
1633 destr = tcg_const_i32(REG(insn, 9));
1634 if (sign) {
1635 gen_helper_divsw(cpu_env, destr, src);
1636 } else {
1637 gen_helper_divuw(cpu_env, destr, src);
1639 tcg_temp_free(destr);
1641 set_cc_op(s, CC_OP_FLAGS);
/* divs.l / divu.l / divsl.l / divul.l: 32- or 64-bit dividend forms. */
1644 DISAS_INSN(divl)
1646 TCGv num, reg, den;
1647 int sign;
1648 uint16_t ext;
1650 ext = read_im16(env, s);
1652 sign = (ext & 0x0800) != 0;
    /* Bit 10 of the extension word selects the 64-bit dividend form. */
1654 if (ext & 0x400) {
1655 if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
1656 gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
1657 return;
1660 /* divX.l <EA>, Dr:Dq 64/32 -> 32r:32q */
1662 SRC_EA(env, den, OS_LONG, 0, NULL);
    /* Helpers take register numbers; they update Dr/Dq and the flags. */
1663 num = tcg_const_i32(REG(ext, 12));
1664 reg = tcg_const_i32(REG(ext, 0));
1665 if (sign) {
1666 gen_helper_divsll(cpu_env, num, reg, den);
1667 } else {
1668 gen_helper_divull(cpu_env, num, reg, den);
1670 tcg_temp_free(reg);
1671 tcg_temp_free(num);
1672 set_cc_op(s, CC_OP_FLAGS);
1673 return;
1676 /* divX.l <EA>, Dq 32/32 -> 32q */
1677 /* divXl.l <EA>, Dr:Dq 32/32 -> 32r:32q */
1679 SRC_EA(env, den, OS_LONG, 0, NULL);
1680 num = tcg_const_i32(REG(ext, 12));
1681 reg = tcg_const_i32(REG(ext, 0));
1682 if (sign) {
1683 gen_helper_divsl(cpu_env, num, reg, den);
1684 } else {
1685 gen_helper_divul(cpu_env, num, reg, den);
1687 tcg_temp_free(reg);
1688 tcg_temp_free(num);
1690 set_cc_op(s, CC_OP_FLAGS);
/*
 * Branch-free two-digit BCD add with extend: dest10 = dest10 + src10 + X.
 * The algorithm adds 6 to each nibble up front, then subtracts it back
 * from nibbles that did not generate a decimal carry.
 */
1693 static void bcd_add(TCGv dest, TCGv src)
1695 TCGv t0, t1;
1698 * dest10 = dest10 + src10 + X
1700 * t1 = src
1701 * t2 = t1 + 0x066
1702 * t3 = t2 + dest + X
1703 * t4 = t2 ^ dest
1704 * t5 = t3 ^ t4
1705 * t6 = ~t5 & 0x110
1706 * t7 = (t6 >> 2) | (t6 >> 3)
1707 * return t3 - t7
1711 * t1 = (src + 0x066) + dest + X
1712 * = result with some possible exceeding 0x6
1715 t0 = tcg_const_i32(0x066);
1716 tcg_gen_add_i32(t0, t0, src);
1718 t1 = tcg_temp_new();
1719 tcg_gen_add_i32(t1, t0, dest);
1720 tcg_gen_add_i32(t1, t1, QREG_CC_X);
1722 /* we will remove exceeding 0x6 where there is no carry */
1725 * t0 = (src + 0x0066) ^ dest
1726 * = t1 without carries
1729 tcg_gen_xor_i32(t0, t0, dest);
1732 * extract the carries
1733 * t0 = t0 ^ t1
1734 * = only the carries
1737 tcg_gen_xor_i32(t0, t0, t1);
1740 * generate 0x1 where there is no carry
1741 * and for each 0x10, generate a 0x6
1744 tcg_gen_shri_i32(t0, t0, 3);
1745 tcg_gen_not_i32(t0, t0);
1746 tcg_gen_andi_i32(t0, t0, 0x22);
    /* dest = 3 * t0 == the 0x6 correction per non-carrying nibble */
1747 tcg_gen_add_i32(dest, t0, t0);
1748 tcg_gen_add_i32(dest, dest, t0);
1749 tcg_temp_free(t0);
1752 * remove the exceeding 0x6
1753 * for digits that have not generated a carry
1756 tcg_gen_sub_i32(dest, t1, dest);
1757 tcg_temp_free(t1);
/*
 * Branch-free two-digit BCD subtract with extend:
 * dest10 = dest10 - src10 - X, implemented as a BCD add of the
 * ten's complement (see bcd_add for the nibble-correction scheme).
 */
1760 static void bcd_sub(TCGv dest, TCGv src)
1762 TCGv t0, t1, t2;
1765 * dest10 = dest10 - src10 - X
1766 * = bcd_add(dest + 1 - X, 0x199 - src)
1769 /* t0 = 0x066 + (0x199 - src) */
1771 t0 = tcg_temp_new();
1772 tcg_gen_subfi_i32(t0, 0x1ff, src);
1774 /* t1 = t0 + dest + 1 - X*/
1776 t1 = tcg_temp_new();
1777 tcg_gen_add_i32(t1, t0, dest);
1778 tcg_gen_addi_i32(t1, t1, 1);
1779 tcg_gen_sub_i32(t1, t1, QREG_CC_X);
1781 /* t2 = t0 ^ dest */
1783 t2 = tcg_temp_new();
1784 tcg_gen_xor_i32(t2, t0, dest);
1786 /* t0 = t1 ^ t2 */
1788 tcg_gen_xor_i32(t0, t1, t2);
1791 * t2 = ~t0 & 0x110
1792 * t0 = (t2 >> 2) | (t2 >> 3)
1794 * to fit on 8bit operands, changed in:
1796 * t2 = ~(t0 >> 3) & 0x22
1797 * t0 = t2 + t2
1798 * t0 = t0 + t2
1801 tcg_gen_shri_i32(t2, t0, 3);
1802 tcg_gen_not_i32(t2, t2);
1803 tcg_gen_andi_i32(t2, t2, 0x22);
1804 tcg_gen_add_i32(t0, t2, t2);
1805 tcg_gen_add_i32(t0, t0, t2);
1806 tcg_temp_free(t2);
1808 /* return t1 - t0 */
1810 tcg_gen_sub_i32(dest, t1, t0);
1811 tcg_temp_free(t0);
1812 tcg_temp_free(t1);
/*
 * Set flags after a BCD operation: C and X come from bit 8 of VAL;
 * !Z is sticky -- the low byte is OR-ed into CC_Z so Z stays clear
 * once any nonzero result has been seen.
 */
1815 static void bcd_flags(TCGv val)
1817 tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
1818 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);
1820 tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);
1822 tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
/* abcd Dy,Dx: BCD add of the low bytes of two data registers. */
1825 DISAS_INSN(abcd_reg)
1827 TCGv src;
1828 TCGv dest;
1830 gen_flush_flags(s); /* !Z is sticky */
1832 src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1833 dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1834 bcd_add(dest, src);
1835 gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1837 bcd_flags(dest);
/* abcd -(Ay),-(Ax): BCD add of two predecremented memory bytes. */
1840 DISAS_INSN(abcd_mem)
1842 TCGv src, dest, addr;
1844 gen_flush_flags(s); /* !Z is sticky */
1846 /* Indirect pre-decrement load (mode 4) */
1848 src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1849 NULL_QREG, NULL, EA_LOADU, IS_USER(s));
1850 dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1851 NULL_QREG, &addr, EA_LOADU, IS_USER(s));
1853 bcd_add(dest, src);
    /* Store back through the address captured by the dest load. */
1855 gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
1856 EA_STORE, IS_USER(s));
1858 bcd_flags(dest);
/* sbcd Dy,Dx: BCD subtract of the low bytes of two data registers. */
1861 DISAS_INSN(sbcd_reg)
1863 TCGv src, dest;
1865 gen_flush_flags(s); /* !Z is sticky */
1867 src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1868 dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1870 bcd_sub(dest, src);
1872 gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1874 bcd_flags(dest);
/* sbcd -(Ay),-(Ax): BCD subtract of two predecremented memory bytes. */
1877 DISAS_INSN(sbcd_mem)
1879 TCGv src, dest, addr;
1881 gen_flush_flags(s); /* !Z is sticky */
1883 /* Indirect pre-decrement load (mode 4) */
1885 src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1886 NULL_QREG, NULL, EA_LOADU, IS_USER(s));
1887 dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1888 NULL_QREG, &addr, EA_LOADU, IS_USER(s));
1890 bcd_sub(dest, src);
    /* Store back through the address captured by the dest load. */
1892 gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
1893 EA_STORE, IS_USER(s));
1895 bcd_flags(dest);
/* nbcd <EA>: BCD negate, computed as 0 - operand - X. */
1898 DISAS_INSN(nbcd)
1900 TCGv src, dest;
1901 TCGv addr;
1903 gen_flush_flags(s); /* !Z is sticky */
1905 SRC_EA(env, src, OS_BYTE, 0, &addr);
1907 dest = tcg_const_i32(0);
1908 bcd_sub(dest, src);
1910 DEST_EA(env, insn, OS_BYTE, dest, &addr);
1912 bcd_flags(dest);
1914 tcg_temp_free(dest);
/*
 * add/sub <EA>,Dn and Dn,<EA>: bit 14 selects add vs sub, bit 8
 * selects register-to-memory direction.  X is computed as the
 * unsigned carry/borrow; N/V/Z come from the lazy CC_OP_ADD/SUB state.
 */
1917 DISAS_INSN(addsub)
1919 TCGv reg;
1920 TCGv dest;
1921 TCGv src;
1922 TCGv tmp;
1923 TCGv addr;
1924 int add;
1925 int opsize;
1927 add = (insn & 0x4000) != 0;
1928 opsize = insn_opsize(insn);
1929 reg = gen_extend(s, DREG(insn, 9), opsize, 1);
1930 dest = tcg_temp_new();
1931 if (insn & 0x100) {
1932 SRC_EA(env, tmp, opsize, 1, &addr);
1933 src = reg;
1934 } else {
1935 tmp = reg;
1936 SRC_EA(env, src, opsize, 1, NULL);
1938 if (add) {
1939 tcg_gen_add_i32(dest, tmp, src);
      /* X = unsigned carry: result < addend */
1940 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
1941 set_cc_op(s, CC_OP_ADDB + opsize);
1942 } else {
      /* X = unsigned borrow: minuend < subtrahend */
1943 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
1944 tcg_gen_sub_i32(dest, tmp, src);
1945 set_cc_op(s, CC_OP_SUBB + opsize);
1947 gen_update_cc_add(dest, src, opsize);
1948 if (insn & 0x100) {
1949 DEST_EA(env, insn, opsize, dest, &addr);
1950 } else {
1951 gen_partset_reg(opsize, DREG(insn, 9), dest);
1953 tcg_temp_free(dest);
1956 /* Reverse the order of the bits in REG. */
1957 DISAS_INSN(bitrev)
1959 TCGv reg;
1960 reg = DREG(insn, 0);
1961 gen_helper_bitrev(reg, reg);
/*
 * btst/bchg/bclr/bset with the bit number in a data register.
 * Bit numbering is mod 8 for memory operands and mod 32 for registers;
 * Z reflects the *original* value of the tested bit.
 */
1964 DISAS_INSN(bitop_reg)
1966 int opsize;
1967 int op;
1968 TCGv src1;
1969 TCGv src2;
1970 TCGv tmp;
1971 TCGv addr;
1972 TCGv dest;
    /* Memory operands are byte-sized; register operands are long. */
1974 if ((insn & 0x38) != 0)
1975 opsize = OS_BYTE;
1976 else
1977 opsize = OS_LONG;
1978 op = (insn >> 6) & 3;
1979 SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1981 gen_flush_flags(s);
1982 src2 = tcg_temp_new();
1983 if (opsize == OS_BYTE)
1984 tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1985 else
1986 tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1988 tmp = tcg_const_i32(1);
1989 tcg_gen_shl_i32(tmp, tmp, src2);
1990 tcg_temp_free(src2);
1992 tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1994 dest = tcg_temp_new();
1995 switch (op) {
1996 case 1: /* bchg */
1997 tcg_gen_xor_i32(dest, src1, tmp);
1998 break;
1999 case 2: /* bclr */
2000 tcg_gen_andc_i32(dest, src1, tmp);
2001 break;
2002 case 3: /* bset */
2003 tcg_gen_or_i32(dest, src1, tmp);
2004 break;
2005 default: /* btst */
2006 break;
2008 tcg_temp_free(tmp);
2009 if (op) {
2010 DEST_EA(env, insn, opsize, dest, &addr);
2012 tcg_temp_free(dest);
/* sats Dn (ColdFire): saturate Dn based on the overflow flag. */
2015 DISAS_INSN(sats)
2017 TCGv reg;
2018 reg = DREG(insn, 0);
    /* The helper needs the concrete V flag, so flush the lazy state. */
2019 gen_flush_flags(s);
2020 gen_helper_sats(reg, reg, QREG_CC_V);
2021 gen_logic_cc(s, reg, OS_LONG);
2024 static void gen_push(DisasContext *s, TCGv val)
2026 TCGv tmp;
2028 tmp = tcg_temp_new();
2029 tcg_gen_subi_i32(tmp, QREG_SP, 4);
2030 gen_store(s, OS_LONG, tmp, val, IS_USER(s));
2031 tcg_gen_mov_i32(QREG_SP, tmp);
2032 tcg_temp_free(tmp);
2035 static TCGv mreg(int reg)
2037 if (reg < 8) {
2038 /* Dx */
2039 return cpu_dregs[reg];
2041 /* Ax */
2042 return cpu_aregs[reg & 7];
/*
 * movem: move multiple registers to/from memory per the mask in the
 * extension word.  Loads read all values before writing any register;
 * the predecrement store form walks the mask in reverse register order.
 */
2045 DISAS_INSN(movem)
2047 TCGv addr, incr, tmp, r[16];
2048 int is_load = (insn & 0x0400) != 0;
2049 int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
2050 uint16_t mask = read_im16(env, s);
2051 int mode = extract32(insn, 3, 3);
2052 int reg0 = REG(insn, 0);
2053 int i;
2055 tmp = cpu_aregs[reg0];
2057 switch (mode) {
2058 case 0: /* data register direct */
2059 case 1: /* addr register direct */
2060 do_addr_fault:
2061 gen_addr_fault(s);
2062 return;
2064 case 2: /* indirect */
2065 break;
2067 case 3: /* indirect post-increment */
2068 if (!is_load) {
2069 /* post-increment is not allowed */
2070 goto do_addr_fault;
2072 break;
2074 case 4: /* indirect pre-decrement */
2075 if (is_load) {
2076 /* pre-decrement is not allowed */
2077 goto do_addr_fault;
2080 * We want a bare copy of the address reg, without any pre-decrement
2081 * adjustment, as gen_lea would provide.
2083 break;
2085 default:
2086 tmp = gen_lea_mode(env, s, mode, reg0, opsize);
2087 if (IS_NULL_QREG(tmp)) {
2088 goto do_addr_fault;
2090 break;
2093 addr = tcg_temp_new();
2094 tcg_gen_mov_i32(addr, tmp);
2095 incr = tcg_const_i32(opsize_bytes(opsize));
2097 if (is_load) {
2098 /* memory to register */
    /* First pass: load everything, so a fault leaves registers intact. */
2099 for (i = 0; i < 16; i++) {
2100 if (mask & (1 << i)) {
2101 r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
2102 tcg_gen_add_i32(addr, addr, incr);
2105 for (i = 0; i < 16; i++) {
2106 if (mask & (1 << i)) {
2107 tcg_gen_mov_i32(mreg(i), r[i]);
2108 tcg_temp_free(r[i]);
2111 if (mode == 3) {
2112 /* post-increment: movem (An)+,X */
2113 tcg_gen_mov_i32(cpu_aregs[reg0], addr);
2115 } else {
2116 /* register to memory */
2117 if (mode == 4) {
2118 /* pre-decrement: movem X,-(An) */
2119 for (i = 15; i >= 0; i--) {
2120 if ((mask << i) & 0x8000) {
2121 tcg_gen_sub_i32(addr, addr, incr);
2122 if (reg0 + 8 == i &&
2123 m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
2125 * M68020+: if the addressing register is the
2126 * register moved to memory, the value written
2127 * is the initial value decremented by the size of
2128 * the operation, regardless of how many actual
2129 * stores have been performed until this point.
2130 * M68000/M68010: the value is the initial value.
2132 tmp = tcg_temp_new();
2133 tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
2134 gen_store(s, opsize, addr, tmp, IS_USER(s));
2135 tcg_temp_free(tmp);
2136 } else {
2137 gen_store(s, opsize, addr, mreg(i), IS_USER(s));
2141 tcg_gen_mov_i32(cpu_aregs[reg0], addr);
2142 } else {
2143 for (i = 0; i < 16; i++) {
2144 if (mask & (1 << i)) {
2145 gen_store(s, opsize, addr, mreg(i), IS_USER(s));
2146 tcg_gen_add_i32(addr, addr, incr);
2152 tcg_temp_free(incr);
2153 tcg_temp_free(addr);
/*
 * movep: transfer 2 or 4 bytes between a data register and alternate
 * memory bytes at (An)+disp, most-significant byte first, stepping the
 * address by 2 between bytes.
 */
2156 DISAS_INSN(movep)
2158 uint8_t i;
2159 int16_t displ;
2160 TCGv reg;
2161 TCGv addr;
2162 TCGv abuf;
2163 TCGv dbuf;
2165 displ = read_im16(env, s);
2167 addr = AREG(insn, 0);
2168 reg = DREG(insn, 9);
2170 abuf = tcg_temp_new();
2171 tcg_gen_addi_i32(abuf, addr, displ);
2172 dbuf = tcg_temp_new();
    /* Bit 6 selects long (4 bytes) vs word (2 bytes). */
2174 if (insn & 0x40) {
2175 i = 4;
2176 } else {
2177 i = 2;
    /* Bit 7 selects register-to-memory direction. */
2180 if (insn & 0x80) {
2181 for ( ; i > 0 ; i--) {
2182 tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
2183 tcg_gen_qemu_st8(dbuf, abuf, IS_USER(s));
2184 if (i > 1) {
2185 tcg_gen_addi_i32(abuf, abuf, 2);
2188 } else {
2189 for ( ; i > 0 ; i--) {
2190 tcg_gen_qemu_ld8u(dbuf, abuf, IS_USER(s));
2191 tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
2192 if (i > 1) {
2193 tcg_gen_addi_i32(abuf, abuf, 2);
2197 tcg_temp_free(abuf);
2198 tcg_temp_free(dbuf);
/*
 * btst/bchg/bclr/bset with an immediate bit number from the extension
 * word.  Z reflects the original value of the tested bit; reserved
 * high bits in the extension word make the insn undefined.
 */
2201 DISAS_INSN(bitop_im)
2203 int opsize;
2204 int op;
2205 TCGv src1;
2206 uint32_t mask;
2207 int bitnum;
2208 TCGv tmp;
2209 TCGv addr;
    /* Memory operands are byte-sized; register operands are long. */
2211 if ((insn & 0x38) != 0)
2212 opsize = OS_BYTE;
2213 else
2214 opsize = OS_LONG;
2215 op = (insn >> 6) & 3;
2217 bitnum = read_im16(env, s);
2218 if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2219 if (bitnum & 0xfe00) {
2220 disas_undef(env, s, insn);
2221 return;
2223 } else {
2224 if (bitnum & 0xff00) {
2225 disas_undef(env, s, insn);
2226 return;
2230 SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2232 gen_flush_flags(s);
2233 if (opsize == OS_BYTE)
2234 bitnum &= 7;
2235 else
2236 bitnum &= 31;
2237 mask = 1 << bitnum;
2239 tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2241 if (op) {
2242 tmp = tcg_temp_new();
2243 switch (op) {
2244 case 1: /* bchg */
2245 tcg_gen_xori_i32(tmp, src1, mask);
2246 break;
2247 case 2: /* bclr */
2248 tcg_gen_andi_i32(tmp, src1, ~mask);
2249 break;
2250 case 3: /* bset */
2251 tcg_gen_ori_i32(tmp, src1, mask);
2252 break;
2253 default: /* btst */
2254 break;
2256 DEST_EA(env, insn, opsize, tmp, &addr);
2257 tcg_temp_free(tmp);
2261 static TCGv gen_get_ccr(DisasContext *s)
2263 TCGv dest;
2265 update_cc_op(s);
2266 dest = tcg_temp_new();
2267 gen_helper_get_ccr(dest, cpu_env);
2268 return dest;
/* Build the full SR: system bits from QREG_SR merged with the CCR. */
2271 static TCGv gen_get_sr(DisasContext *s)
2273 TCGv ccr;
2274 TCGv sr;
2276 ccr = gen_get_ccr(s);
2277 sr = tcg_temp_new();
    /* Mask off the condition-code byte, then OR in the live CCR. */
2278 tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
2279 tcg_gen_or_i32(sr, sr, ccr);
2280 tcg_temp_free(ccr);
2281 return sr;
/*
 * Load an immediate into the CCR (ccr_only) or the whole SR (helper).
 * Note the internal flag encoding: C/X hold 0/1, V/N hold their sign
 * bit (0/-1), and Z holds a value whose zeroness encodes the flag.
 */
2284 static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
2286 if (ccr_only) {
2287 tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
2288 tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
2289 tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
2290 tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
2291 tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
2292 } else {
2293 TCGv sr = tcg_const_i32(val);
2294 gen_helper_set_sr(cpu_env, sr);
2295 tcg_temp_free(sr);
2297 set_cc_op(s, CC_OP_FLAGS);
/* Set CCR or SR from a TCG value via the appropriate helper. */
2300 static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
2302 if (ccr_only) {
2303 gen_helper_set_ccr(cpu_env, val);
2304 } else {
2305 gen_helper_set_sr(cpu_env, val);
2307 set_cc_op(s, CC_OP_FLAGS);
/*
 * Common move-to-SR/CCR: mode 7/reg 4 (EA bits 0x3c) is the immediate
 * form; anything else reads a word source operand.
 */
2310 static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
2311 bool ccr_only)
2313 if ((insn & 0x3f) == 0x3c) {
2314 uint16_t val;
2315 val = read_im16(env, s);
2316 gen_set_sr_im(s, val, ccr_only);
2317 } else {
2318 TCGv src;
2319 SRC_EA(env, src, OS_WORD, 0, NULL);
2320 gen_set_sr(s, src, ccr_only);
/*
 * Immediate arithmetic/logic group: ori/andi/subi/addi/eori/cmpi.
 * The 0x3c EA form targets CCR (byte) or SR (word, privileged) for the
 * logical ops; cmpi writes no destination.
 */
2324 DISAS_INSN(arith_im)
2326 int op;
2327 TCGv im;
2328 TCGv src1;
2329 TCGv dest;
2330 TCGv addr;
2331 int opsize;
2332 bool with_SR = ((insn & 0x3f) == 0x3c);
2334 op = (insn >> 9) & 7;
2335 opsize = insn_opsize(insn);
2336 switch (opsize) {
2337 case OS_BYTE:
2338 im = tcg_const_i32((int8_t)read_im8(env, s));
2339 break;
2340 case OS_WORD:
2341 im = tcg_const_i32((int16_t)read_im16(env, s));
2342 break;
2343 case OS_LONG:
2344 im = tcg_const_i32(read_im32(env, s));
2345 break;
2346 default:
2347 g_assert_not_reached();
2350 if (with_SR) {
2351 /* SR/CCR can only be used with andi/eori/ori */
2352 if (op == 2 || op == 3 || op == 6) {
2353 disas_undef(env, s, insn);
2354 return;
2356 switch (opsize) {
2357 case OS_BYTE:
2358 src1 = gen_get_ccr(s);
2359 break;
2360 case OS_WORD:
2361 if (IS_USER(s)) {
2362 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
2363 return;
2365 src1 = gen_get_sr(s);
2366 break;
2367 default:
2368 /* OS_LONG; others already g_assert_not_reached. */
2369 disas_undef(env, s, insn);
2370 return;
2372 } else {
2373 SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
2375 dest = tcg_temp_new();
2376 switch (op) {
2377 case 0: /* ori */
2378 tcg_gen_or_i32(dest, src1, im);
2379 if (with_SR) {
2380 gen_set_sr(s, dest, opsize == OS_BYTE);
2381 } else {
2382 DEST_EA(env, insn, opsize, dest, &addr);
2383 gen_logic_cc(s, dest, opsize);
2385 break;
2386 case 1: /* andi */
2387 tcg_gen_and_i32(dest, src1, im);
2388 if (with_SR) {
2389 gen_set_sr(s, dest, opsize == OS_BYTE);
2390 } else {
2391 DEST_EA(env, insn, opsize, dest, &addr);
2392 gen_logic_cc(s, dest, opsize);
2394 break;
2395 case 2: /* subi */
    /* X = borrow, computed before the subtract clobbers nothing. */
2396 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
2397 tcg_gen_sub_i32(dest, src1, im);
2398 gen_update_cc_add(dest, im, opsize);
2399 set_cc_op(s, CC_OP_SUBB + opsize);
2400 DEST_EA(env, insn, opsize, dest, &addr);
2401 break;
2402 case 3: /* addi */
2403 tcg_gen_add_i32(dest, src1, im);
2404 gen_update_cc_add(dest, im, opsize);
    /* X = carry: result < addend. */
2405 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
2406 set_cc_op(s, CC_OP_ADDB + opsize);
2407 DEST_EA(env, insn, opsize, dest, &addr);
2408 break;
2409 case 5: /* eori */
2410 tcg_gen_xor_i32(dest, src1, im);
2411 if (with_SR) {
2412 gen_set_sr(s, dest, opsize == OS_BYTE);
2413 } else {
2414 DEST_EA(env, insn, opsize, dest, &addr);
2415 gen_logic_cc(s, dest, opsize);
2417 break;
2418 case 6: /* cmpi */
2419 gen_update_cc_cmp(s, src1, im, opsize);
2420 break;
2421 default:
2422 abort();
2424 tcg_temp_free(im);
2425 tcg_temp_free(dest);
/*
 * cas Dc,Du,<EA>: compare-and-swap a memory operand, lowered to a TCG
 * atomic cmpxchg.  Flags are set as for cmp of (old value, Dc), and Dc
 * always receives the old memory value.
 */
2428 DISAS_INSN(cas)
2430 int opsize;
2431 TCGv addr;
2432 uint16_t ext;
2433 TCGv load;
2434 TCGv cmp;
2435 MemOp opc;
2437 switch ((insn >> 9) & 3) {
2438 case 1:
2439 opsize = OS_BYTE;
2440 opc = MO_SB;
2441 break;
2442 case 2:
2443 opsize = OS_WORD;
2444 opc = MO_TESW;
2445 break;
2446 case 3:
2447 opsize = OS_LONG;
2448 opc = MO_TESL;
2449 break;
2450 default:
2451 g_assert_not_reached();
2454 ext = read_im16(env, s);
2456 /* cas Dc,Du,<EA> */
2458 addr = gen_lea(env, s, insn, opsize);
2459 if (IS_NULL_QREG(addr)) {
2460 gen_addr_fault(s);
2461 return;
2464 cmp = gen_extend(s, DREG(ext, 0), opsize, 1);
2467 * if <EA> == Dc then
2468 * <EA> = Du
2469 * Dc = <EA> (because <EA> == Dc)
2470 * else
2471 * Dc = <EA>
2474 load = tcg_temp_new();
2475 tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
2476 IS_USER(s), opc);
2477 /* update flags before setting cmp to load */
2478 gen_update_cc_cmp(s, load, cmp, opsize);
2479 gen_partset_reg(opsize, DREG(ext, 0), load);
2481 tcg_temp_free(load);
    /* Apply any deferred post-increment / pre-decrement writeback. */
2483 switch (extract32(insn, 3, 3)) {
2484 case 3: /* Indirect postincrement. */
2485 tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
2486 break;
2487 case 4: /* Indirect predecrememnt. */
2488 tcg_gen_mov_i32(AREG(insn, 0), addr);
2489 break;
/*
 * cas2.w: double word-sized compare-and-swap.  There is no parallel
 * word helper, so under CF_PARALLEL we punt to exit_atomic and let the
 * exclusive-execution path run the non-parallel helper.
 */
2493 DISAS_INSN(cas2w)
2495 uint16_t ext1, ext2;
2496 TCGv addr1, addr2;
2497 TCGv regs;
2499 /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2501 ext1 = read_im16(env, s);
2503 if (ext1 & 0x8000) {
2504 /* Address Register */
2505 addr1 = AREG(ext1, 12);
2506 } else {
2507 /* Data Register */
2508 addr1 = DREG(ext1, 12);
2511 ext2 = read_im16(env, s);
2512 if (ext2 & 0x8000) {
2513 /* Address Register */
2514 addr2 = AREG(ext2, 12);
2515 } else {
2516 /* Data Register */
2517 addr2 = DREG(ext2, 12);
2521 * if (R1) == Dc1 && (R2) == Dc2 then
2522 * (R1) = Du1
2523 * (R2) = Du2
2524 * else
2525 * Dc1 = (R1)
2526 * Dc2 = (R2)
    /* Pack the four register numbers into one helper argument. */
2529 regs = tcg_const_i32(REG(ext2, 6) |
2530 (REG(ext1, 6) << 3) |
2531 (REG(ext2, 0) << 6) |
2532 (REG(ext1, 0) << 9));
2533 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2534 gen_helper_exit_atomic(cpu_env);
2535 } else {
2536 gen_helper_cas2w(cpu_env, regs, addr1, addr2);
2538 tcg_temp_free(regs);
2540 /* Note that cas2w also assigned to env->cc_op. */
2541 s->cc_op = CC_OP_CMPW;
2542 s->cc_op_synced = 1;
/*
 * cas2.l: double long-sized compare-and-swap.  Unlike cas2w there is a
 * dedicated parallel helper, used when the TB runs under CF_PARALLEL.
 */
2545 DISAS_INSN(cas2l)
2547 uint16_t ext1, ext2;
2548 TCGv addr1, addr2, regs;
2550 /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2552 ext1 = read_im16(env, s);
2554 if (ext1 & 0x8000) {
2555 /* Address Register */
2556 addr1 = AREG(ext1, 12);
2557 } else {
2558 /* Data Register */
2559 addr1 = DREG(ext1, 12);
2562 ext2 = read_im16(env, s);
2563 if (ext2 & 0x8000) {
2564 /* Address Register */
2565 addr2 = AREG(ext2, 12);
2566 } else {
2567 /* Data Register */
2568 addr2 = DREG(ext2, 12);
2572 * if (R1) == Dc1 && (R2) == Dc2 then
2573 * (R1) = Du1
2574 * (R2) = Du2
2575 * else
2576 * Dc1 = (R1)
2577 * Dc2 = (R2)
    /* Pack the four register numbers into one helper argument. */
2580 regs = tcg_const_i32(REG(ext2, 6) |
2581 (REG(ext1, 6) << 3) |
2582 (REG(ext2, 0) << 6) |
2583 (REG(ext1, 0) << 9));
2584 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2585 gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
2586 } else {
2587 gen_helper_cas2l(cpu_env, regs, addr1, addr2);
2589 tcg_temp_free(regs);
2591 /* Note that cas2l also assigned to env->cc_op. */
2592 s->cc_op = CC_OP_CMPL;
2593 s->cc_op_synced = 1;
2596 DISAS_INSN(byterev)
2598 TCGv reg;
2600 reg = DREG(insn, 0);
2601 tcg_gen_bswap32_i32(reg, reg);
/*
 * move.b/.w/.l and movea: copy source EA to destination EA.  movea
 * (dest op == 1) writes a sign-extended address register and does not
 * touch the flags; all other forms set the logic flags.
 */
2604 DISAS_INSN(move)
2606 TCGv src;
2607 TCGv dest;
2608 int op;
2609 int opsize;
2611 switch (insn >> 12) {
2612 case 1: /* move.b */
2613 opsize = OS_BYTE;
2614 break;
2615 case 2: /* move.l */
2616 opsize = OS_LONG;
2617 break;
2618 case 3: /* move.w */
2619 opsize = OS_WORD;
2620 break;
2621 default:
2622 abort();
2624 SRC_EA(env, src, opsize, 1, NULL);
2625 op = (insn >> 6) & 7;
2626 if (op == 1) {
2627 /* movea */
2628 /* The value will already have been sign extended. */
2629 dest = AREG(insn, 9);
2630 tcg_gen_mov_i32(dest, src);
2631 } else {
2632 /* normal move */
2633 uint16_t dest_ea;
    /* Rebuild a mode/register EA word from the swapped dest fields. */
2634 dest_ea = ((insn >> 9) & 7) | (op << 3);
2635 DEST_EA(env, dest_ea, opsize, src, NULL);
2636 /* This will be correct because loads sign extend. */
2637 gen_logic_cc(s, src, opsize);
/* negx <EA>: negate with extend, result = -(operand + X); !Z sticky. */
2641 DISAS_INSN(negx)
2643 TCGv z;
2644 TCGv src;
2645 TCGv addr;
2646 int opsize;
2648 opsize = insn_opsize(insn);
2649 SRC_EA(env, src, opsize, 1, &addr);
2651 gen_flush_flags(s); /* compute old Z */
2654 * Perform subtract with borrow.
2655 * (X, N) = -(src + X);
2658 z = tcg_const_i32(0);
2659 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
2660 tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X)
2661 tcg_temp_free(z);
2662 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2664 tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
2667 * Compute signed-overflow for negation. The normal formula for
2668 * subtraction is (res ^ src) & (src ^ dest), but with dest==0
2669 * this simplifies to res & src.
2672 tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);
2674 /* Copy the rest of the results into place. */
2675 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2676 tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2678 set_cc_op(s, CC_OP_FLAGS);
2680 /* result is in QREG_CC_N */
2682 DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
2685 DISAS_INSN(lea)
2687 TCGv reg;
2688 TCGv tmp;
2690 reg = AREG(insn, 9);
2691 tmp = gen_lea(env, s, insn, OS_LONG);
2692 if (IS_NULL_QREG(tmp)) {
2693 gen_addr_fault(s);
2694 return;
2696 tcg_gen_mov_i32(reg, tmp);
/* clr <EA>: store zero and set the logic flags accordingly. */
2699 DISAS_INSN(clr)
2701 int opsize;
2702 TCGv zero;
2704 zero = tcg_const_i32(0);
2706 opsize = insn_opsize(insn);
2707 DEST_EA(env, insn, opsize, zero, NULL);
2708 gen_logic_cc(s, zero, opsize);
2709 tcg_temp_free(zero);
2712 DISAS_INSN(move_from_ccr)
2714 TCGv ccr;
2716 ccr = gen_get_ccr(s);
2717 DEST_EA(env, insn, OS_WORD, ccr, NULL);
/* neg <EA>: two's-complement negate; X is set for any nonzero result. */
2720 DISAS_INSN(neg)
2722 TCGv src1;
2723 TCGv dest;
2724 TCGv addr;
2725 int opsize;
2727 opsize = insn_opsize(insn);
2728 SRC_EA(env, src1, opsize, 1, &addr);
2729 dest = tcg_temp_new();
2730 tcg_gen_neg_i32(dest, src1);
2731 set_cc_op(s, CC_OP_SUBB + opsize);
2732 gen_update_cc_add(dest, src1, opsize);
    /* X (= C) is set whenever the operand was nonzero. */
2733 tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
2734 DEST_EA(env, insn, opsize, dest, &addr);
2735 tcg_temp_free(dest);
2738 DISAS_INSN(move_to_ccr)
2740 gen_move_to_sr(env, s, insn, true);
2743 DISAS_INSN(not)
2745 TCGv src1;
2746 TCGv dest;
2747 TCGv addr;
2748 int opsize;
2750 opsize = insn_opsize(insn);
2751 SRC_EA(env, src1, opsize, 1, &addr);
2752 dest = tcg_temp_new();
2753 tcg_gen_not_i32(dest, src1);
2754 DEST_EA(env, insn, opsize, dest, &addr);
2755 gen_logic_cc(s, dest, opsize);
2758 DISAS_INSN(swap)
2760 TCGv src1;
2761 TCGv src2;
2762 TCGv reg;
2764 src1 = tcg_temp_new();
2765 src2 = tcg_temp_new();
2766 reg = DREG(insn, 0);
2767 tcg_gen_shli_i32(src1, reg, 16);
2768 tcg_gen_shri_i32(src2, reg, 16);
2769 tcg_gen_or_i32(reg, src1, src2);
2770 tcg_temp_free(src2);
2771 tcg_temp_free(src1);
2772 gen_logic_cc(s, reg, OS_LONG);
2775 DISAS_INSN(bkpt)
2777 gen_exception(s, s->base.pc_next, EXCP_DEBUG);
2780 DISAS_INSN(pea)
2782 TCGv tmp;
2784 tmp = gen_lea(env, s, insn, OS_LONG);
2785 if (IS_NULL_QREG(tmp)) {
2786 gen_addr_fault(s);
2787 return;
2789 gen_push(s, tmp);
2792 DISAS_INSN(ext)
2794 int op;
2795 TCGv reg;
2796 TCGv tmp;
2798 reg = DREG(insn, 0);
2799 op = (insn >> 6) & 7;
2800 tmp = tcg_temp_new();
2801 if (op == 3)
2802 tcg_gen_ext16s_i32(tmp, reg);
2803 else
2804 tcg_gen_ext8s_i32(tmp, reg);
2805 if (op == 2)
2806 gen_partset_reg(OS_WORD, reg, tmp);
2807 else
2808 tcg_gen_mov_i32(reg, tmp);
2809 gen_logic_cc(s, tmp, OS_LONG);
2810 tcg_temp_free(tmp);
2813 DISAS_INSN(tst)
2815 int opsize;
2816 TCGv tmp;
2818 opsize = insn_opsize(insn);
2819 SRC_EA(env, tmp, opsize, 1, NULL);
2820 gen_logic_cc(s, tmp, opsize);
DISAS_INSN(pulse)
{
    /* Implemented as a NOP. */
}
2828 DISAS_INSN(illegal)
2830 gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
2833 /* ??? This should be atomic. */
2834 DISAS_INSN(tas)
2836 TCGv dest;
2837 TCGv src1;
2838 TCGv addr;
2840 dest = tcg_temp_new();
2841 SRC_EA(env, src1, OS_BYTE, 1, &addr);
2842 gen_logic_cc(s, src1, OS_BYTE);
2843 tcg_gen_ori_i32(dest, src1, 0x80);
2844 DEST_EA(env, insn, OS_BYTE, dest, &addr);
2845 tcg_temp_free(dest);
2848 DISAS_INSN(mull)
2850 uint16_t ext;
2851 TCGv src1;
2852 int sign;
2854 ext = read_im16(env, s);
2856 sign = ext & 0x800;
2858 if (ext & 0x400) {
2859 if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
2860 gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
2861 return;
2864 SRC_EA(env, src1, OS_LONG, 0, NULL);
2866 if (sign) {
2867 tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2868 } else {
2869 tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2871 /* if Dl == Dh, 68040 returns low word */
2872 tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
2873 tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
2874 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);
2876 tcg_gen_movi_i32(QREG_CC_V, 0);
2877 tcg_gen_movi_i32(QREG_CC_C, 0);
2879 set_cc_op(s, CC_OP_FLAGS);
2880 return;
2882 SRC_EA(env, src1, OS_LONG, 0, NULL);
2883 if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2884 tcg_gen_movi_i32(QREG_CC_C, 0);
2885 if (sign) {
2886 tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2887 /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
2888 tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
2889 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
2890 } else {
2891 tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2892 /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
2893 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
2895 tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
2896 tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);
2898 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
2900 set_cc_op(s, CC_OP_FLAGS);
2901 } else {
2903 * The upper 32 bits of the product are discarded, so
2904 * muls.l and mulu.l are functionally equivalent.
2906 tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
2907 gen_logic_cc(s, DREG(ext, 12), OS_LONG);
2911 static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
2913 TCGv reg;
2914 TCGv tmp;
2916 reg = AREG(insn, 0);
2917 tmp = tcg_temp_new();
2918 tcg_gen_subi_i32(tmp, QREG_SP, 4);
2919 gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
2920 if ((insn & 7) != 7) {
2921 tcg_gen_mov_i32(reg, tmp);
2923 tcg_gen_addi_i32(QREG_SP, tmp, offset);
2924 tcg_temp_free(tmp);
2927 DISAS_INSN(link)
2929 int16_t offset;
2931 offset = read_im16(env, s);
2932 gen_link(s, insn, offset);
2935 DISAS_INSN(linkl)
2937 int32_t offset;
2939 offset = read_im32(env, s);
2940 gen_link(s, insn, offset);
2943 DISAS_INSN(unlk)
2945 TCGv src;
2946 TCGv reg;
2947 TCGv tmp;
2949 src = tcg_temp_new();
2950 reg = AREG(insn, 0);
2951 tcg_gen_mov_i32(src, reg);
2952 tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
2953 tcg_gen_mov_i32(reg, tmp);
2954 tcg_gen_addi_i32(QREG_SP, src, 4);
2955 tcg_temp_free(src);
2956 tcg_temp_free(tmp);
#if defined(CONFIG_SOFTMMU)
/* reset: privileged; traps in user mode, otherwise resets peripherals. */
DISAS_INSN(reset)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_helper_reset(cpu_env);
}
#endif
/* nop: intentionally empty. */
DISAS_INSN(nop)
{
}
2975 DISAS_INSN(rtd)
2977 TCGv tmp;
2978 int16_t offset = read_im16(env, s);
2980 tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2981 tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2982 gen_jmp(s, tmp);
2985 DISAS_INSN(rtr)
2987 TCGv tmp;
2988 TCGv ccr;
2989 TCGv sp;
2991 sp = tcg_temp_new();
2992 ccr = gen_load(s, OS_WORD, QREG_SP, 0, IS_USER(s));
2993 tcg_gen_addi_i32(sp, QREG_SP, 2);
2994 tmp = gen_load(s, OS_LONG, sp, 0, IS_USER(s));
2995 tcg_gen_addi_i32(QREG_SP, sp, 4);
2996 tcg_temp_free(sp);
2998 gen_set_sr(s, ccr, true);
2999 tcg_temp_free(ccr);
3001 gen_jmp(s, tmp);
3004 DISAS_INSN(rts)
3006 TCGv tmp;
3008 tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
3009 tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
3010 gen_jmp(s, tmp);
3013 DISAS_INSN(jump)
3015 TCGv tmp;
3018 * Load the target address first to ensure correct exception
3019 * behavior.
3021 tmp = gen_lea(env, s, insn, OS_LONG);
3022 if (IS_NULL_QREG(tmp)) {
3023 gen_addr_fault(s);
3024 return;
3026 if ((insn & 0x40) == 0) {
3027 /* jsr */
3028 gen_push(s, tcg_const_i32(s->pc));
3030 gen_jmp(s, tmp);
3033 DISAS_INSN(addsubq)
3035 TCGv src;
3036 TCGv dest;
3037 TCGv val;
3038 int imm;
3039 TCGv addr;
3040 int opsize;
3042 if ((insn & 070) == 010) {
3043 /* Operation on address register is always long. */
3044 opsize = OS_LONG;
3045 } else {
3046 opsize = insn_opsize(insn);
3048 SRC_EA(env, src, opsize, 1, &addr);
3049 imm = (insn >> 9) & 7;
3050 if (imm == 0) {
3051 imm = 8;
3053 val = tcg_const_i32(imm);
3054 dest = tcg_temp_new();
3055 tcg_gen_mov_i32(dest, src);
3056 if ((insn & 0x38) == 0x08) {
3058 * Don't update condition codes if the destination is an
3059 * address register.
3061 if (insn & 0x0100) {
3062 tcg_gen_sub_i32(dest, dest, val);
3063 } else {
3064 tcg_gen_add_i32(dest, dest, val);
3066 } else {
3067 if (insn & 0x0100) {
3068 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
3069 tcg_gen_sub_i32(dest, dest, val);
3070 set_cc_op(s, CC_OP_SUBB + opsize);
3071 } else {
3072 tcg_gen_add_i32(dest, dest, val);
3073 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
3074 set_cc_op(s, CC_OP_ADDB + opsize);
3076 gen_update_cc_add(dest, val, opsize);
3078 tcg_temp_free(val);
3079 DEST_EA(env, insn, opsize, dest, &addr);
3080 tcg_temp_free(dest);
3083 DISAS_INSN(tpf)
3085 switch (insn & 7) {
3086 case 2: /* One extension word. */
3087 s->pc += 2;
3088 break;
3089 case 3: /* Two extension words. */
3090 s->pc += 4;
3091 break;
3092 case 4: /* No extension words. */
3093 break;
3094 default:
3095 disas_undef(env, s, insn);
3099 DISAS_INSN(branch)
3101 int32_t offset;
3102 uint32_t base;
3103 int op;
3105 base = s->pc;
3106 op = (insn >> 8) & 0xf;
3107 offset = (int8_t)insn;
3108 if (offset == 0) {
3109 offset = (int16_t)read_im16(env, s);
3110 } else if (offset == -1) {
3111 offset = read_im32(env, s);
3113 if (op == 1) {
3114 /* bsr */
3115 gen_push(s, tcg_const_i32(s->pc));
3117 if (op > 1) {
3118 /* Bcc */
3119 TCGLabel *l1 = gen_new_label();
3120 gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
3121 gen_jmp_tb(s, 1, base + offset);
3122 gen_set_label(l1);
3123 gen_jmp_tb(s, 0, s->pc);
3124 } else {
3125 /* Unconditional branch. */
3126 update_cc_op(s);
3127 gen_jmp_tb(s, 0, base + offset);
3131 DISAS_INSN(moveq)
3133 tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
3134 gen_logic_cc(s, DREG(insn, 9), OS_LONG);
3137 DISAS_INSN(mvzs)
3139 int opsize;
3140 TCGv src;
3141 TCGv reg;
3143 if (insn & 0x40)
3144 opsize = OS_WORD;
3145 else
3146 opsize = OS_BYTE;
3147 SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
3148 reg = DREG(insn, 9);
3149 tcg_gen_mov_i32(reg, src);
3150 gen_logic_cc(s, src, opsize);
3153 DISAS_INSN(or)
3155 TCGv reg;
3156 TCGv dest;
3157 TCGv src;
3158 TCGv addr;
3159 int opsize;
3161 opsize = insn_opsize(insn);
3162 reg = gen_extend(s, DREG(insn, 9), opsize, 0);
3163 dest = tcg_temp_new();
3164 if (insn & 0x100) {
3165 SRC_EA(env, src, opsize, 0, &addr);
3166 tcg_gen_or_i32(dest, src, reg);
3167 DEST_EA(env, insn, opsize, dest, &addr);
3168 } else {
3169 SRC_EA(env, src, opsize, 0, NULL);
3170 tcg_gen_or_i32(dest, src, reg);
3171 gen_partset_reg(opsize, DREG(insn, 9), dest);
3173 gen_logic_cc(s, dest, opsize);
3174 tcg_temp_free(dest);
3177 DISAS_INSN(suba)
3179 TCGv src;
3180 TCGv reg;
3182 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3183 reg = AREG(insn, 9);
3184 tcg_gen_sub_i32(reg, reg, src);
3187 static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
3189 TCGv tmp;
3191 gen_flush_flags(s); /* compute old Z */
3194 * Perform subtract with borrow.
3195 * (X, N) = dest - (src + X);
3198 tmp = tcg_const_i32(0);
3199 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
3200 tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
3201 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3202 tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
3204 /* Compute signed-overflow for subtract. */
3206 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
3207 tcg_gen_xor_i32(tmp, dest, src);
3208 tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
3209 tcg_temp_free(tmp);
3211 /* Copy the rest of the results into place. */
3212 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
3213 tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
3215 set_cc_op(s, CC_OP_FLAGS);
3217 /* result is in QREG_CC_N */
3220 DISAS_INSN(subx_reg)
3222 TCGv dest;
3223 TCGv src;
3224 int opsize;
3226 opsize = insn_opsize(insn);
3228 src = gen_extend(s, DREG(insn, 0), opsize, 1);
3229 dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3231 gen_subx(s, src, dest, opsize);
3233 gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3236 DISAS_INSN(subx_mem)
3238 TCGv src;
3239 TCGv addr_src;
3240 TCGv dest;
3241 TCGv addr_dest;
3242 int opsize;
3244 opsize = insn_opsize(insn);
3246 addr_src = AREG(insn, 0);
3247 tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
3248 src = gen_load(s, opsize, addr_src, 1, IS_USER(s));
3250 addr_dest = AREG(insn, 9);
3251 tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
3252 dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));
3254 gen_subx(s, src, dest, opsize);
3256 gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
3258 tcg_temp_free(dest);
3259 tcg_temp_free(src);
3262 DISAS_INSN(mov3q)
3264 TCGv src;
3265 int val;
3267 val = (insn >> 9) & 7;
3268 if (val == 0)
3269 val = -1;
3270 src = tcg_const_i32(val);
3271 gen_logic_cc(s, src, OS_LONG);
3272 DEST_EA(env, insn, OS_LONG, src, NULL);
3273 tcg_temp_free(src);
3276 DISAS_INSN(cmp)
3278 TCGv src;
3279 TCGv reg;
3280 int opsize;
3282 opsize = insn_opsize(insn);
3283 SRC_EA(env, src, opsize, 1, NULL);
3284 reg = gen_extend(s, DREG(insn, 9), opsize, 1);
3285 gen_update_cc_cmp(s, reg, src, opsize);
3288 DISAS_INSN(cmpa)
3290 int opsize;
3291 TCGv src;
3292 TCGv reg;
3294 if (insn & 0x100) {
3295 opsize = OS_LONG;
3296 } else {
3297 opsize = OS_WORD;
3299 SRC_EA(env, src, opsize, 1, NULL);
3300 reg = AREG(insn, 9);
3301 gen_update_cc_cmp(s, reg, src, OS_LONG);
3304 DISAS_INSN(cmpm)
3306 int opsize = insn_opsize(insn);
3307 TCGv src, dst;
3309 /* Post-increment load (mode 3) from Ay. */
3310 src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
3311 NULL_QREG, NULL, EA_LOADS, IS_USER(s));
3312 /* Post-increment load (mode 3) from Ax. */
3313 dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
3314 NULL_QREG, NULL, EA_LOADS, IS_USER(s));
3316 gen_update_cc_cmp(s, dst, src, opsize);
3319 DISAS_INSN(eor)
3321 TCGv src;
3322 TCGv dest;
3323 TCGv addr;
3324 int opsize;
3326 opsize = insn_opsize(insn);
3328 SRC_EA(env, src, opsize, 0, &addr);
3329 dest = tcg_temp_new();
3330 tcg_gen_xor_i32(dest, src, DREG(insn, 9));
3331 gen_logic_cc(s, dest, opsize);
3332 DEST_EA(env, insn, opsize, dest, &addr);
3333 tcg_temp_free(dest);
3336 static void do_exg(TCGv reg1, TCGv reg2)
3338 TCGv temp = tcg_temp_new();
3339 tcg_gen_mov_i32(temp, reg1);
3340 tcg_gen_mov_i32(reg1, reg2);
3341 tcg_gen_mov_i32(reg2, temp);
3342 tcg_temp_free(temp);
3345 DISAS_INSN(exg_dd)
3347 /* exchange Dx and Dy */
3348 do_exg(DREG(insn, 9), DREG(insn, 0));
3351 DISAS_INSN(exg_aa)
3353 /* exchange Ax and Ay */
3354 do_exg(AREG(insn, 9), AREG(insn, 0));
3357 DISAS_INSN(exg_da)
3359 /* exchange Dx and Ay */
3360 do_exg(DREG(insn, 9), AREG(insn, 0));
3363 DISAS_INSN(and)
3365 TCGv src;
3366 TCGv reg;
3367 TCGv dest;
3368 TCGv addr;
3369 int opsize;
3371 dest = tcg_temp_new();
3373 opsize = insn_opsize(insn);
3374 reg = DREG(insn, 9);
3375 if (insn & 0x100) {
3376 SRC_EA(env, src, opsize, 0, &addr);
3377 tcg_gen_and_i32(dest, src, reg);
3378 DEST_EA(env, insn, opsize, dest, &addr);
3379 } else {
3380 SRC_EA(env, src, opsize, 0, NULL);
3381 tcg_gen_and_i32(dest, src, reg);
3382 gen_partset_reg(opsize, reg, dest);
3384 gen_logic_cc(s, dest, opsize);
3385 tcg_temp_free(dest);
3388 DISAS_INSN(adda)
3390 TCGv src;
3391 TCGv reg;
3393 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3394 reg = AREG(insn, 9);
3395 tcg_gen_add_i32(reg, reg, src);
3398 static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
3400 TCGv tmp;
3402 gen_flush_flags(s); /* compute old Z */
3405 * Perform addition with carry.
3406 * (X, N) = src + dest + X;
3409 tmp = tcg_const_i32(0);
3410 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
3411 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
3412 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3414 /* Compute signed-overflow for addition. */
3416 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
3417 tcg_gen_xor_i32(tmp, dest, src);
3418 tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
3419 tcg_temp_free(tmp);
3421 /* Copy the rest of the results into place. */
3422 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
3423 tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
3425 set_cc_op(s, CC_OP_FLAGS);
3427 /* result is in QREG_CC_N */
3430 DISAS_INSN(addx_reg)
3432 TCGv dest;
3433 TCGv src;
3434 int opsize;
3436 opsize = insn_opsize(insn);
3438 dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3439 src = gen_extend(s, DREG(insn, 0), opsize, 1);
3441 gen_addx(s, src, dest, opsize);
3443 gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3446 DISAS_INSN(addx_mem)
3448 TCGv src;
3449 TCGv addr_src;
3450 TCGv dest;
3451 TCGv addr_dest;
3452 int opsize;
3454 opsize = insn_opsize(insn);
3456 addr_src = AREG(insn, 0);
3457 tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
3458 src = gen_load(s, opsize, addr_src, 1, IS_USER(s));
3460 addr_dest = AREG(insn, 9);
3461 tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
3462 dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));
3464 gen_addx(s, src, dest, opsize);
3466 gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
3468 tcg_temp_free(dest);
3469 tcg_temp_free(src);
3472 static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
3474 int count = (insn >> 9) & 7;
3475 int logical = insn & 8;
3476 int left = insn & 0x100;
3477 int bits = opsize_bytes(opsize) * 8;
3478 TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
3480 if (count == 0) {
3481 count = 8;
3484 tcg_gen_movi_i32(QREG_CC_V, 0);
3485 if (left) {
3486 tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
3487 tcg_gen_shli_i32(QREG_CC_N, reg, count);
3490 * Note that ColdFire always clears V (done above),
3491 * while M68000 sets if the most significant bit is changed at
3492 * any time during the shift operation.
3494 if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3495 /* if shift count >= bits, V is (reg != 0) */
3496 if (count >= bits) {
3497 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
3498 } else {
3499 TCGv t0 = tcg_temp_new();
3500 tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
3501 tcg_gen_sari_i32(t0, reg, bits - count - 1);
3502 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
3503 tcg_temp_free(t0);
3505 tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
3507 } else {
3508 tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
3509 if (logical) {
3510 tcg_gen_shri_i32(QREG_CC_N, reg, count);
3511 } else {
3512 tcg_gen_sari_i32(QREG_CC_N, reg, count);
3516 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3517 tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3518 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3519 tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3521 gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3522 set_cc_op(s, CC_OP_FLAGS);
3525 static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
3527 int logical = insn & 8;
3528 int left = insn & 0x100;
3529 int bits = opsize_bytes(opsize) * 8;
3530 TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
3531 TCGv s32;
3532 TCGv_i64 t64, s64;
3534 t64 = tcg_temp_new_i64();
3535 s64 = tcg_temp_new_i64();
3536 s32 = tcg_temp_new();
3539 * Note that m68k truncates the shift count modulo 64, not 32.
3540 * In addition, a 64-bit shift makes it easy to find "the last
3541 * bit shifted out", for the carry flag.
3543 tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
3544 tcg_gen_extu_i32_i64(s64, s32);
3545 tcg_gen_extu_i32_i64(t64, reg);
3547 /* Optimistically set V=0. Also used as a zero source below. */
3548 tcg_gen_movi_i32(QREG_CC_V, 0);
3549 if (left) {
3550 tcg_gen_shl_i64(t64, t64, s64);
3552 if (opsize == OS_LONG) {
3553 tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
3554 /* Note that C=0 if shift count is 0, and we get that for free. */
3555 } else {
3556 TCGv zero = tcg_const_i32(0);
3557 tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
3558 tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
3559 tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3560 s32, zero, zero, QREG_CC_C);
3561 tcg_temp_free(zero);
3563 tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3565 /* X = C, but only if the shift count was non-zero. */
3566 tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3567 QREG_CC_C, QREG_CC_X);
3570 * M68000 sets V if the most significant bit is changed at
3571 * any time during the shift operation. Do this via creating
3572 * an extension of the sign bit, comparing, and discarding
3573 * the bits below the sign bit. I.e.
3574 * int64_t s = (intN_t)reg;
3575 * int64_t t = (int64_t)(intN_t)reg << count;
3576 * V = ((s ^ t) & (-1 << (bits - 1))) != 0
3578 if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3579 TCGv_i64 tt = tcg_const_i64(32);
3580 /* if shift is greater than 32, use 32 */
3581 tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
3582 tcg_temp_free_i64(tt);
3583 /* Sign extend the input to 64 bits; re-do the shift. */
3584 tcg_gen_ext_i32_i64(t64, reg);
3585 tcg_gen_shl_i64(s64, t64, s64);
3586 /* Clear all bits that are unchanged. */
3587 tcg_gen_xor_i64(t64, t64, s64);
3588 /* Ignore the bits below the sign bit. */
3589 tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
3590 /* If any bits remain set, we have overflow. */
3591 tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
3592 tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
3593 tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
3595 } else {
3596 tcg_gen_shli_i64(t64, t64, 32);
3597 if (logical) {
3598 tcg_gen_shr_i64(t64, t64, s64);
3599 } else {
3600 tcg_gen_sar_i64(t64, t64, s64);
3602 tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);
3604 /* Note that C=0 if shift count is 0, and we get that for free. */
3605 tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);
3607 /* X = C, but only if the shift count was non-zero. */
3608 tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3609 QREG_CC_C, QREG_CC_X);
3611 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3612 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3614 tcg_temp_free(s32);
3615 tcg_temp_free_i64(s64);
3616 tcg_temp_free_i64(t64);
3618 /* Write back the result. */
3619 gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3620 set_cc_op(s, CC_OP_FLAGS);
3623 DISAS_INSN(shift8_im)
3625 shift_im(s, insn, OS_BYTE);
3628 DISAS_INSN(shift16_im)
3630 shift_im(s, insn, OS_WORD);
3633 DISAS_INSN(shift_im)
3635 shift_im(s, insn, OS_LONG);
3638 DISAS_INSN(shift8_reg)
3640 shift_reg(s, insn, OS_BYTE);
3643 DISAS_INSN(shift16_reg)
3645 shift_reg(s, insn, OS_WORD);
3648 DISAS_INSN(shift_reg)
3650 shift_reg(s, insn, OS_LONG);
3653 DISAS_INSN(shift_mem)
3655 int logical = insn & 8;
3656 int left = insn & 0x100;
3657 TCGv src;
3658 TCGv addr;
3660 SRC_EA(env, src, OS_WORD, !logical, &addr);
3661 tcg_gen_movi_i32(QREG_CC_V, 0);
3662 if (left) {
3663 tcg_gen_shri_i32(QREG_CC_C, src, 15);
3664 tcg_gen_shli_i32(QREG_CC_N, src, 1);
3667 * Note that ColdFire always clears V,
3668 * while M68000 sets if the most significant bit is changed at
3669 * any time during the shift operation
3671 if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3672 src = gen_extend(s, src, OS_WORD, 1);
3673 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
3675 } else {
3676 tcg_gen_mov_i32(QREG_CC_C, src);
3677 if (logical) {
3678 tcg_gen_shri_i32(QREG_CC_N, src, 1);
3679 } else {
3680 tcg_gen_sari_i32(QREG_CC_N, src, 1);
3684 gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
3685 tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3686 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3687 tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3689 DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
3690 set_cc_op(s, CC_OP_FLAGS);
3693 static void rotate(TCGv reg, TCGv shift, int left, int size)
3695 switch (size) {
3696 case 8:
3697 /* Replicate the 8-bit input so that a 32-bit rotate works. */
3698 tcg_gen_ext8u_i32(reg, reg);
3699 tcg_gen_muli_i32(reg, reg, 0x01010101);
3700 goto do_long;
3701 case 16:
3702 /* Replicate the 16-bit input so that a 32-bit rotate works. */
3703 tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
3704 goto do_long;
3705 do_long:
3706 default:
3707 if (left) {
3708 tcg_gen_rotl_i32(reg, reg, shift);
3709 } else {
3710 tcg_gen_rotr_i32(reg, reg, shift);
3714 /* compute flags */
3716 switch (size) {
3717 case 8:
3718 tcg_gen_ext8s_i32(reg, reg);
3719 break;
3720 case 16:
3721 tcg_gen_ext16s_i32(reg, reg);
3722 break;
3723 default:
3724 break;
3727 /* QREG_CC_X is not affected */
3729 tcg_gen_mov_i32(QREG_CC_N, reg);
3730 tcg_gen_mov_i32(QREG_CC_Z, reg);
3732 if (left) {
3733 tcg_gen_andi_i32(QREG_CC_C, reg, 1);
3734 } else {
3735 tcg_gen_shri_i32(QREG_CC_C, reg, 31);
3738 tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
3741 static void rotate_x_flags(TCGv reg, TCGv X, int size)
3743 switch (size) {
3744 case 8:
3745 tcg_gen_ext8s_i32(reg, reg);
3746 break;
3747 case 16:
3748 tcg_gen_ext16s_i32(reg, reg);
3749 break;
3750 default:
3751 break;
3753 tcg_gen_mov_i32(QREG_CC_N, reg);
3754 tcg_gen_mov_i32(QREG_CC_Z, reg);
3755 tcg_gen_mov_i32(QREG_CC_X, X);
3756 tcg_gen_mov_i32(QREG_CC_C, X);
3757 tcg_gen_movi_i32(QREG_CC_V, 0);
3760 /* Result of rotate_x() is valid if 0 <= shift <= size */
3761 static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
3763 TCGv X, shl, shr, shx, sz, zero;
3765 sz = tcg_const_i32(size);
3767 shr = tcg_temp_new();
3768 shl = tcg_temp_new();
3769 shx = tcg_temp_new();
3770 if (left) {
3771 tcg_gen_mov_i32(shl, shift); /* shl = shift */
3772 tcg_gen_movi_i32(shr, size + 1);
3773 tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
3774 tcg_gen_subi_i32(shx, shift, 1); /* shx = shift - 1 */
3775 /* shx = shx < 0 ? size : shx; */
3776 zero = tcg_const_i32(0);
3777 tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
3778 tcg_temp_free(zero);
3779 } else {
3780 tcg_gen_mov_i32(shr, shift); /* shr = shift */
3781 tcg_gen_movi_i32(shl, size + 1);
3782 tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
3783 tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
3785 tcg_temp_free_i32(sz);
3787 /* reg = (reg << shl) | (reg >> shr) | (x << shx); */
3789 tcg_gen_shl_i32(shl, reg, shl);
3790 tcg_gen_shr_i32(shr, reg, shr);
3791 tcg_gen_or_i32(reg, shl, shr);
3792 tcg_temp_free(shl);
3793 tcg_temp_free(shr);
3794 tcg_gen_shl_i32(shx, QREG_CC_X, shx);
3795 tcg_gen_or_i32(reg, reg, shx);
3796 tcg_temp_free(shx);
3798 /* X = (reg >> size) & 1 */
3800 X = tcg_temp_new();
3801 tcg_gen_extract_i32(X, reg, size, 1);
3803 return X;
3806 /* Result of rotate32_x() is valid if 0 <= shift < 33 */
3807 static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
3809 TCGv_i64 t0, shift64;
3810 TCGv X, lo, hi, zero;
3812 shift64 = tcg_temp_new_i64();
3813 tcg_gen_extu_i32_i64(shift64, shift);
3815 t0 = tcg_temp_new_i64();
3817 X = tcg_temp_new();
3818 lo = tcg_temp_new();
3819 hi = tcg_temp_new();
3821 if (left) {
3822 /* create [reg:X:..] */
3824 tcg_gen_shli_i32(lo, QREG_CC_X, 31);
3825 tcg_gen_concat_i32_i64(t0, lo, reg);
3827 /* rotate */
3829 tcg_gen_rotl_i64(t0, t0, shift64);
3830 tcg_temp_free_i64(shift64);
3832 /* result is [reg:..:reg:X] */
3834 tcg_gen_extr_i64_i32(lo, hi, t0);
3835 tcg_gen_andi_i32(X, lo, 1);
3837 tcg_gen_shri_i32(lo, lo, 1);
3838 } else {
3839 /* create [..:X:reg] */
3841 tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);
3843 tcg_gen_rotr_i64(t0, t0, shift64);
3844 tcg_temp_free_i64(shift64);
3846 /* result is value: [X:reg:..:reg] */
3848 tcg_gen_extr_i64_i32(lo, hi, t0);
3850 /* extract X */
3852 tcg_gen_shri_i32(X, hi, 31);
3854 /* extract result */
3856 tcg_gen_shli_i32(hi, hi, 1);
3858 tcg_temp_free_i64(t0);
3859 tcg_gen_or_i32(lo, lo, hi);
3860 tcg_temp_free(hi);
3862 /* if shift == 0, register and X are not affected */
3864 zero = tcg_const_i32(0);
3865 tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
3866 tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
3867 tcg_temp_free(zero);
3868 tcg_temp_free(lo);
3870 return X;
3873 DISAS_INSN(rotate_im)
3875 TCGv shift;
3876 int tmp;
3877 int left = (insn & 0x100);
3879 tmp = (insn >> 9) & 7;
3880 if (tmp == 0) {
3881 tmp = 8;
3884 shift = tcg_const_i32(tmp);
3885 if (insn & 8) {
3886 rotate(DREG(insn, 0), shift, left, 32);
3887 } else {
3888 TCGv X = rotate32_x(DREG(insn, 0), shift, left);
3889 rotate_x_flags(DREG(insn, 0), X, 32);
3890 tcg_temp_free(X);
3892 tcg_temp_free(shift);
3894 set_cc_op(s, CC_OP_FLAGS);
3897 DISAS_INSN(rotate8_im)
3899 int left = (insn & 0x100);
3900 TCGv reg;
3901 TCGv shift;
3902 int tmp;
3904 reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
3906 tmp = (insn >> 9) & 7;
3907 if (tmp == 0) {
3908 tmp = 8;
3911 shift = tcg_const_i32(tmp);
3912 if (insn & 8) {
3913 rotate(reg, shift, left, 8);
3914 } else {
3915 TCGv X = rotate_x(reg, shift, left, 8);
3916 rotate_x_flags(reg, X, 8);
3917 tcg_temp_free(X);
3919 tcg_temp_free(shift);
3920 gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3921 set_cc_op(s, CC_OP_FLAGS);
3924 DISAS_INSN(rotate16_im)
3926 int left = (insn & 0x100);
3927 TCGv reg;
3928 TCGv shift;
3929 int tmp;
3931 reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
3932 tmp = (insn >> 9) & 7;
3933 if (tmp == 0) {
3934 tmp = 8;
3937 shift = tcg_const_i32(tmp);
3938 if (insn & 8) {
3939 rotate(reg, shift, left, 16);
3940 } else {
3941 TCGv X = rotate_x(reg, shift, left, 16);
3942 rotate_x_flags(reg, X, 16);
3943 tcg_temp_free(X);
3945 tcg_temp_free(shift);
3946 gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3947 set_cc_op(s, CC_OP_FLAGS);
3950 DISAS_INSN(rotate_reg)
3952 TCGv reg;
3953 TCGv src;
3954 TCGv t0, t1;
3955 int left = (insn & 0x100);
3957 reg = DREG(insn, 0);
3958 src = DREG(insn, 9);
3959 /* shift in [0..63] */
3960 t0 = tcg_temp_new();
3961 tcg_gen_andi_i32(t0, src, 63);
3962 t1 = tcg_temp_new_i32();
3963 if (insn & 8) {
3964 tcg_gen_andi_i32(t1, src, 31);
3965 rotate(reg, t1, left, 32);
3966 /* if shift == 0, clear C */
3967 tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3968 t0, QREG_CC_V /* 0 */,
3969 QREG_CC_V /* 0 */, QREG_CC_C);
3970 } else {
3971 TCGv X;
3972 /* modulo 33 */
3973 tcg_gen_movi_i32(t1, 33);
3974 tcg_gen_remu_i32(t1, t0, t1);
3975 X = rotate32_x(DREG(insn, 0), t1, left);
3976 rotate_x_flags(DREG(insn, 0), X, 32);
3977 tcg_temp_free(X);
3979 tcg_temp_free(t1);
3980 tcg_temp_free(t0);
3981 set_cc_op(s, CC_OP_FLAGS);
3984 DISAS_INSN(rotate8_reg)
3986 TCGv reg;
3987 TCGv src;
3988 TCGv t0, t1;
3989 int left = (insn & 0x100);
3991 reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
3992 src = DREG(insn, 9);
3993 /* shift in [0..63] */
3994 t0 = tcg_temp_new_i32();
3995 tcg_gen_andi_i32(t0, src, 63);
3996 t1 = tcg_temp_new_i32();
3997 if (insn & 8) {
3998 tcg_gen_andi_i32(t1, src, 7);
3999 rotate(reg, t1, left, 8);
4000 /* if shift == 0, clear C */
4001 tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
4002 t0, QREG_CC_V /* 0 */,
4003 QREG_CC_V /* 0 */, QREG_CC_C);
4004 } else {
4005 TCGv X;
4006 /* modulo 9 */
4007 tcg_gen_movi_i32(t1, 9);
4008 tcg_gen_remu_i32(t1, t0, t1);
4009 X = rotate_x(reg, t1, left, 8);
4010 rotate_x_flags(reg, X, 8);
4011 tcg_temp_free(X);
4013 tcg_temp_free(t1);
4014 tcg_temp_free(t0);
4015 gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
4016 set_cc_op(s, CC_OP_FLAGS);
4019 DISAS_INSN(rotate16_reg)
4021 TCGv reg;
4022 TCGv src;
4023 TCGv t0, t1;
4024 int left = (insn & 0x100);
4026 reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
4027 src = DREG(insn, 9);
4028 /* shift in [0..63] */
4029 t0 = tcg_temp_new_i32();
4030 tcg_gen_andi_i32(t0, src, 63);
4031 t1 = tcg_temp_new_i32();
4032 if (insn & 8) {
4033 tcg_gen_andi_i32(t1, src, 15);
4034 rotate(reg, t1, left, 16);
4035 /* if shift == 0, clear C */
4036 tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
4037 t0, QREG_CC_V /* 0 */,
4038 QREG_CC_V /* 0 */, QREG_CC_C);
4039 } else {
4040 TCGv X;
4041 /* modulo 17 */
4042 tcg_gen_movi_i32(t1, 17);
4043 tcg_gen_remu_i32(t1, t0, t1);
4044 X = rotate_x(reg, t1, left, 16);
4045 rotate_x_flags(reg, X, 16);
4046 tcg_temp_free(X);
4048 tcg_temp_free(t1);
4049 tcg_temp_free(t0);
4050 gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
4051 set_cc_op(s, CC_OP_FLAGS);
4054 DISAS_INSN(rotate_mem)
4056 TCGv src;
4057 TCGv addr;
4058 TCGv shift;
4059 int left = (insn & 0x100);
4061 SRC_EA(env, src, OS_WORD, 0, &addr);
4063 shift = tcg_const_i32(1);
4064 if (insn & 0x0200) {
4065 rotate(src, shift, left, 16);
4066 } else {
4067 TCGv X = rotate_x(src, shift, left, 16);
4068 rotate_x_flags(src, X, 16);
4069 tcg_temp_free(X);
4071 tcg_temp_free(shift);
4072 DEST_EA(env, insn, OS_WORD, src, &addr);
4073 set_cc_op(s, CC_OP_FLAGS);
/*
 * BFEXTU/BFEXTS Dn{offset:width},Dm -- bitfield extract from a data
 * register.  insn bit 9 selects signed extraction.  Width and offset
 * each come either from the extension word (immediate) or from a data
 * register (variable).  CC_N receives the sign-extended field value.
 */
DISAS_INSN(bfext_reg)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv src = DREG(insn, 0);
    TCGv dst = DREG(ext, 12);
    /* A width encoding of 0 means 32 bits. */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp = tcg_temp_new();
    TCGv shift;

    /*
     * In general, we're going to rotate the field so that it's at the
     * top of the word and then right-shift by the complement of the
     * width to extend the field.
     */
    if (ext & 0x20) {
        /* Variable width. */
        if (ext & 0x800) {
            /* Variable offset. */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
        } else {
            tcg_gen_rotli_i32(tmp, src, ofs);
        }

        /* shift = 32 - width (mod 32), computed as -width & 31. */
        shift = tcg_temp_new();
        tcg_gen_neg_i32(shift, DREG(ext, 0));
        tcg_gen_andi_i32(shift, shift, 31);
        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_shr_i32(dst, tmp, shift);
        }
        tcg_temp_free(shift);
    } else {
        /* Immediate width. */
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
            src = tmp;
            pos = 32 - len;
        } else {
            /*
             * Immediate offset.  If the field doesn't wrap around the
             * end of the word, rely on (s)extract completely.
             */
            if (pos < 0) {
                tcg_gen_rotli_i32(tmp, src, ofs);
                src = tmp;
                pos = 32 - len;
            }
        }

        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_extract_i32(dst, src, pos, len);
        }
    }

    tcg_temp_free(tmp);
    set_cc_op(s, CC_OP_LOGIC);
}
/*
 * BFEXTU/BFEXTS <ea>{offset:width},Dn -- bitfield extract from memory,
 * implemented via helpers.  insn bit 9 selects signed extraction.
 */
DISAS_INSN(bfext_mem)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv dest = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width/offset: register (variable) or immediate from ext word. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_const_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_const_i32(extract32(ext, 6, 5));
    }

    if (is_sign) {
        gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
        tcg_gen_mov_i32(QREG_CC_N, dest);
    } else {
        /*
         * The helper returns a 64-bit pair: low half goes to the
         * destination register, high half supplies CC_N.
         */
        TCGv_i64 tmp = tcg_temp_new_i64();
        gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
        tcg_temp_free_i64(tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Only free the temps we allocated (immediate forms). */
    if (!(ext & 0x20)) {
        tcg_temp_free(len);
    }
    if (!(ext & 0x800)) {
        tcg_temp_free(ofs);
    }
}
/*
 * BFCHG/BFCLR/BFSET/BFTST/BFFFO Dn{offset:width} -- bitfield operations
 * on a data register.  A rotated mask covering the field is built,
 * CC_N is loaded with the field aligned at the top of the word, and the
 * operation selected by insn bits 8-11 is applied through the mask.
 */
DISAS_INSN(bfop_reg)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(insn, 0);
    /* A width encoding of 0 means 32 bits. */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    TCGv mask, tofs, tlen;

    tofs = NULL;
    tlen = NULL;
    if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
        /* bfffo also needs the runtime offset and length for its helper. */
        tofs = tcg_temp_new();
        tlen = tcg_temp_new();
    }

    if ((ext & 0x820) == 0) {
        /* Immediate width and offset. */
        uint32_t maski = 0x7fffffffu >> (len - 1);
        if (ofs + len <= 32) {
            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
        } else {
            /* Field wraps around the end of the word. */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
        }
        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
        mask = tcg_const_i32(ror32(maski, ofs));
        if (tofs) {
            tcg_gen_movi_i32(tofs, ofs);
            tcg_gen_movi_i32(tlen, len);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
            tcg_gen_andi_i32(tmp, tmp, 31);
            mask = tcg_const_i32(0x7fffffffu);
            tcg_gen_shr_i32(mask, mask, tmp);
            if (tlen) {
                tcg_gen_addi_i32(tlen, tmp, 1);
            }
        } else {
            /* Immediate width */
            mask = tcg_const_i32(0x7fffffffu >> (len - 1));
            if (tlen) {
                tcg_gen_movi_i32(tlen, len);
            }
        }

        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotr_i32(mask, mask, tmp);
            if (tofs) {
                tcg_gen_mov_i32(tofs, tmp);
            }
        } else {
            /* Immediate offset (and variable width) */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotri_i32(mask, mask, ofs);
            if (tofs) {
                tcg_gen_movi_i32(tofs, ofs);
            }
        }
        tcg_temp_free(tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        tcg_gen_eqv_i32(src, src, mask);
        break;
    case 0x0c00: /* bfclr */
        tcg_gen_and_i32(src, src, mask);
        break;
    case 0x0d00: /* bfffo */
        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
        tcg_temp_free(tlen);
        tcg_temp_free(tofs);
        break;
    case 0x0e00: /* bfset */
        tcg_gen_orc_i32(src, src, mask);
        break;
    case 0x0800: /* bftst */
        /* flags already set; no other work to do. */
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(mask);
}
/*
 * BFCHG/BFCLR/BFSET/BFTST/BFFFO <ea>{offset:width} -- bitfield
 * operations on memory, dispatched to per-op helpers which also
 * compute CC_N.
 */
DISAS_INSN(bfop_mem)
{
    int ext = read_im16(env, s);
    TCGv addr, len, ofs;
    TCGv_i64 t64;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width/offset: register (variable) or immediate from ext word. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_const_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_const_i32(extract32(ext, 6, 5));
    }

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0c00: /* bfclr */
        gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0d00: /* bfffo */
        /* Helper returns a pair: low half -> Dn, high half -> CC_N. */
        t64 = tcg_temp_new_i64();
        gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
        tcg_temp_free_i64(t64);
        break;
    case 0x0e00: /* bfset */
        gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0800: /* bftst */
        gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    default:
        g_assert_not_reached();
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Only free the temps we allocated (immediate forms). */
    if (!(ext & 0x20)) {
        tcg_temp_free(len);
    }
    if (!(ext & 0x800)) {
        tcg_temp_free(ofs);
    }
}
/*
 * BFINS Dn,Dm{offset:width} -- insert a bitfield into a data register.
 * CC_N is set from the source value shifted to the top of the word;
 * the insertion itself uses deposit when the field does not wrap,
 * otherwise a rotated mask.
 */
DISAS_INSN(bfins_reg)
{
    int ext = read_im16(env, s);
    TCGv dst = DREG(insn, 0);
    TCGv src = DREG(ext, 12);
    /* A width encoding of 0 means 32 bits. */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp;

    tmp = tcg_temp_new();

    if (ext & 0x20) {
        /* Variable width */
        tcg_gen_neg_i32(tmp, DREG(ext, 0));
        tcg_gen_andi_i32(tmp, tmp, 31);
        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
    } else {
        /* Immediate width */
        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Immediate width and offset */
    if ((ext & 0x820) == 0) {
        /* Check for suitability for deposit.  */
        if (pos >= 0) {
            tcg_gen_deposit_i32(dst, dst, src, pos, len);
        } else {
            /* Field wraps; mask, rotate into place, merge. */
            uint32_t maski = -2U << (len - 1);
            uint32_t roti = (ofs + len) & 31;
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_rotri_i32(tmp, tmp, roti);
            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
            tcg_gen_or_i32(dst, dst, tmp);
        }
    } else {
        TCGv mask = tcg_temp_new();
        TCGv rot = tcg_temp_new();

        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
            tcg_gen_andi_i32(rot, rot, 31);
            tcg_gen_movi_i32(mask, -2);
            tcg_gen_shl_i32(mask, mask, rot);
            tcg_gen_mov_i32(rot, DREG(ext, 0));
            tcg_gen_andc_i32(tmp, src, mask);
        } else {
            /* Immediate width (variable offset) */
            uint32_t maski = -2U << (len - 1);
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_movi_i32(mask, maski);
            tcg_gen_movi_i32(rot, len & 31);
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
        } else {
            /* Immediate offset (variable width) */
            tcg_gen_addi_i32(rot, rot, ofs);
        }
        tcg_gen_andi_i32(rot, rot, 31);
        tcg_gen_rotr_i32(mask, mask, rot);
        tcg_gen_rotr_i32(tmp, tmp, rot);
        tcg_gen_and_i32(dst, dst, mask);
        tcg_gen_or_i32(dst, dst, tmp);

        tcg_temp_free(rot);
        tcg_temp_free(mask);
    }
    tcg_temp_free(tmp);
}
/*
 * BFINS Dn,<ea>{offset:width} -- insert a bitfield into memory via
 * helper; the helper also produces CC_N.
 */
DISAS_INSN(bfins_mem)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width/offset: register (variable) or immediate from ext word. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_const_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_const_i32(extract32(ext, 6, 5));
    }

    gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
    set_cc_op(s, CC_OP_LOGIC);

    /* Only free the temps we allocated (immediate forms). */
    if (!(ext & 0x20)) {
        tcg_temp_free(len);
    }
    if (!(ext & 0x800)) {
        tcg_temp_free(ofs);
    }
}
4444 DISAS_INSN(ff1)
4446 TCGv reg;
4447 reg = DREG(insn, 0);
4448 gen_logic_cc(s, reg, OS_LONG);
4449 gen_helper_ff1(reg, reg);
/*
 * CHK <ea>,Dn -- bounds check.  Size is selected by insn bits 7-8;
 * the long form requires the CHK2 feature.  The actual compare and
 * exception raise is done in the helper.
 */
DISAS_INSN(chk)
{
    TCGv src, reg;
    int opsize;

    switch ((insn >> 7) & 3) {
    case 3:
        opsize = OS_WORD;
        break;
    case 2:
        if (m68k_feature(env, M68K_FEATURE_CHK2)) {
            opsize = OS_LONG;
            break;
        }
        /* fallthru */
    default:
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);

    /* Helper may raise; make sure flags are up to date first. */
    gen_flush_flags(s);
    gen_helper_chk(cpu_env, reg, src);
}
/*
 * CHK2/CMP2 <ea>,Rn -- check register against a bound pair loaded from
 * memory.  ext bit 11 distinguishes CHK2 from CMP2 (must be set here);
 * ext bit 15 selects an address vs. data register operand.
 */
DISAS_INSN(chk2)
{
    uint16_t ext;
    TCGv addr1, addr2, bound1, bound2, reg;
    int opsize;

    switch ((insn >> 9) & 3) {
    case 0:
        opsize = OS_BYTE;
        break;
    case 1:
        opsize = OS_WORD;
        break;
    case 2:
        opsize = OS_LONG;
        break;
    default:
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }

    ext = read_im16(env, s);
    if ((ext & 0x0800) == 0) {
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }

    /*
     * NOTE(review): unlike other EA users, addr1 is not checked with
     * IS_NULL_QREG here -- confirm gen_lea cannot fail for the EA modes
     * that decode to chk2.
     */
    addr1 = gen_lea(env, s, insn, OS_UNSIZED);
    addr2 = tcg_temp_new();
    tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));

    /* Load lower and upper bounds (sign-extended). */
    bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
    tcg_temp_free(addr1);
    bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));
    tcg_temp_free(addr2);

    reg = tcg_temp_new();
    if (ext & 0x8000) {
        tcg_gen_mov_i32(reg, AREG(ext, 12));
    } else {
        gen_ext(reg, DREG(ext, 12), opsize, 1);
    }

    /* Helper may raise; make sure flags are up to date first. */
    gen_flush_flags(s);
    gen_helper_chk2(cpu_env, reg, bound1, bound2);
    tcg_temp_free(reg);
    tcg_temp_free(bound1);
    tcg_temp_free(bound2);
}
/*
 * Copy one 16-byte line from *src to *dst for MOVE16.  Both addresses
 * are aligned down to a 16-byte boundary; the copy is emitted as two
 * 64-bit loads followed by two 64-bit stores using MMU index 'index'.
 */
static void m68k_copy_line(TCGv dst, TCGv src, int index)
{
    TCGv addr;
    TCGv_i64 t0, t1;

    addr = tcg_temp_new();

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_andi_i32(addr, src, ~15);
    tcg_gen_qemu_ld64(t0, addr, index);
    tcg_gen_addi_i32(addr, addr, 8);
    tcg_gen_qemu_ld64(t1, addr, index);

    tcg_gen_andi_i32(addr, dst, ~15);
    tcg_gen_qemu_st64(t0, addr, index);
    tcg_gen_addi_i32(addr, addr, 8);
    tcg_gen_qemu_st64(t1, addr, index);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free(addr);
}
/*
 * MOVE16 (Ay)+,(Ax)+ -- copy a 16-byte line between two postincremented
 * address registers.
 */
DISAS_INSN(move16_reg)
{
    int index = IS_USER(s);
    TCGv tmp;
    uint16_t ext;

    ext = read_im16(env, s);
    if ((ext & (1 << 15)) == 0) {
        /*
         * NOTE(review): no early return here; presumably gen_exception
         * ends the TB so the code emitted below is unreachable -- confirm.
         */
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
    }

    m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);

    /* Ax can be Ay, so save Ay before incrementing Ax */
    tmp = tcg_temp_new();
    tcg_gen_mov_i32(tmp, AREG(ext, 12));
    tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
    tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
    tcg_temp_free(tmp);
}
/*
 * MOVE16 between an address register and an absolute long address.
 * insn bit 3 selects direction; insn bit 4 clear means the register
 * side is postincremented.
 */
DISAS_INSN(move16_mem)
{
    int index = IS_USER(s);
    TCGv reg, addr;

    reg = AREG(insn, 0);
    addr = tcg_const_i32(read_im32(env, s));

    if ((insn >> 3) & 1) {
        /* MOVE16 (xxx).L, (Ay) */
        m68k_copy_line(reg, addr, index);
    } else {
        /* MOVE16 (Ay), (xxx).L */
        m68k_copy_line(addr, reg, index);
    }

    tcg_temp_free(addr);

    if (((insn >> 3) & 2) == 0) {
        /* (Ay)+ */
        tcg_gen_addi_i32(reg, reg, 16);
    }
}
/*
 * STRLDSR (ColdFire): push the current SR, then load SR from an
 * immediate.  The instruction must be followed by the word 0x46FC
 * (the move-to-SR pattern) and then the new SR value; supervisor only,
 * and the new SR must keep the S bit set.
 */
DISAS_INSN(strldsr)
{
    uint16_t ext;
    uint32_t addr;

    /* Remember the insn address for any exception we raise. */
    addr = s->pc - 2;
    ext = read_im16(env, s);
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_ILLEGAL);
        return;
    }
    ext = read_im16(env, s);
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
        return;
    }
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
}
/*
 * MOVE SR,<ea>.  Per the feature check, this is privileged except on
 * 68000-class cores.
 */
DISAS_INSN(move_from_sr)
{
    TCGv sr;

    if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    sr = gen_get_sr(s);
    DEST_EA(env, insn, OS_WORD, sr, NULL);
}
4630 #if defined(CONFIG_SOFTMMU)
/*
 * MOVES <ea>,Rn / MOVES Rn,<ea> -- move using the SFC/DFC alternate
 * function-code address spaces.  Privileged.  ext bit 11 selects the
 * direction, ext bit 15 address vs. data register.
 */
DISAS_INSN(moves)
{
    int opsize;
    uint16_t ext;
    TCGv reg;
    TCGv addr;
    int extend;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    opsize = insn_opsize(insn);

    if (ext & 0x8000) {
        /* address register */
        reg = AREG(ext, 12);
        extend = 1;
    } else {
        /* data register */
        reg = DREG(ext, 12);
        extend = 0;
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x0800) {
        /* from reg to ea */
        gen_store(s, opsize, addr, reg, DFC_INDEX(s));
    } else {
        /* from ea to reg */
        TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
        if (extend) {
            gen_ext(reg, tmp, opsize, 1);
        } else {
            gen_partset_reg(opsize, reg, tmp);
        }
        tcg_temp_free(tmp);
    }
    /* Writeback for the auto-modify addressing modes. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        /* Byte accesses through A7 keep the stack word-aligned. */
        tcg_gen_addi_i32(AREG(insn, 0), addr,
                         REG(insn, 0) == 7 && opsize == OS_BYTE
                         ? 2
                         : opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrememnt.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
/*
 * MOVE <ea>,SR.  Privileged.  Changing SR can change the MMU/interrupt
 * state, so end the TB afterwards.
 */
DISAS_INSN(move_to_sr)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    gen_move_to_sr(env, s, insn, false);
    gen_exit_tb(s);
}
/* MOVE USP,An.  Privileged; reads the saved user stack pointer. */
DISAS_INSN(move_from_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
/* MOVE An,USP.  Privileged; writes the saved user stack pointer. */
DISAS_INSN(move_to_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_st_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
/* HALT.  Privileged; implemented as an internal halt exception. */
DISAS_INSN(halt)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_exception(s, s->pc, EXCP_HALT_INSN);
}
/*
 * STOP #imm.  Privileged.  Loads SR from the immediate, marks the CPU
 * halted, and exits via EXCP_HLT so execution stops until an interrupt.
 */
DISAS_INSN(stop)
{
    uint16_t ext;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    gen_set_sr_im(s, ext, 0);
    tcg_gen_movi_i32(cpu_halted, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}
/* RTE.  Privileged; the actual frame unwinding is done in the
   EXCP_RTE exception handler. */
DISAS_INSN(rte)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    gen_exception(s, s->base.pc_next, EXCP_RTE);
}
/*
 * ColdFire MOVEC Rn,<control reg>.  Privileged; write-only on ColdFire.
 * Control register state may affect translation, so end the TB.
 */
DISAS_INSN(cf_movec)
{
    uint16_t ext;
    TCGv reg;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    if (ext & 0x8000) {
        reg = AREG(ext, 12);
    } else {
        reg = DREG(ext, 12);
    }
    gen_helper_cf_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
    gen_exit_tb(s);
}
/*
 * 680x0 MOVEC -- move to/from a control register selected by the low
 * 12 bits of the extension word.  insn bit 0 selects the direction.
 * Privileged; end the TB since control state may change translation.
 */
DISAS_INSN(m68k_movec)
{
    uint16_t ext;
    TCGv reg;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    if (ext & 0x8000) {
        reg = AREG(ext, 12);
    } else {
        reg = DREG(ext, 12);
    }
    if (insn & 1) {
        gen_helper_m68k_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
    } else {
        gen_helper_m68k_movec_from(reg, cpu_env, tcg_const_i32(ext & 0xfff));
    }
    gen_exit_tb(s);
}
4801 DISAS_INSN(intouch)
4803 if (IS_USER(s)) {
4804 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4805 return;
4807 /* ICache fetch. Implement as no-op. */
4810 DISAS_INSN(cpushl)
4812 if (IS_USER(s)) {
4813 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4814 return;
4816 /* Cache push/invalidate. Implement as no-op. */
4819 DISAS_INSN(cpush)
4821 if (IS_USER(s)) {
4822 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4823 return;
4825 /* Cache push/invalidate. Implement as no-op. */
4828 DISAS_INSN(cinv)
4830 if (IS_USER(s)) {
4831 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4832 return;
4834 /* Invalidate cache line. Implement as no-op. */
4837 #if defined(CONFIG_SOFTMMU)
/*
 * PFLUSH -- flush MMU TLB entries; the opmode (insn bits 3-4) is
 * interpreted by the helper.  Privileged.
 */
DISAS_INSN(pflush)
{
    TCGv opmode;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    opmode = tcg_const_i32((insn >> 3) & 3);
    gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
    tcg_temp_free(opmode);
}
/*
 * PTEST -- test an address translation; insn bit 5 selects a read vs.
 * write test.  Privileged; results are produced by the helper.
 */
DISAS_INSN(ptest)
{
    TCGv is_read;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    is_read = tcg_const_i32((insn >> 5) & 1);
    gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
    tcg_temp_free(is_read);
}
4864 #endif
/* WDDATA is not supported; always raise a privilege violation. */
DISAS_INSN(wddata)
{
    gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
}
/* WDEBUG.  Privileged; not implemented -- aborts emulation if reached. */
DISAS_INSN(wdebug)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug.  */
    cpu_abort(env_cpu(env), "WDEBUG not implemented");
}
4880 #endif
4882 DISAS_INSN(trap)
4884 gen_exception(s, s->base.pc_next, EXCP_TRAP0 + (insn & 0xf));
/*
 * Load the value of FP control register 'reg' (M68K_FPIAR/FPSR/FPCR)
 * into 'res'.  FPIAR is not modelled and always reads as zero.
 */
static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
{
    switch (reg) {
    case M68K_FPIAR:
        tcg_gen_movi_i32(res, 0);
        break;
    case M68K_FPSR:
        tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
        break;
    case M68K_FPCR:
        tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
        break;
    }
}
/*
 * Store 'val' into FP control register 'reg'.  Writes to FPIAR are
 * discarded; FPCR goes through a helper so rounding/precision state
 * can be updated.
 */
static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
{
    switch (reg) {
    case M68K_FPIAR:
        break;
    case M68K_FPSR:
        tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
        break;
    case M68K_FPCR:
        gen_helper_set_fpcr(cpu_env, val);
        break;
    }
}
/* Store FP control register 'reg' to memory at 'addr' as a 32-bit word. */
static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
{
    int index = IS_USER(s);
    TCGv tmp;

    tmp = tcg_temp_new();
    gen_load_fcr(s, tmp, reg);
    tcg_gen_qemu_st32(tmp, addr, index);
    tcg_temp_free(tmp);
}
/* Load FP control register 'reg' from a 32-bit word in memory at 'addr'. */
static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
{
    int index = IS_USER(s);
    TCGv tmp;

    tmp = tcg_temp_new();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    gen_store_fcr(s, tmp, reg);
    tcg_temp_free(tmp);
}
/*
 * FMOVE/FMOVEM to or from the FP control registers (FPCR/FPSR/FPIAR).
 * 'mask' (ext bits 10-12) selects which control registers take part;
 * ext bit 13 set means register-to-memory ("write").  Register and
 * immediate EA modes are handled inline; memory modes loop over the
 * selected registers, predecrement (mode 4) descending and all other
 * modes ascending.
 */
static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
                             uint32_t insn, uint32_t ext)
{
    int mask = (ext >> 10) & 7;
    int is_write = (ext >> 13) & 1;
    int mode = extract32(insn, 3, 3);
    int i;
    TCGv addr, tmp;

    switch (mode) {
    case 0: /* Dn */
        /* Only a single control register can go through Dn. */
        if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, DREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, DREG(insn, 0), mask);
        }
        return;
    case 1: /* An, only with FPIAR */
        if (mask != M68K_FPIAR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, AREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, AREG(insn, 0), mask);
        }
        return;
    case 7: /* Immediate */
        if (REG(insn, 0) == 4) {
            /* Immediate source: single register, load direction only. */
            if (is_write ||
                (mask != M68K_FPIAR && mask != M68K_FPSR &&
                 mask != M68K_FPCR)) {
                gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
                return;
            }
            tmp = tcg_const_i32(read_im32(env, s));
            gen_store_fcr(s, tmp, mask);
            tcg_temp_free(tmp);
            return;
        }
        break;
    default:
        break;
    }

    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }

    /* Work on a copy so the EA register itself is not clobbered. */
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);

    /*
     * mask:
     *
     * 0b100 Floating-Point Control Register
     * 0b010 Floating-Point Status Register
     * 0b001 Floating-Point Instruction Address Register
     *
     */

    if (is_write && mode == 4) {
        /* Predecrement store: walk the registers top-down. */
        for (i = 2; i >= 0; i--, mask >>= 1) {
            if (mask & 1) {
                gen_qemu_store_fcr(s, addr, 1 << i);
                if (mask != 1) {
                    tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        tcg_gen_mov_i32(AREG(insn, 0), addr);
    } else {
        for (i = 0; i < 3; i++, mask >>= 1) {
            if (mask & 1) {
                if (is_write) {
                    gen_qemu_store_fcr(s, addr, 1 << i);
                } else {
                    gen_qemu_load_fcr(s, addr, 1 << i);
                }
                /* Postincrement (mode 3) also advances past the last. */
                if (mask != 1 || mode == 3) {
                    tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        if (mode == 3) {
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
    tcg_temp_free_i32(addr);
}
/*
 * FMOVEM -- move multiple FP data registers to/from memory.  The
 * register list comes from the extension word (static) or from a data
 * register (dynamic, mode bit 0); ext bit 13 clear means load.  The
 * heavy lifting is done by per-direction helpers, which return the
 * final address in 'tmp' for the (An)+ / -(An) writeback.
 */
static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
                          uint32_t insn, uint32_t ext)
{
    int opsize;
    TCGv addr, tmp;
    int mode = (ext >> 11) & 0x3;
    int is_load = ((ext & 0x2000) == 0);

    if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
        opsize = OS_EXTENDED;
    } else {
        opsize = OS_DOUBLE;  /* FIXME */
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    tmp = tcg_temp_new();
    if (mode & 0x1) {
        /* Dynamic register list */
        tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
    } else {
        /* Static register list */
        tcg_gen_movi_i32(tmp, ext & 0xff);
    }

    if (!is_load && (mode & 2) == 0) {
        /*
         * predecrement addressing mode
         * only available to store register to memory
         */
        if (opsize == OS_EXTENDED) {
            gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
        } else {
            gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
        }
    } else {
        /* postincrement addressing mode */
        if (opsize == OS_EXTENDED) {
            if (is_load) {
                gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
            }
        } else {
            if (is_load) {
                gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
            }
        }
    }
    /* Write the final address back for (An)+ and -(An) EA modes. */
    if ((insn & 070) == 030 || (insn & 070) == 040) {
        tcg_gen_mov_i32(AREG(insn, 0), tmp);
    }
    tcg_temp_free(tmp);
}
/*
 * ??? FP exceptions are not implemented.  Most exceptions are deferred until
 * immediately before the next FP instruction is executed.
 */
/*
 * Main FPU instruction decoder.  The extension word selects the class
 * of operation (ext bits 13-15): fmovecr, fmove out, control-register
 * moves, fmovem, or a monadic/dyadic arithmetic op identified by the
 * low seven bits (opmode).  After an arithmetic op, ftst on the result
 * updates the FP condition codes.
 */
DISAS_INSN(fpu)
{
    uint16_t ext;
    int opmode;
    int opsize;
    TCGv_ptr cpu_src, cpu_dest;

    ext = read_im16(env, s);
    opmode = ext & 0x7f;
    switch ((ext >> 13) & 7) {
    case 0:
        break;
    case 1:
        goto undef;
    case 2:
        if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
            /* fmovecr */
            TCGv rom_offset = tcg_const_i32(opmode);
            cpu_dest = gen_fp_ptr(REG(ext, 7));
            gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
            tcg_temp_free_ptr(cpu_dest);
            tcg_temp_free(rom_offset);
            return;
        }
        break;
    case 3: /* fmove out */
        cpu_src = gen_fp_ptr(REG(ext, 7));
        opsize = ext_opsize(ext, 10);
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_STORE, IS_USER(s)) == -1) {
            gen_addr_fault(s);
        }
        gen_helper_ftst(cpu_env, cpu_src);
        tcg_temp_free_ptr(cpu_src);
        return;
    case 4: /* fmove to control register.  */
    case 5: /* fmove from control register.  */
        gen_op_fmove_fcr(env, s, insn, ext);
        return;
    case 6: /* fmovem */
    case 7:
        if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
            goto undef;
        }
        gen_op_fmovem(env, s, insn, ext);
        return;
    }
    if (ext & (1 << 14)) {
        /* Source effective address.  */
        opsize = ext_opsize(ext, 10);
        cpu_src = gen_fp_result_ptr();
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_LOADS, IS_USER(s)) == -1) {
            gen_addr_fault(s);
            return;
        }
    } else {
        /* Source register.  */
        opsize = OS_EXTENDED;
        cpu_src = gen_fp_ptr(REG(ext, 10));
    }
    cpu_dest = gen_fp_ptr(REG(ext, 7));
    switch (opmode) {
    case 0: /* fmove */
        gen_fp_move(cpu_dest, cpu_src);
        break;
    case 0x40: /* fsmove */
        gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x44: /* fdmove */
        gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
        break;
    case 1: /* fint */
        gen_helper_firound(cpu_env, cpu_dest, cpu_src);
        break;
    case 2: /* fsinh */
        gen_helper_fsinh(cpu_env, cpu_dest, cpu_src);
        break;
    case 3: /* fintrz */
        gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
        break;
    case 4: /* fsqrt */
        gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x41: /* fssqrt */
        gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x45: /* fdsqrt */
        gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x06: /* flognp1 */
        gen_helper_flognp1(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x08: /* fetoxm1 */
        gen_helper_fetoxm1(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x09: /* ftanh */
        gen_helper_ftanh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0a: /* fatan */
        gen_helper_fatan(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0c: /* fasin */
        gen_helper_fasin(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0d: /* fatanh */
        gen_helper_fatanh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0e: /* fsin */
        gen_helper_fsin(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0f: /* ftan */
        gen_helper_ftan(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x10: /* fetox */
        gen_helper_fetox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x11: /* ftwotox */
        gen_helper_ftwotox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x12: /* ftentox */
        gen_helper_ftentox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x14: /* flogn */
        gen_helper_flogn(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x15: /* flog10 */
        gen_helper_flog10(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x16: /* flog2 */
        gen_helper_flog2(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x18: /* fabs */
        gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x58: /* fsabs */
        gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5c: /* fdabs */
        gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x19: /* fcosh */
        gen_helper_fcosh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1a: /* fneg */
        gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5a: /* fsneg */
        gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5e: /* fdneg */
        gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1c: /* facos */
        gen_helper_facos(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1d: /* fcos */
        gen_helper_fcos(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1e: /* fgetexp */
        gen_helper_fgetexp(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1f: /* fgetman */
        gen_helper_fgetman(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x20: /* fdiv */
        gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x60: /* fsdiv */
        gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x64: /* fddiv */
        gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x21: /* fmod */
        gen_helper_fmod(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x22: /* fadd */
        gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x62: /* fsadd */
        gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x66: /* fdadd */
        gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x23: /* fmul */
        gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x63: /* fsmul */
        gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x67: /* fdmul */
        gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x24: /* fsgldiv */
        gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x25: /* frem */
        gen_helper_frem(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x26: /* fscale */
        gen_helper_fscale(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x27: /* fsglmul */
        gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x28: /* fsub */
        gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x68: /* fssub */
        gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x6c: /* fdsub */
        gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x30: case 0x31: case 0x32:
    case 0x33: case 0x34: case 0x35:
    case 0x36: case 0x37: {
            /* fsincos: second destination register in ext bits 0-2. */
            TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
            gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src);
            tcg_temp_free_ptr(cpu_dest2);
        }
        break;
    case 0x38: /* fcmp */
        gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
        return;
    case 0x3a: /* ftst */
        gen_helper_ftst(cpu_env, cpu_src);
        return;
    default:
        goto undef;
    }
    tcg_temp_free_ptr(cpu_src);
    gen_helper_ftst(cpu_env, cpu_dest);
    tcg_temp_free_ptr(cpu_dest);
    return;
undef:
    /* FIXME: Is this right for offset addressing modes?  */
    s->pc -= 2;
    disas_undef_fpu(env, s, insn);
}
5345 static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
5347 TCGv fpsr;
5349 c->g1 = 1;
5350 c->v2 = tcg_const_i32(0);
5351 c->g2 = 0;
5352 /* TODO: Raise BSUN exception. */
5353 fpsr = tcg_temp_new();
5354 gen_load_fcr(s, fpsr, M68K_FPSR);
5355 switch (cond) {
5356 case 0: /* False */
5357 case 16: /* Signaling False */
5358 c->v1 = c->v2;
5359 c->tcond = TCG_COND_NEVER;
5360 break;
5361 case 1: /* EQual Z */
5362 case 17: /* Signaling EQual Z */
5363 c->v1 = tcg_temp_new();
5364 c->g1 = 0;
5365 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5366 c->tcond = TCG_COND_NE;
5367 break;
5368 case 2: /* Ordered Greater Than !(A || Z || N) */
5369 case 18: /* Greater Than !(A || Z || N) */
5370 c->v1 = tcg_temp_new();
5371 c->g1 = 0;
5372 tcg_gen_andi_i32(c->v1, fpsr,
5373 FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5374 c->tcond = TCG_COND_EQ;
5375 break;
5376 case 3: /* Ordered Greater than or Equal Z || !(A || N) */
5377 case 19: /* Greater than or Equal Z || !(A || N) */
5378 c->v1 = tcg_temp_new();
5379 c->g1 = 0;
5380 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5381 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
5382 tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
5383 tcg_gen_or_i32(c->v1, c->v1, fpsr);
5384 tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5385 c->tcond = TCG_COND_NE;
5386 break;
5387 case 4: /* Ordered Less Than !(!N || A || Z); */
5388 case 20: /* Less Than !(!N || A || Z); */
5389 c->v1 = tcg_temp_new();
5390 c->g1 = 0;
5391 tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
5392 tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
5393 c->tcond = TCG_COND_EQ;
5394 break;
5395 case 5: /* Ordered Less than or Equal Z || (N && !A) */
5396 case 21: /* Less than or Equal Z || (N && !A) */
5397 c->v1 = tcg_temp_new();
5398 c->g1 = 0;
5399 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5400 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
5401 tcg_gen_andc_i32(c->v1, fpsr, c->v1);
5402 tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
5403 c->tcond = TCG_COND_NE;
5404 break;
5405 case 6: /* Ordered Greater or Less than !(A || Z) */
5406 case 22: /* Greater or Less than !(A || Z) */
5407 c->v1 = tcg_temp_new();
5408 c->g1 = 0;
5409 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
5410 c->tcond = TCG_COND_EQ;
5411 break;
5412 case 7: /* Ordered !A */
5413 case 23: /* Greater, Less or Equal !A */
5414 c->v1 = tcg_temp_new();
5415 c->g1 = 0;
5416 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5417 c->tcond = TCG_COND_EQ;
5418 break;
5419 case 8: /* Unordered A */
5420 case 24: /* Not Greater, Less or Equal A */
5421 c->v1 = tcg_temp_new();
5422 c->g1 = 0;
5423 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5424 c->tcond = TCG_COND_NE;
5425 break;
5426 case 9: /* Unordered or Equal A || Z */
5427 case 25: /* Not Greater or Less then A || Z */
5428 c->v1 = tcg_temp_new();
5429 c->g1 = 0;
5430 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
5431 c->tcond = TCG_COND_NE;
5432 break;
5433 case 10: /* Unordered or Greater Than A || !(N || Z)) */
5434 case 26: /* Not Less or Equal A || !(N || Z)) */
5435 c->v1 = tcg_temp_new();
5436 c->g1 = 0;
5437 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5438 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
5439 tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
5440 tcg_gen_or_i32(c->v1, c->v1, fpsr);
5441 tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5442 c->tcond = TCG_COND_NE;
5443 break;
5444 case 11: /* Unordered or Greater or Equal A || Z || !N */
5445 case 27: /* Not Less Than A || Z || !N */
5446 c->v1 = tcg_temp_new();
5447 c->g1 = 0;
5448 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5449 tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5450 c->tcond = TCG_COND_NE;
5451 break;
5452 case 12: /* Unordered or Less Than A || (N && !Z) */
5453 case 28: /* Not Greater than or Equal A || (N && !Z) */
5454 c->v1 = tcg_temp_new();
5455 c->g1 = 0;
5456 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5457 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
5458 tcg_gen_andc_i32(c->v1, fpsr, c->v1);
5459 tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
5460 c->tcond = TCG_COND_NE;
5461 break;
5462 case 13: /* Unordered or Less or Equal A || Z || N */
5463 case 29: /* Not Greater Than A || Z || N */
5464 c->v1 = tcg_temp_new();
5465 c->g1 = 0;
5466 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5467 c->tcond = TCG_COND_NE;
5468 break;
5469 case 14: /* Not Equal !Z */
5470 case 30: /* Signaling Not Equal !Z */
5471 c->v1 = tcg_temp_new();
5472 c->g1 = 0;
5473 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5474 c->tcond = TCG_COND_EQ;
5475 break;
5476 case 15: /* True */
5477 case 31: /* Signaling True */
5478 c->v1 = c->v2;
5479 c->tcond = TCG_COND_ALWAYS;
5480 break;
5482 tcg_temp_free(fpsr);
/*
 * Emit a conditional branch to @l1, taken when FPU condition @cond
 * (an FBcc/FScc condition code, 0..31) holds.  gen_fcc_cond() builds
 * a DisasCompare from the FPSR condition-code bits; cc_op is synced
 * before the branch and the compare temporaries are released after.
 */
5485 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
5487 DisasCompare c;
5489 gen_fcc_cond(&c, s, cond);
5490 update_cc_op(s);
5491 tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
5492 free_cond(&c);
/*
 * FBcc: FPU conditional branch.  The 16-bit displacement is sign
 * extended; when opcode bit 6 is set a second extension word supplies
 * the low half of a 32-bit displacement.  If the condition holds we
 * jump to base + offset, otherwise fall through to the next insn.
 */
5495 DISAS_INSN(fbcc)
5497 uint32_t offset;
5498 uint32_t base;
5499 TCGLabel *l1;
/* Displacement is relative to the address of the first extension word. */
5501 base = s->pc;
5502 offset = (int16_t)read_im16(env, s);
5503 if (insn & (1 << 6)) {
5504 offset = (offset << 16) | read_im16(env, s);
5507 l1 = gen_new_label();
5508 update_cc_op(s);
5509 gen_fjmpcc(s, insn & 0x3f, l1);
5510 gen_jmp_tb(s, 0, s->pc);
5511 gen_set_label(l1);
5512 gen_jmp_tb(s, 1, base + offset);
/*
 * FScc: set a byte at the destination EA to all-ones or all-zeroes
 * according to the FPU condition in the extension word.  The 0/1
 * setcond result is negated to 0/-1 so the low byte written by
 * DEST_EA is 0x00 or 0xff.
 */
5515 DISAS_INSN(fscc)
5517 DisasCompare c;
5518 int cond;
5519 TCGv tmp;
5520 uint16_t ext;
5522 ext = read_im16(env, s);
5523 cond = ext & 0x3f;
5524 gen_fcc_cond(&c, s, cond);
5526 tmp = tcg_temp_new();
5527 tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
5528 free_cond(&c);
/* Turn 0/1 into 0/-1 (0x00/0xff in the written byte). */
5530 tcg_gen_neg_i32(tmp, tmp);
5531 DEST_EA(env, insn, OS_BYTE, tmp, NULL);
5532 tcg_temp_free(tmp);
5535 #if defined(CONFIG_SOFTMMU)
/*
 * FRESTORE: privileged.  On M68040 the state-frame address is read via
 * SRC_EA but the frame contents are not validated (see FIXME); on
 * other cores the insn is treated as undefined.
 */
5536 DISAS_INSN(frestore)
5538 TCGv addr;
5540 if (IS_USER(s)) {
5541 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5542 return;
5544 if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5545 SRC_EA(env, addr, OS_LONG, 0, NULL);
5546 /* FIXME: check the state frame */
5547 } else {
5548 disas_undef(env, s, insn);
/*
 * FSAVE: privileged.  On M68040 an IDLE state frame (format word
 * 0x41000000) is always written to the destination EA; other cores
 * treat the insn as undefined.
 */
5552 DISAS_INSN(fsave)
5554 if (IS_USER(s)) {
5555 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5556 return;
5559 if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5560 /* always write IDLE */
5561 TCGv idle = tcg_const_i32(0x41000000);
5562 DEST_EA(env, insn, OS_LONG, idle, NULL);
5563 tcg_temp_free(idle);
5564 } else {
5565 disas_undef(env, s, insn);
5568 #endif
/*
 * Extract the @upper or lower 16-bit half of @val as a MAC multiply
 * operand, in a new temporary.  The extraction depends on the current
 * MACSR operating mode (sampled at translation time from s->env):
 *   FI (fractional): keep the halfword in the high 16 bits;
 *   SU (signed):     sign-extend the halfword;
 *   default:         zero-extend the halfword.
 */
5570 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
5572 TCGv tmp = tcg_temp_new();
5573 if (s->env->macsr & MACSR_FI) {
5574 if (upper)
5575 tcg_gen_andi_i32(tmp, val, 0xffff0000);
5576 else
5577 tcg_gen_shli_i32(tmp, val, 16);
5578 } else if (s->env->macsr & MACSR_SU) {
5579 if (upper)
5580 tcg_gen_sari_i32(tmp, val, 16);
5581 else
5582 tcg_gen_ext16s_i32(tmp, val);
5583 } else {
5584 if (upper)
5585 tcg_gen_shri_i32(tmp, val, 16);
5586 else
5587 tcg_gen_ext16u_i32(tmp, val);
5589 return tmp;
/* Clear the per-operation MACSR flags (V, Z, N, EV) before an update. */
5592 static void gen_mac_clear_flags(void)
5594 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
5595 ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
/*
 * ColdFire EMAC MAC/MSAC: multiply rx * ry (word or long operands,
 * mode chosen by MACSR FI/SU sampled at translation time), optionally
 * scale the product, then add it to (or, with insn bit 8, subtract it
 * from) an accumulator, with saturation done by helpers.  The "MAC
 * with load" forms (insn & 0x30) also transfer a longword between
 * memory and a register through the masked address.  The dual
 * accumulate forms (CF_EMAC_B only) repeat the accumulate into a
 * second accumulator selected by the extension word.
 */
5598 DISAS_INSN(mac)
5600 TCGv rx;
5601 TCGv ry;
5602 uint16_t ext;
5603 int acc;
5604 TCGv tmp;
5605 TCGv addr;
5606 TCGv loadval;
5607 int dual;
5608 TCGv saved_flags;
/* Lazily allocate the 64-bit product temporary, shared per TB. */
5610 if (!s->done_mac) {
5611 s->mactmp = tcg_temp_new_i64();
5612 s->done_mac = 1;
5615 ext = read_im16(env, s);
/* Accumulator index: insn bit 7 is bit 0, ext bit 4 is bit 1. */
5617 acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
5618 dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
5619 if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
5620 disas_undef(env, s, insn);
5621 return;
5623 if (insn & 0x30) {
5624 /* MAC with load. */
5625 tmp = gen_lea(env, s, insn, OS_LONG);
5626 addr = tcg_temp_new();
5627 tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
5629 * Load the value now to ensure correct exception behavior.
5630 * Perform writeback after reading the MAC inputs.
5632 loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));
/* NOTE(review): the load forms appear to flip the accumulator index — confirm vs. EMAC manual. */
5634 acc ^= 1;
5635 rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
5636 ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
5637 } else {
5638 loadval = addr = NULL_QREG;
5639 rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
5640 ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5643 gen_mac_clear_flags();
5644 #if 0
5645 l1 = -1;
5646 /* Disabled because conditional branches clobber temporary vars. */
5647 if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
5648 /* Skip the multiply if we know we will ignore it. */
5649 l1 = gen_new_label();
5650 tmp = tcg_temp_new();
5651 tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
5652 gen_op_jmp_nz32(tmp, l1);
5654 #endif
5656 if ((ext & 0x0800) == 0) {
5657 /* Word. */
5658 rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
5659 ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
5661 if (s->env->macsr & MACSR_FI) {
5662 gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
5663 } else {
5664 if (s->env->macsr & MACSR_SU)
5665 gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
5666 else
5667 gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
/* Scale factor from ext bits 9-10: 1 = <<1, 3 = >>1 (integer modes only). */
5668 switch ((ext >> 9) & 3) {
5669 case 1:
5670 tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
5671 break;
5672 case 3:
5673 tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
5674 break;
5678 if (dual) {
5679 /* Save the overflow flag from the multiply. */
5680 saved_flags = tcg_temp_new();
5681 tcg_gen_mov_i32(saved_flags, QREG_MACSR);
5682 } else {
5683 saved_flags = NULL_QREG;
5686 #if 0
5687 /* Disabled because conditional branches clobber temporary vars. */
5688 if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
5689 /* Skip the accumulate if the value is already saturated. */
5690 l1 = gen_new_label();
5691 tmp = tcg_temp_new();
5692 gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
5693 gen_op_jmp_nz32(tmp, l1);
5695 #endif
/* Insn bit 8 selects MSAC (subtract) vs MAC (add). */
5697 if (insn & 0x100)
5698 tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
5699 else
5700 tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
5702 if (s->env->macsr & MACSR_FI)
5703 gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
5704 else if (s->env->macsr & MACSR_SU)
5705 gen_helper_macsats(cpu_env, tcg_const_i32(acc));
5706 else
5707 gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
5709 #if 0
5710 /* Disabled because conditional branches clobber temporary vars. */
5711 if (l1 != -1)
5712 gen_set_label(l1);
5713 #endif
5715 if (dual) {
5716 /* Dual accumulate variant. */
5717 acc = (ext >> 2) & 3;
5718 /* Restore the overflow flag from the multiplier. */
5719 tcg_gen_mov_i32(QREG_MACSR, saved_flags);
5720 #if 0
5721 /* Disabled because conditional branches clobber temporary vars. */
5722 if ((s->env->macsr & MACSR_OMC) != 0) {
5723 /* Skip the accumulate if the value is already saturated. */
5724 l1 = gen_new_label();
5725 tmp = tcg_temp_new();
5726 gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
5727 gen_op_jmp_nz32(tmp, l1);
5729 #endif
5730 if (ext & 2)
5731 tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
5732 else
5733 tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
5734 if (s->env->macsr & MACSR_FI)
5735 gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
5736 else if (s->env->macsr & MACSR_SU)
5737 gen_helper_macsats(cpu_env, tcg_const_i32(acc));
5738 else
5739 gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
5740 #if 0
5741 /* Disabled because conditional branches clobber temporary vars. */
5742 if (l1 != -1)
5743 gen_set_label(l1);
5744 #endif
5746 gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
/* Writeback for the "MAC with load" forms, after MAC inputs were read. */
5748 if (insn & 0x30) {
5749 TCGv rw;
5750 rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
5751 tcg_gen_mov_i32(rw, loadval);
5753 * FIXME: Should address writeback happen with the masked or
5754 * unmasked value?
5756 switch ((insn >> 3) & 7) {
5757 case 3: /* Post-increment. */
5758 tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
5759 break;
5760 case 4: /* Pre-decrement. */
5761 tcg_gen_mov_i32(AREG(insn, 0), addr);
5763 tcg_temp_free(loadval);
/*
 * Move an accumulator to a data/address register.  The readout
 * depends on MACSR: fractional mode uses a helper, integer mode with
 * saturation disabled (OMC clear) just truncates, otherwise signed or
 * unsigned saturating helpers are used.  With insn bit 6 set the
 * accumulator is cleared afterwards and its PAV overflow flag reset
 * (presumably the MOVCLR-style variant — verify against the manual).
 */
5767 DISAS_INSN(from_mac)
5769 TCGv rx;
5770 TCGv_i64 acc;
5771 int accnum;
5773 rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5774 accnum = (insn >> 9) & 3;
5775 acc = MACREG(accnum);
5776 if (s->env->macsr & MACSR_FI) {
5777 gen_helper_get_macf(rx, cpu_env, acc);
5778 } else if ((s->env->macsr & MACSR_OMC) == 0) {
5779 tcg_gen_extrl_i64_i32(rx, acc);
5780 } else if (s->env->macsr & MACSR_SU) {
5781 gen_helper_get_macs(rx, acc);
5782 } else {
5783 gen_helper_get_macu(rx, acc);
5785 if (insn & 0x40) {
5786 tcg_gen_movi_i64(acc, 0);
5787 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
/*
 * Copy accumulator 'src' (insn bits 0-1) to accumulator 'dest'
 * (insn bits 9-10) via helper, then recompute the MAC flags for the
 * destination accumulator.
 */
5791 DISAS_INSN(move_mac)
5793 /* FIXME: This can be done without a helper. */
5794 int src;
5795 TCGv dest;
5796 src = insn & 3;
5797 dest = tcg_const_i32((insn >> 9) & 3);
5798 gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
5799 gen_mac_clear_flags();
5800 gen_helper_mac_set_flags(cpu_env, dest);
/* Move MACSR to a data/address register (insn bit 3 selects Ax vs Dx). */
5803 DISAS_INSN(from_macsr)
5805 TCGv reg;
5807 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5808 tcg_gen_mov_i32(reg, QREG_MACSR);
/* Move the MAC address MASK register to a data/address register. */
5811 DISAS_INSN(from_mask)
5813 TCGv reg;
5814 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5815 tcg_gen_mov_i32(reg, QREG_MAC_MASK);
/*
 * Move an accumulator-extension word pair to a register.  Insn bit 10
 * selects ACC2/3 vs ACC0/1; the fractional (FI) and integer helpers
 * format the extension bits differently.
 */
5818 DISAS_INSN(from_mext)
5820 TCGv reg;
5821 TCGv acc;
5822 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5823 acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5824 if (s->env->macsr & MACSR_FI)
5825 gen_helper_get_mac_extf(reg, cpu_env, acc);
5826 else
5827 gen_helper_get_mac_exti(reg, cpu_env, acc);
/*
 * Copy the low 4 MACSR bits into the CCR via the set_sr helper, then
 * mark the condition codes as live in CC_OP_FLAGS form.
 */
5830 DISAS_INSN(macsr_to_ccr)
5832 TCGv tmp = tcg_temp_new();
5833 tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
5834 gen_helper_set_sr(cpu_env, tmp);
5835 tcg_temp_free(tmp);
5836 set_cc_op(s, CC_OP_FLAGS);
/*
 * Load a longword from the source EA into an accumulator.  Extension
 * to 64 bits follows the MACSR mode: fractional shifts left 8 after
 * sign-extending, signed mode sign-extends, default zero-extends.
 * The accumulator's PAV overflow bit is cleared and flags recomputed.
 */
5839 DISAS_INSN(to_mac)
5841 TCGv_i64 acc;
5842 TCGv val;
5843 int accnum;
5844 accnum = (insn >> 9) & 3;
5845 acc = MACREG(accnum);
5846 SRC_EA(env, val, OS_LONG, 0, NULL);
5847 if (s->env->macsr & MACSR_FI) {
5848 tcg_gen_ext_i32_i64(acc, val);
5849 tcg_gen_shli_i64(acc, acc, 8);
5850 } else if (s->env->macsr & MACSR_SU) {
5851 tcg_gen_ext_i32_i64(acc, val);
5852 } else {
5853 tcg_gen_extu_i32_i64(acc, val);
5855 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5856 gen_mac_clear_flags();
5857 gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
/*
 * Write MACSR from the source EA.  The translator inspects
 * s->env->macsr at translation time (see e.g. DISAS_INSN(mac)), so
 * the TB must end here; gen_exit_tb() forces a return to the main
 * loop so subsequent code is retranslated with the new mode.
 */
5860 DISAS_INSN(to_macsr)
5862 TCGv val;
5863 SRC_EA(env, val, OS_LONG, 0, NULL);
5864 gen_helper_set_macsr(cpu_env, val);
5865 gen_exit_tb(s);
/*
 * Write the MAC address MASK register; the upper 16 bits are always
 * forced to ones.
 */
5868 DISAS_INSN(to_mask)
5870 TCGv val;
5871 SRC_EA(env, val, OS_LONG, 0, NULL);
5872 tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
/*
 * Write an accumulator-extension word pair (insn bit 10 selects
 * ACC2/3 vs ACC0/1) using the helper that matches the current MACSR
 * mode (fractional / signed / unsigned).
 */
5875 DISAS_INSN(to_mext)
5877 TCGv val;
5878 TCGv acc;
5879 SRC_EA(env, val, OS_LONG, 0, NULL);
5880 acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5881 if (s->env->macsr & MACSR_FI)
5882 gen_helper_set_mac_extf(cpu_env, val, acc);
5883 else if (s->env->macsr & MACSR_SU)
5884 gen_helper_set_mac_exts(cpu_env, val, acc);
5885 else
5886 gen_helper_set_mac_extu(cpu_env, val, acc);
/* Dispatch table: one handler per 16-bit opcode value. */
5889 static disas_proc opcode_table[65536];
/*
 * Register @proc for every opcode value matching @opcode under @mask.
 * Only the range below the first zero mask bit is scanned, so the
 * iteration touches just the slots that can possibly match; a
 * definition whose set bits fall outside the mask is a programming
 * error and aborts.
 */
5891 static void
5892 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5894 int i;
5895 int from;
5896 int to;
5898 /* Sanity check. All set bits must be included in the mask. */
5899 if (opcode & ~mask) {
5900 fprintf(stderr,
5901 "qemu internal error: bogus opcode definition %04x/%04x\n",
5902 opcode, mask);
5903 abort();
5906 * This could probably be cleverer. For now just optimize the case where
5907 * the top bits are known.
5909 /* Find the first zero bit in the mask. */
5910 i = 0x8000;
5911 while ((i & mask) != 0)
5912 i >>= 1;
5913 /* Iterate over all combinations of this and lower bits. */
5914 if (i == 0)
5915 i = 1;
5916 else
5917 i <<= 1;
5918 from = opcode & ~(i - 1);
5919 to = from + i;
5920 for (i = from; i < to; i++) {
5921 if ((i & mask) == opcode)
5922 opcode_table[i] = proc;
5927  * Register m68k opcode handlers. Order is important.
5928  * Later insn override earlier ones.
/*
 * Populate opcode_table[] for the CPU described by @env.  BASE()
 * registers an insn unconditionally; INSN() only when the named
 * feature bit is present.  Because registration overwrites earlier
 * entries, more specific or feature-gated patterns must come after
 * the generic ones they refine (e.g. the long-branch disable/re-add
 * sequence below).  The table is built only once per process.
 */
5930 void register_m68k_insns (CPUM68KState *env)
5933 * Build the opcode table only once to avoid
5934 * multithreading issues.
5936 if (opcode_table[0] != NULL) {
5937 return;
5941 * use BASE() for instruction available
5942 * for CF_ISA_A and M68000.
5944 #define BASE(name, opcode, mask) \
5945 register_opcode(disas_##name, 0x##opcode, 0x##mask)
5946 #define INSN(name, opcode, mask, feature) do { \
5947 if (m68k_feature(env, M68K_FEATURE_##feature)) \
5948 BASE(name, opcode, mask); \
5949 } while(0)
5950 BASE(undef, 0000, 0000);
5951 INSN(arith_im, 0080, fff8, CF_ISA_A);
5952 INSN(arith_im, 0000, ff00, M68000);
5953 INSN(chk2, 00c0, f9c0, CHK2);
5954 INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
5955 BASE(bitop_reg, 0100, f1c0);
5956 BASE(bitop_reg, 0140, f1c0);
5957 BASE(bitop_reg, 0180, f1c0);
5958 BASE(bitop_reg, 01c0, f1c0);
5959 INSN(movep, 0108, f138, MOVEP);
5960 INSN(arith_im, 0280, fff8, CF_ISA_A);
5961 INSN(arith_im, 0200, ff00, M68000);
5962 INSN(undef, 02c0, ffc0, M68000);
5963 INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
5964 INSN(arith_im, 0480, fff8, CF_ISA_A);
5965 INSN(arith_im, 0400, ff00, M68000);
5966 INSN(undef, 04c0, ffc0, M68000);
5967 INSN(arith_im, 0600, ff00, M68000);
5968 INSN(undef, 06c0, ffc0, M68000);
5969 INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
5970 INSN(arith_im, 0680, fff8, CF_ISA_A);
5971 INSN(arith_im, 0c00, ff38, CF_ISA_A);
5972 INSN(arith_im, 0c00, ff00, M68000);
5973 BASE(bitop_im, 0800, ffc0);
5974 BASE(bitop_im, 0840, ffc0);
5975 BASE(bitop_im, 0880, ffc0);
5976 BASE(bitop_im, 08c0, ffc0);
5977 INSN(arith_im, 0a80, fff8, CF_ISA_A);
5978 INSN(arith_im, 0a00, ff00, M68000);
5979 #if defined(CONFIG_SOFTMMU)
5980 INSN(moves, 0e00, ff00, M68000);
5981 #endif
5982 INSN(cas, 0ac0, ffc0, CAS);
5983 INSN(cas, 0cc0, ffc0, CAS);
5984 INSN(cas, 0ec0, ffc0, CAS);
5985 INSN(cas2w, 0cfc, ffff, CAS);
5986 INSN(cas2l, 0efc, ffff, CAS);
5987 BASE(move, 1000, f000);
5988 BASE(move, 2000, f000);
5989 BASE(move, 3000, f000);
5990 INSN(chk, 4000, f040, M68000);
5991 INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
5992 INSN(negx, 4080, fff8, CF_ISA_A);
5993 INSN(negx, 4000, ff00, M68000);
5994 INSN(undef, 40c0, ffc0, M68000);
5995 INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
5996 INSN(move_from_sr, 40c0, ffc0, M68000);
5997 BASE(lea, 41c0, f1c0);
5998 BASE(clr, 4200, ff00);
5999 BASE(undef, 42c0, ffc0);
6000 INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
6001 INSN(move_from_ccr, 42c0, ffc0, M68000);
6002 INSN(neg, 4480, fff8, CF_ISA_A);
6003 INSN(neg, 4400, ff00, M68000);
6004 INSN(undef, 44c0, ffc0, M68000);
6005 BASE(move_to_ccr, 44c0, ffc0);
6006 INSN(not, 4680, fff8, CF_ISA_A);
6007 INSN(not, 4600, ff00, M68000);
6008 #if defined(CONFIG_SOFTMMU)
6009 BASE(move_to_sr, 46c0, ffc0);
6010 #endif
6011 INSN(nbcd, 4800, ffc0, M68000);
6012 INSN(linkl, 4808, fff8, M68000);
6013 BASE(pea, 4840, ffc0);
6014 BASE(swap, 4840, fff8);
6015 INSN(bkpt, 4848, fff8, BKPT);
6016 INSN(movem, 48d0, fbf8, CF_ISA_A);
6017 INSN(movem, 48e8, fbf8, CF_ISA_A);
6018 INSN(movem, 4880, fb80, M68000);
6019 BASE(ext, 4880, fff8);
6020 BASE(ext, 48c0, fff8);
6021 BASE(ext, 49c0, fff8);
6022 BASE(tst, 4a00, ff00);
6023 INSN(tas, 4ac0, ffc0, CF_ISA_B);
6024 INSN(tas, 4ac0, ffc0, M68000);
6025 #if defined(CONFIG_SOFTMMU)
6026 INSN(halt, 4ac8, ffff, CF_ISA_A);
6027 #endif
6028 INSN(pulse, 4acc, ffff, CF_ISA_A);
6029 BASE(illegal, 4afc, ffff);
6030 INSN(mull, 4c00, ffc0, CF_ISA_A);
6031 INSN(mull, 4c00, ffc0, LONG_MULDIV);
6032 INSN(divl, 4c40, ffc0, CF_ISA_A);
6033 INSN(divl, 4c40, ffc0, LONG_MULDIV);
6034 INSN(sats, 4c80, fff8, CF_ISA_B);
6035 BASE(trap, 4e40, fff0);
6036 BASE(link, 4e50, fff8);
6037 BASE(unlk, 4e58, fff8);
6038 #if defined(CONFIG_SOFTMMU)
6039 INSN(move_to_usp, 4e60, fff8, USP);
6040 INSN(move_from_usp, 4e68, fff8, USP);
6041 INSN(reset, 4e70, ffff, M68000);
6042 BASE(stop, 4e72, ffff);
6043 BASE(rte, 4e73, ffff);
6044 INSN(cf_movec, 4e7b, ffff, CF_ISA_A);
6045 INSN(m68k_movec, 4e7a, fffe, MOVEC);
6046 #endif
6047 BASE(nop, 4e71, ffff);
6048 INSN(rtd, 4e74, ffff, RTD);
6049 BASE(rts, 4e75, ffff);
6050 INSN(rtr, 4e77, ffff, M68000);
6051 BASE(jump, 4e80, ffc0);
6052 BASE(jump, 4ec0, ffc0);
6053 INSN(addsubq, 5000, f080, M68000);
6054 BASE(addsubq, 5080, f0c0);
6055 INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx */
6056 INSN(scc, 50c0, f0c0, M68000); /* Scc.B <EA> */
6057 INSN(dbcc, 50c8, f0f8, M68000);
6058 INSN(tpf, 51f8, fff8, CF_ISA_A);
6060 /* Branch instructions. */
6061 BASE(branch, 6000, f000);
6062 /* Disable long branch instructions, then add back the ones we want. */
6063 BASE(undef, 60ff, f0ff); /* All long branches. */
6064 INSN(branch, 60ff, f0ff, CF_ISA_B);
6065 INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
6066 INSN(branch, 60ff, ffff, BRAL);
6067 INSN(branch, 60ff, f0ff, BCCL);
6069 BASE(moveq, 7000, f100);
6070 INSN(mvzs, 7100, f100, CF_ISA_B);
6071 BASE(or, 8000, f000);
6072 BASE(divw, 80c0, f0c0);
6073 INSN(sbcd_reg, 8100, f1f8, M68000);
6074 INSN(sbcd_mem, 8108, f1f8, M68000);
6075 BASE(addsub, 9000, f000);
6076 INSN(undef, 90c0, f0c0, CF_ISA_A);
6077 INSN(subx_reg, 9180, f1f8, CF_ISA_A);
6078 INSN(subx_reg, 9100, f138, M68000);
6079 INSN(subx_mem, 9108, f138, M68000);
6080 INSN(suba, 91c0, f1c0, CF_ISA_A);
6081 INSN(suba, 90c0, f0c0, M68000);
6083 BASE(undef_mac, a000, f000);
6084 INSN(mac, a000, f100, CF_EMAC);
6085 INSN(from_mac, a180, f9b0, CF_EMAC);
6086 INSN(move_mac, a110, f9fc, CF_EMAC);
6087 INSN(from_macsr,a980, f9f0, CF_EMAC);
6088 INSN(from_mask, ad80, fff0, CF_EMAC);
6089 INSN(from_mext, ab80, fbf0, CF_EMAC);
6090 INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
6091 INSN(to_mac, a100, f9c0, CF_EMAC);
6092 INSN(to_macsr, a900, ffc0, CF_EMAC);
6093 INSN(to_mext, ab00, fbc0, CF_EMAC);
6094 INSN(to_mask, ad00, ffc0, CF_EMAC);
6096 INSN(mov3q, a140, f1c0, CF_ISA_B);
6097 INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
6098 INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
6099 INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
6100 INSN(cmp, b080, f1c0, CF_ISA_A);
6101 INSN(cmpa, b1c0, f1c0, CF_ISA_A);
6102 INSN(cmp, b000, f100, M68000);
6103 INSN(eor, b100, f100, M68000);
6104 INSN(cmpm, b108, f138, M68000);
6105 INSN(cmpa, b0c0, f0c0, M68000);
6106 INSN(eor, b180, f1c0, CF_ISA_A);
6107 BASE(and, c000, f000);
6108 INSN(exg_dd, c140, f1f8, M68000);
6109 INSN(exg_aa, c148, f1f8, M68000);
6110 INSN(exg_da, c188, f1f8, M68000);
6111 BASE(mulw, c0c0, f0c0);
6112 INSN(abcd_reg, c100, f1f8, M68000);
6113 INSN(abcd_mem, c108, f1f8, M68000);
6114 BASE(addsub, d000, f000);
6115 INSN(undef, d0c0, f0c0, CF_ISA_A);
6116 INSN(addx_reg, d180, f1f8, CF_ISA_A);
6117 INSN(addx_reg, d100, f138, M68000);
6118 INSN(addx_mem, d108, f138, M68000);
6119 INSN(adda, d1c0, f1c0, CF_ISA_A);
6120 INSN(adda, d0c0, f0c0, M68000);
6121 INSN(shift_im, e080, f0f0, CF_ISA_A);
6122 INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
6123 INSN(shift8_im, e000, f0f0, M68000);
6124 INSN(shift16_im, e040, f0f0, M68000);
6125 INSN(shift_im, e080, f0f0, M68000);
6126 INSN(shift8_reg, e020, f0f0, M68000);
6127 INSN(shift16_reg, e060, f0f0, M68000);
6128 INSN(shift_reg, e0a0, f0f0, M68000);
6129 INSN(shift_mem, e0c0, fcc0, M68000);
6130 INSN(rotate_im, e090, f0f0, M68000);
6131 INSN(rotate8_im, e010, f0f0, M68000);
6132 INSN(rotate16_im, e050, f0f0, M68000);
6133 INSN(rotate_reg, e0b0, f0f0, M68000);
6134 INSN(rotate8_reg, e030, f0f0, M68000);
6135 INSN(rotate16_reg, e070, f0f0, M68000);
6136 INSN(rotate_mem, e4c0, fcc0, M68000);
6137 INSN(bfext_mem, e9c0, fdc0, BITFIELD); /* bfextu & bfexts */
6138 INSN(bfext_reg, e9c0, fdf8, BITFIELD);
6139 INSN(bfins_mem, efc0, ffc0, BITFIELD);
6140 INSN(bfins_reg, efc0, fff8, BITFIELD);
6141 INSN(bfop_mem, eac0, ffc0, BITFIELD); /* bfchg */
6142 INSN(bfop_reg, eac0, fff8, BITFIELD); /* bfchg */
6143 INSN(bfop_mem, ecc0, ffc0, BITFIELD); /* bfclr */
6144 INSN(bfop_reg, ecc0, fff8, BITFIELD); /* bfclr */
6145 INSN(bfop_mem, edc0, ffc0, BITFIELD); /* bfffo */
6146 INSN(bfop_reg, edc0, fff8, BITFIELD); /* bfffo */
6147 INSN(bfop_mem, eec0, ffc0, BITFIELD); /* bfset */
6148 INSN(bfop_reg, eec0, fff8, BITFIELD); /* bfset */
6149 INSN(bfop_mem, e8c0, ffc0, BITFIELD); /* bftst */
6150 INSN(bfop_reg, e8c0, fff8, BITFIELD); /* bftst */
6151 BASE(undef_fpu, f000, f000);
6152 INSN(fpu, f200, ffc0, CF_FPU);
6153 INSN(fbcc, f280, ffc0, CF_FPU);
6154 INSN(fpu, f200, ffc0, FPU);
6155 INSN(fscc, f240, ffc0, FPU);
6156 INSN(fbcc, f280, ff80, FPU);
6157 #if defined(CONFIG_SOFTMMU)
6158 INSN(frestore, f340, ffc0, CF_FPU);
6159 INSN(fsave, f300, ffc0, CF_FPU);
6160 INSN(frestore, f340, ffc0, FPU);
6161 INSN(fsave, f300, ffc0, FPU);
6162 INSN(intouch, f340, ffc0, CF_ISA_A);
6163 INSN(cpushl, f428, ff38, CF_ISA_A);
6164 INSN(cpush, f420, ff20, M68040);
6165 INSN(cinv, f400, ff20, M68040);
6166 INSN(pflush, f500, ffe0, M68040);
6167 INSN(ptest, f548, ffd8, M68040);
6168 INSN(wddata, fb00, ff00, CF_ISA_A);
6169 INSN(wdebug, fbc0, ffc0, CF_ISA_A);
6170 #endif
6171 INSN(move16_mem, f600, ffe0, M68040);
6172 INSN(move16_reg, f620, fff8, M68040);
6173 #undef INSN
/*
 * TranslatorOps hook: initialise the per-TB DisasContext.  Condition
 * codes start in CC_OP_DYNAMIC (read from env) and are marked synced;
 * MAC temp allocation and EA writeback state start empty.
 */
6176 static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
6178 DisasContext *dc = container_of(dcbase, DisasContext, base);
6179 CPUM68KState *env = cpu->env_ptr;
6181 dc->env = env;
6182 dc->pc = dc->base.pc_first;
6183 dc->cc_op = CC_OP_DYNAMIC;
6184 dc->cc_op_synced = 1;
6185 dc->done_mac = 0;
6186 dc->writeback_mask = 0;
6187 init_release_array(dc);
/* TranslatorOps hook: nothing to emit at the start of a TB for m68k. */
6190 static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
/*
 * TranslatorOps hook: record (pc, cc_op) for this insn so
 * restore_state_to_opc() can rebuild the CPU state on an exception.
 */
6194 static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6196 DisasContext *dc = container_of(dcbase, DisasContext, base);
6197 tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
/*
 * TranslatorOps hook: a guest breakpoint was hit at pc_next; emit an
 * EXCP_DEBUG exception and tell the translator loop the TB is done.
 */
6200 static bool m68k_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
6201 const CPUBreakpoint *bp)
6203 DisasContext *dc = container_of(dcbase, DisasContext, base);
6205 gen_exception(dc, dc->base.pc_next, EXCP_DEBUG);
6207 * The address covered by the breakpoint must be included in
6208 * [tb->pc, tb->pc + tb->size) in order to for it to be
6209 * properly cleared -- thus we increment the PC here so that
6210 * the logic setting tb->size below does the right thing.
6212 dc->base.pc_next += 2;
6214 return true;
/*
 * TranslatorOps hook: translate one insn.  The 16-bit opcode word
 * indexes opcode_table[] directly; the handler consumes any extension
 * words and advances dc->pc.  Pending EA writebacks and temp releases
 * are then flushed.
 */
6217 static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
6219 DisasContext *dc = container_of(dcbase, DisasContext, base);
6220 CPUM68KState *env = cpu->env_ptr;
6221 uint16_t insn = read_im16(env, dc);
6223 opcode_table[insn](env, dc, insn);
6224 do_writebacks(dc);
6225 do_release(dc);
6227 dc->base.pc_next = dc->pc;
6229 if (dc->base.is_jmp == DISAS_NEXT) {
6231 * Stop translation when the next insn might touch a new page.
6232 * This ensures that prefetch aborts at the right place.
6234 * We cannot determine the size of the next insn without
6235 * completely decoding it. However, the maximum insn size
6236 * is 32 bytes, so end if we do not have that much remaining.
6237 * This may produce several small TBs at the end of each page,
6238 * but they will all be linked with goto_tb.
6240 * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also
6241 * smaller than MC68020's.
6243 target_ulong start_page_offset
6244 = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK);
6246 if (start_page_offset >= TARGET_PAGE_SIZE - 32) {
6247 dc->base.is_jmp = DISAS_TOO_MANY;
/*
 * TranslatorOps hook: close the TB.  When single-stepping, every exit
 * path raises EXCP_DEBUG directly (after making sure QREG_PC and
 * cc_op are up to date) instead of chaining to the next TB; otherwise
 * the normal goto_tb / lookup_and_goto_ptr / exit_tb endings are used.
 */
6254 static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
6256 DisasContext *dc = container_of(dcbase, DisasContext, base);
6258 switch (dc->base.is_jmp) {
6257 case DISAS_NORETURN:
6258 break;
6259 case DISAS_TOO_MANY:
6260 update_cc_op(dc);
6261 if (is_singlestepping(dc)) {
6262 tcg_gen_movi_i32(QREG_PC, dc->pc);
6263 gen_raise_exception(EXCP_DEBUG);
6264 } else {
6265 gen_jmp_tb(dc, 0, dc->pc);
6267 break;
6268 case DISAS_JUMP:
6269 /* We updated CC_OP and PC in gen_jmp/gen_jmp_im. */
6270 if (is_singlestepping(dc)) {
6271 gen_raise_exception(EXCP_DEBUG);
6272 } else {
6273 tcg_gen_lookup_and_goto_ptr();
6275 break;
6276 case DISAS_EXIT:
6278 * We updated CC_OP and PC in gen_exit_tb, but also modified
6279 * other state that may require returning to the main loop.
6281 if (is_singlestepping(dc)) {
6282 gen_raise_exception(EXCP_DEBUG);
6283 } else {
6284 tcg_gen_exit_tb(NULL, 0);
6286 break;
6287 default:
6288 g_assert_not_reached();
/* TranslatorOps hook: log the guest disassembly of the finished TB. */
6292 static void m68k_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
6294 qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
6295 log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
/* Hook table handed to the generic translator loop. */
6298 static const TranslatorOps m68k_tr_ops = {
6299 .init_disas_context = m68k_tr_init_disas_context,
6300 .tb_start = m68k_tr_tb_start,
6301 .insn_start = m68k_tr_insn_start,
6302 .breakpoint_check = m68k_tr_breakpoint_check,
6303 .translate_insn = m68k_tr_translate_insn,
6304 .tb_stop = m68k_tr_tb_stop,
6305 .disas_log = m68k_tr_disas_log,
/* Entry point: translate one TB using the generic translator loop. */
6308 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
6310 DisasContext dc;
6311 translator_loop(&m68k_tr_ops, &dc.base, cpu, tb, max_insns);
/*
 * Convert a guest 80-bit float (given as its raw high/low words) to a
 * host double for register-dump display only.  Goes through the
 * softfloat float64 type and type-puns via a union (float64 and host
 * double share the IEEE binary64 layout).
 */
6314 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
6316 floatx80 a = { .high = high, .low = low };
6317 union {
6318 float64 f64;
6319 double d;
6320 } u;
6322 u.f64 = floatx80_to_float64(a, &env->fp_status);
6323 return u.d;
/*
 * Dump CPU state for 'info registers' / debug logging: D/A/F
 * registers, PC, SR with decoded flag letters, FPSR/FPCR with decoded
 * precision and rounding mode, and (softmmu only) the alternate stack
 * pointers and MMU registers.
 */
6326 void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags)
6328 M68kCPU *cpu = M68K_CPU(cs);
6329 CPUM68KState *env = &cpu->env;
6330 int i;
6331 uint16_t sr;
6332 for (i = 0; i < 8; i++) {
6333 qemu_fprintf(f, "D%d = %08x A%d = %08x "
6334 "F%d = %04x %016"PRIx64" (%12g)\n",
6335 i, env->dregs[i], i, env->aregs[i],
6336 i, env->fregs[i].l.upper, env->fregs[i].l.lower,
6337 floatx80_to_double(env, env->fregs[i].l.upper,
6338 env->fregs[i].l.lower));
6340 qemu_fprintf(f, "PC = %08x ", env->pc);
/* Merge the lazily-computed CCR bits into the architectural SR. */
6341 sr = env->sr | cpu_m68k_get_ccr(env);
6342 qemu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
6343 sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
6344 (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
6345 (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
6346 (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
6347 (sr & CCF_C) ? 'C' : '-');
6348 qemu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
6349 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
6350 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
6351 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
6352 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
6353 qemu_fprintf(f, "\n "
6354 "FPCR = %04x ", env->fpcr);
6355 switch (env->fpcr & FPCR_PREC_MASK) {
6356 case FPCR_PREC_X:
6357 qemu_fprintf(f, "X ");
6358 break;
6359 case FPCR_PREC_S:
6360 qemu_fprintf(f, "S ");
6361 break;
6362 case FPCR_PREC_D:
6363 qemu_fprintf(f, "D ");
6364 break;
6366 switch (env->fpcr & FPCR_RND_MASK) {
6367 case FPCR_RND_N:
6368 qemu_fprintf(f, "RN ");
6369 break;
6370 case FPCR_RND_Z:
6371 qemu_fprintf(f, "RZ ");
6372 break;
6373 case FPCR_RND_M:
6374 qemu_fprintf(f, "RM ");
6375 break;
6376 case FPCR_RND_P:
6377 qemu_fprintf(f, "RP ");
6378 break;
6380 qemu_fprintf(f, "\n");
6381 #ifdef CONFIG_SOFTMMU
6382 qemu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
6383 env->current_sp == M68K_SSP ? "->" : " ", env->sp[M68K_SSP],
6384 env->current_sp == M68K_USP ? "->" : " ", env->sp[M68K_USP],
6385 env->current_sp == M68K_ISP ? "->" : " ", env->sp[M68K_ISP]);
6386 qemu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
6387 qemu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
6388 qemu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
6389 env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
6390 qemu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
6391 env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
6392 env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
6393 qemu_fprintf(f, "MMUSR %08x, fault at %08x\n",
6394 env->mmu.mmusr, env->mmu.ar);
6395 #endif
6398 void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
6399 target_ulong *data)
6401 int cc_op = data[1];
6402 env->pc = data[0];
6403 if (cc_op != CC_OP_DYNAMIC) {
6404 env->cc_op = cc_op;