/*
 * Copyright (c) 2005-2007 CodeSourcery
 * Written by Paul Brook
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
//#define DEBUG_DISPATCH 1

/* Fake floating point.  */
#define tcg_gen_mov_f64 tcg_gen_mov_i64
#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64

#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#define DEFF64(name, offset) static TCGv_i64 QREG_##name;
static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;

static TCGv_env cpu_env;

static char cpu_reg_names[3*8*3 + 5*4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
static TCGv_i64 cpu_fregs[8];
static TCGv_i64 cpu_macc[4];

#define REG(insn, pos) (((insn) >> (pos)) & 7)
#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
#define AREG(insn, pos) cpu_aregs[REG(insn, pos)]
#define FREG(insn, pos) cpu_fregs[REG(insn, pos)]
#define MACREG(acc)     cpu_macc[acc]
#define QREG_SP         cpu_aregs[7]
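/*
 * Most encodings place a register number in bits 2-0 and another in
 * bits 11-9.  For example, ADD.L %d1,%d0 is 0xd081: REG(insn, 9) == 0
 * selects D0 as the destination and REG(insn, 0) == 1 selects D1 as
 * the source.
 */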
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
/* Used to distinguish stores from bad addressing modes.  */
static TCGv store_dummy;

#include "exec/gen-icount.h"
void m68k_tcg_init(void)
{
    char *p;
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFF64(name, offset) DEFO64(name, offset)
#include "qregs.def"
#undef DEFO32
#undef DEFO64
#undef DEFF64

    cpu_halted = tcg_global_mem_new_i32(cpu_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),
                                                 "EXCEPTION");

    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
        sprintf(p, "F%d", i);
        cpu_fregs[i] = tcg_global_mem_new_i64(cpu_env,
                                              offsetof(CPUM68KState, fregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
}
/* internal defines */
typedef struct DisasContext {
    CPUM68KState *env;
    target_ulong insn_pc; /* Start of the current instruction.  */
    target_ulong pc;
    int is_jmp;
    CCOp cc_op; /* Current CC operation */
    int cc_op_synced;
    int user;
    uint32_t fpcr;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    TCGv_i64 mactmp;
    int done_mac;
} DisasContext;

#define DISAS_JUMP_NEXT 4

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) s->user
#endif

/* XXX: move that elsewhere */
/* ??? Fix exceptions.  */
static void *gen_throws_exception;
#define gen_last_qop NULL

typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
#ifdef DEBUG_DISPATCH
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,  \
                                  uint16_t insn);                      \
    static void disas_##name(CPUM68KState *env, DisasContext *s,       \
                             uint16_t insn)                            \
    {                                                                  \
        qemu_log("Dispatch " #name "\n");                              \
        real_disas_##name(env, s, insn);                               \
    }                                                                  \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,  \
                                  uint16_t insn)
#else
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,       \
                             uint16_t insn)
#endif
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADD] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUB] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMP] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N,
};
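/*
 * Condition codes are evaluated lazily: most instructions only record
 * their operands (e.g. the result in CC_N and the source operand in
 * CC_V for CC_OP_ADD), and the architectural flags are materialized on
 * demand by gen_flush_flags().  The table above lists which flag
 * temporaries still hold live data in each mode, so set_cc_op() can
 * discard the rest.
 */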
static void set_cc_op(DisasContext *s, CCOp op)
{
    CCOp old_op = s->cc_op;
    int dead;

    if (old_op == op) {
        return;
    }
    s->cc_op = op;
    s->cc_op_synced = 0;

    /* Discard CC computation that will no longer be used.
       Note that X and N are never dead.  */
    dead = cc_op_live[old_op] & ~cc_op_live[op];
    if (dead & CCF_C) {
        tcg_gen_discard_i32(QREG_CC_C);
    }
    if (dead & CCF_Z) {
        tcg_gen_discard_i32(QREG_CC_Z);
    }
    if (dead & CCF_V) {
        tcg_gen_discard_i32(QREG_CC_V);
    }
}
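/*
 * Example: switching from CC_OP_FLAGS to CC_OP_ADD discards the C and Z
 * temporaries, because CC_OP_ADD can recompute both from N and V
 * (dead = (C|V|Z|N|X) & ~(X|N|V) = C|Z).
 */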
/* Update the CPU env CC_OP state.  */
static void update_cc_op(DisasContext *s)
{
    if (!s->cc_op_synced) {
        s->cc_op_synced = 1;
        tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
    }
}
/* Generate a load from the specified address.  Narrow values are
   sign extended to full register width.  */
static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr, int sign)
{
    TCGv tmp;
    int index = IS_USER(s);
    tmp = tcg_temp_new_i32();
    switch (opsize) {
    case OS_BYTE:
        if (sign)
            tcg_gen_qemu_ld8s(tmp, addr, index);
        else
            tcg_gen_qemu_ld8u(tmp, addr, index);
        break;
    case OS_WORD:
        if (sign)
            tcg_gen_qemu_ld16s(tmp, addr, index);
        else
            tcg_gen_qemu_ld16u(tmp, addr, index);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        break;
    default:
        g_assert_not_reached();
    }
    gen_throws_exception = gen_last_qop;
    return tmp;
}

static inline TCGv_i64 gen_load64(DisasContext *s, TCGv addr)
{
    TCGv_i64 tmp;
    int index = IS_USER(s);
    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ldf64(tmp, addr, index);
    gen_throws_exception = gen_last_qop;
    return tmp;
}

/* Generate a store.  */
static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
{
    int index = IS_USER(s);
    switch (opsize) {
    case OS_BYTE:
        tcg_gen_qemu_st8(val, addr, index);
        break;
    case OS_WORD:
        tcg_gen_qemu_st16(val, addr, index);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tcg_gen_qemu_st32(val, addr, index);
        break;
    default:
        g_assert_not_reached();
    }
    gen_throws_exception = gen_last_qop;
}

static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)
{
    int index = IS_USER(s);
    tcg_gen_qemu_stf64(val, addr, index);
    gen_throws_exception = gen_last_qop;
}

typedef enum {
    EA_STORE,
    EA_LOADU,
    EA_LOADS
} ea_what;

/* Generate a store if WHAT is EA_STORE; otherwise generate a load,
   sign extending the result when WHAT is EA_LOADS.  */
static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
                     ea_what what)
{
    if (what == EA_STORE) {
        gen_store(s, opsize, addr, val);
        return store_dummy;
    } else {
        return gen_load(s, opsize, addr, what == EA_LOADS);
    }
}
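/*
 * Typical usage (sketch): a read-modify-write instruction fetches its
 * operand with gen_ldst(s, opsize, addr, NULL_QREG, EA_LOADS) and later
 * writes the result back with gen_ldst(s, opsize, addr, result,
 * EA_STORE); the SRC_EA/DEST_EA macros below wrap this pattern.
 */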
/* Read a 16-bit immediate constant */
static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
{
    uint16_t im;
    im = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    return im;
}

/* Read an 8-bit immediate constant */
static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
{
    return read_im16(env, s);
}

/* Read a 32-bit immediate constant.  */
static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
{
    uint32_t im;
    im = read_im16(env, s) << 16;
    im |= 0xffff & read_im16(env, s);
    return im;
}
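/*
 * m68k instruction streams are big-endian and word-granular, so a
 * 32-bit immediate is fetched as two consecutive 16-bit words, high
 * word first.
 */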
/* Calculate an address index.  */
static TCGv gen_addr_index(uint16_t ext, TCGv tmp)
{
    TCGv add;
    int scale;

    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        tcg_gen_ext16s_i32(tmp, add);
        add = tmp;
    }
    scale = (ext >> 9) & 3;
    if (scale != 0) {
        tcg_gen_shli_i32(tmp, add, scale);
        add = tmp;
    }
    return add;
}
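/*
 * The index portion of an extension word selects Dn or An (bit 15),
 * optionally sign-extends it from 16 bits (bit 11 clear), and scales it
 * by 1, 2, 4 or 8 (bits 10-9) -- the 68020 "Xn.SIZE*SCALE" syntax.
 */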
/* Handle a base + index + displacement effective address.
   A NULL_QREG base means pc-relative.  */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    offset = s->pc;
    ext = read_im16(env, s);

    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
        ext &= ~(3 << 9);
    }

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)read_im16(env, s);
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = tcg_temp_new();
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(ext, tmp);
        } else {
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                base = tcg_const_i32(offset + bd);
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            add = tcg_const_i32(bd);
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = gen_load(s, OS_LONG, add, 0);
            if ((ext & 0x44) == 4) {
                add = gen_addr_index(ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement */
                if ((ext & 3) == 2) {
                    od = (int16_t)read_im16(env, s);
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format */
        tmp = tcg_temp_new();
        add = gen_addr_index(ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}
/* Evaluate all the CC flags.  */

static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        return;

    case CC_OP_ADD:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
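        /*
         * The computation above uses the identity that the signed
         * overflow of X = A + B is (X ^ A) & (X ^ B): the result
         * differs in sign from both operands.  N holds the result and
         * V the second operand, so the first operand is recovered as
         * t0 = N - V.
         */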
        tcg_temp_free(t1);
        break;

    case CC_OP_SUB:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        tcg_temp_free(t1);
        break;

    case CC_OP_CMP:
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
        break;

    default:
        t0 = tcg_const_i32(s->cc_op);
        gen_helper_flush_flags(cpu_env, t0);
        tcg_temp_free(t0);
        break;
    }

    /* Note that flush_flags also assigns to env->cc_op.  */
    s->cc_op_synced = 1;
    s->cc_op = CC_OP_FLAGS;
}
/* Sign or zero extend a value.  */

static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
{
    switch (opsize) {
    case OS_BYTE:
        if (sign) {
            tcg_gen_ext8s_i32(res, val);
        } else {
            tcg_gen_ext8u_i32(res, val);
        }
        break;
    case OS_WORD:
        if (sign) {
            tcg_gen_ext16s_i32(res, val);
        } else {
            tcg_gen_ext16u_i32(res, val);
        }
        break;
    case OS_LONG:
        tcg_gen_mov_i32(res, val);
        break;
    default:
        g_assert_not_reached();
    }
}
static TCGv gen_extend(TCGv val, int opsize, int sign)
{
    TCGv tmp;

    if (opsize == OS_LONG) {
        tmp = val;
    } else {
        tmp = tcg_temp_new();
        gen_ext(tmp, val, opsize, sign);
    }

    return tmp;
}
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}
static void gen_update_cc_add(TCGv dest, TCGv src)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
}
static inline int opsize_bytes(int opsize)
{
    switch (opsize) {
    case OS_BYTE: return 1;
    case OS_WORD: return 2;
    case OS_LONG: return 4;
    case OS_SINGLE: return 4;
    case OS_DOUBLE: return 8;
    case OS_EXTENDED: return 12;
    case OS_PACKED: return 12;
    default:
        g_assert_not_reached();
    }
}
static inline int insn_opsize(int insn)
{
    switch ((insn >> 6) & 3) {
    case 0: return OS_BYTE;
    case 1: return OS_WORD;
    case 2: return OS_LONG;
    default:
        g_assert_not_reached();
    }
}
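/*
 * Bits 7-6 of most 68k integer encodings give the operation size; e.g.
 * 0x4a39 (tst.b with an absolute long EA) has (insn >> 6) & 3 == 0,
 * i.e. OS_BYTE.
 */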
/* Assign value to a register.  If the width is less than the register width
   only the low part of the register is set.  */
static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
{
    TCGv tmp;
    switch (opsize) {
    case OS_BYTE:
        tcg_gen_andi_i32(reg, reg, 0xffffff00);
        tmp = tcg_temp_new();
        tcg_gen_ext8u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        break;
    case OS_WORD:
        tcg_gen_andi_i32(reg, reg, 0xffff0000);
        tmp = tcg_temp_new();
        tcg_gen_ext16u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tcg_gen_mov_i32(reg, val);
        break;
    default:
        g_assert_not_reached();
    }
}
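/*
 * Example: a byte write of 0x12 into a data register holding 0xdeadbeef
 * leaves 0xdeadbe12 -- the upper 24 bits are preserved, as on hardware.
 */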
/* Generate code for an "effective address".  Does not adjust the base
   register for autoincrement addressing modes.  */
static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
                    int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct.  */
    case 1: /* Address register direct.  */
        return NULL_QREG;
    case 2: /* Indirect register */
    case 3: /* Indirect postincrement.  */
        return AREG(insn, 0);
    case 4: /* Indirect predecrement.  */
        reg = AREG(insn, 0);
        tmp = tcg_temp_new();
        tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        return tmp;
    case 5: /* Indirect displacement.  */
        reg = AREG(insn, 0);
        tmp = tcg_temp_new();
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement.  */
        reg = AREG(insn, 0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (insn & 7) {
        case 0: /* Absolute short.  */
            offset = (int16_t)read_im16(env, s);
            return tcg_const_i32(offset);
        case 1: /* Absolute long.  */
            offset = read_im32(env, s);
            return tcg_const_i32(offset);
        case 2: /* pc displacement  */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            return tcg_const_i32(offset);
        case 3: /* pc index+displacement.  */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate.  */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
/* Helper function for gen_ea.  Reuse the computed address between the
   read/write operands.  */
static inline TCGv gen_ea_once(CPUM68KState *env, DisasContext *s,
                               uint16_t insn, int opsize, TCGv val,
                               TCGv *addrp, ea_what what)
{
    TCGv tmp;

    if (addrp && what == EA_STORE) {
        tmp = *addrp;
    } else {
        tmp = gen_lea(env, s, insn, opsize);
        if (IS_NULL_QREG(tmp))
            return tmp;
        if (addrp)
            *addrp = tmp;
    }
    return gen_ldst(s, opsize, tmp, val, what);
}
/* Generate code to load/store a value from/into an EA.  If WHAT is
   EA_STORE this is a write, otherwise a read (EA_LOADS == sign extend,
   EA_LOADU == zero extend).  ADDRP is non-null for read-write
   operands.  */
static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
                   int opsize, TCGv val, TCGv *addrp, ea_what what)
{
    TCGv reg;
    TCGv result;
    uint32_t offset;

    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct.  */
        reg = DREG(insn, 0);
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct.  */
        reg = AREG(insn, 0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = AREG(insn, 0);
        return gen_ldst(s, opsize, reg, val, what);
    case 3: /* Indirect postincrement.  */
        reg = AREG(insn, 0);
        result = gen_ldst(s, opsize, reg, val, what);
        /* ??? This is not exception safe.  The instruction may still
           fault after this point.  */
        if (what == EA_STORE || !addrp)
            tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
        return result;
    case 4: /* Indirect predecrement.  */
        {
            TCGv tmp;
            if (addrp && what == EA_STORE) {
                tmp = *addrp;
            } else {
                tmp = gen_lea(env, s, insn, opsize);
                if (IS_NULL_QREG(tmp))
                    return tmp;
                if (addrp)
                    *addrp = tmp;
            }
            result = gen_ldst(s, opsize, tmp, val, what);
            /* ??? This is not exception safe.  The instruction may still
               fault after this point.  */
            if (what == EA_STORE || !addrp) {
                reg = AREG(insn, 0);
                tcg_gen_mov_i32(reg, tmp);
            }
        }
        return result;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
        return gen_ea_once(env, s, insn, opsize, val, addrp, what);
    case 7: /* Other */
        switch (insn & 7) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            return gen_ea_once(env, s, insn, opsize, val, addrp, what);
        case 4: /* Immediate.  */
            /* Sign extend values for consistency.  */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return tcg_const_i32(offset);
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
typedef struct {
    TCGCond tcond;
    bool g1;
    bool g2;
    TCGv v1;
    TCGv v2;
} DisasCompare;

static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly.  */
    if (op == CC_OP_CMP) {
        c->g1 = c->g2 = 1;
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            c->g1 = c->g2 = 0;
            c->v2 = tcg_const_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            tcond = TCG_COND_LT;
            goto done;
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    c->g1 = 1;
    c->g2 = 0;
    c->v2 = tcg_const_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /* Logic operations clear V, which simplifies LE to (Z || N),
           and since Z and N are co-located, this becomes a normal
           comparison vs N.  */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N.  */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally.  */
        if (op == CC_OP_ADD || op == CC_OP_SUB || op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N.  */
        if (op == CC_OP_ADD || op == CC_OP_SUB || op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X.  */
        if (op == CC_OP_ADD || op == CC_OP_SUB) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C.  */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above.  */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_neg_i32(tmp, tmp);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcg_temp_free(tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
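/*
 * The condition field pairs each predicate with its negation in the low
 * bit (HI/LS, CC/CS, NE/EQ, ...), so an even condition code is handled
 * by computing its odd counterpart and inverting the TCG condition.
 */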
static void free_cond(DisasCompare *c)
{
    if (!c->g1) {
        tcg_temp_free(c->v1);
    }
    if (!c->g2) {
        tcg_temp_free(c->v2);
    }
}
static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
{
    DisasCompare c;

    gen_cc_cond(&c, s, cond);
    update_cc_op(s);
    tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
    free_cond(&c);
}
DISAS_INSN(scc)
{
    DisasCompare c;
    int cond;
    TCGv reg, tmp;

    cond = (insn >> 8) & 0xf;
    gen_cc_cond(&c, s, cond);

    tmp = tcg_temp_new();
    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
    free_cond(&c);

    reg = DREG(insn, 0);
    tcg_gen_neg_i32(tmp, tmp);
    tcg_gen_deposit_i32(reg, reg, tmp, 0, 8);
    tcg_temp_free(tmp);
}
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_lookup_tb(DisasContext *s)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->is_jmp = DISAS_UPDATE;
}

/* Generate a jump to an immediate address.  */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}

/* Generate a jump to the address in qreg DEST.  */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    update_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}
static void gen_exception(DisasContext *s, uint32_t where, int nr)
{
    update_cc_op(s);
    gen_jmp_im(s, where);
    gen_helper_raise_exception(cpu_env, tcg_const_i32(nr));
}

static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
}
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU);                 \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)

#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
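/*
 * Usage sketch: a typical two-operand instruction does
 *     SRC_EA(env, src, opsize, 1, &addr);       (load, sign-extended)
 *     ... compute result ...
 *     DEST_EA(env, insn, opsize, res, &addr);   (store via same address)
 * Passing the same ADDRP to both avoids decoding the EA twice.
 */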
static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
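/*
 * Direct block chaining with goto_tb is only valid when the destination
 * lies on the same guest page as the current TB (or the current insn),
 * hence the TARGET_PAGE_MASK comparison above.
 */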
/* Generate a jump to an immediate address.  */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        gen_exception(s, dest, EXCP_DEBUG);
    } else if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb + n);
    } else {
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = DISAS_TB_JUMP;
}
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->pc - 2, EXCP_LINEA);
}

DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->pc - 2, EXCP_LINEF);
}
DISAS_INSN(undef)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);

    gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
    cpu_abort(CPU(cpu), "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
}
DISAS_INSN(mulw)
{
    TCGv reg;
    TCGv tmp;
    TCGv src;
    int sign;

    sign = (insn & 0x100) != 0;
    reg = DREG(insn, 9);
    tmp = tcg_temp_new();
    if (sign)
        tcg_gen_ext16s_i32(tmp, reg);
    else
        tcg_gen_ext16u_i32(tmp, reg);
    SRC_EA(env, src, OS_WORD, sign, NULL);
    tcg_gen_mul_i32(tmp, tmp, src);
    tcg_gen_mov_i32(reg, tmp);
    gen_logic_cc(s, tmp, OS_WORD);
}
DISAS_INSN(divw)
{
    TCGv reg;
    TCGv tmp;
    TCGv src;
    int sign;

    sign = (insn & 0x100) != 0;
    reg = DREG(insn, 9);
    if (sign) {
        tcg_gen_ext16s_i32(QREG_DIV1, reg);
    } else {
        tcg_gen_ext16u_i32(QREG_DIV1, reg);
    }
    SRC_EA(env, src, OS_WORD, sign, NULL);
    tcg_gen_mov_i32(QREG_DIV2, src);
    if (sign) {
        gen_helper_divs(cpu_env, tcg_const_i32(1));
    } else {
        gen_helper_divu(cpu_env, tcg_const_i32(1));
    }

    tmp = tcg_temp_new();
    src = tcg_temp_new();
    tcg_gen_ext16u_i32(tmp, QREG_DIV1);
    tcg_gen_shli_i32(src, QREG_DIV2, 16);
    tcg_gen_or_i32(reg, tmp, src);

    set_cc_op(s, CC_OP_FLAGS);
}
DISAS_INSN(divl)
{
    TCGv num;
    TCGv den;
    TCGv reg;
    uint16_t ext;

    ext = read_im16(env, s);
    if (ext & 0x87f8) {
        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
        return;
    }
    num = DREG(ext, 12);
    reg = DREG(ext, 0);
    tcg_gen_mov_i32(QREG_DIV1, num);
    SRC_EA(env, den, OS_LONG, 0, NULL);
    tcg_gen_mov_i32(QREG_DIV2, den);
    if (ext & 0x0800) {
        gen_helper_divs(cpu_env, tcg_const_i32(0));
    } else {
        gen_helper_divu(cpu_env, tcg_const_i32(0));
    }
    if ((ext & 7) == ((ext >> 12) & 7)) {
        /* div */
        tcg_gen_mov_i32 (reg, QREG_DIV1);
    } else {
        /* rem */
        tcg_gen_mov_i32 (reg, QREG_DIV2);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;

    add = (insn & 0x4000) != 0;
    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, tmp, OS_LONG, 0, &addr);
        src = reg;
    } else {
        tmp = reg;
        SRC_EA(env, src, OS_LONG, 0, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADD);
    } else {
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUB);
    }
    gen_update_cc_add(dest, src);
    if (insn & 0x100) {
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    } else {
        tcg_gen_mov_i32(reg, dest);
    }
}
/* Reverse the order of the bits in REG.  */
DISAS_INSN(bitrev)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_helper_bitrev(reg, reg);
}
DISAS_INSN(bitop_reg)
{
    int opsize;
    int op;
    TCGv src1;
    TCGv src2;
    TCGv tmp;
    TCGv addr;
    TCGv dest;

    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;
    SRC_EA(env, src1, opsize, 0, op ? &addr : NULL);
    src2 = DREG(insn, 9);
    dest = tcg_temp_new();

    gen_flush_flags(s);
    tmp = tcg_temp_new();
    if (opsize == OS_BYTE)
        tcg_gen_andi_i32(tmp, src2, 7);
    else
        tcg_gen_andi_i32(tmp, src2, 31);

    src2 = tcg_const_i32(1);
    tcg_gen_shl_i32(src2, src2, tmp);
    tcg_temp_free(tmp);

    tcg_gen_and_i32(QREG_CC_Z, src1, src2);

    switch (op) {
    case 1: /* bchg */
        tcg_gen_xor_i32(dest, src1, src2);
        break;
    case 2: /* bclr */
        tcg_gen_andc_i32(dest, src1, src2);
        break;
    case 3: /* bset */
        tcg_gen_or_i32(dest, src1, src2);
        break;
    default: /* btst */
        break;
    }
    tcg_temp_free(src2);
    if (op) {
        DEST_EA(env, insn, opsize, dest, &addr);
    }
    tcg_temp_free(dest);
}
DISAS_INSN(sats)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_flush_flags(s);
    gen_helper_sats(reg, reg, QREG_CC_V);
    gen_logic_cc(s, reg, OS_LONG);
}
static void gen_push(DisasContext *s, TCGv val)
{
    TCGv tmp;

    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, val);
    tcg_gen_mov_i32(QREG_SP, tmp);
}
DISAS_INSN(movem)
{
    TCGv addr;
    int i;
    uint16_t mask;
    TCGv reg;
    TCGv tmp;
    int is_load;

    mask = read_im16(env, s);
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    is_load = ((insn & 0x0400) != 0);
    for (i = 0; i < 16; i++, mask >>= 1) {
        if (mask & 1) {
            if (i < 8)
                reg = DREG(i, 0);
            else
                reg = AREG(i, 0);
            if (is_load) {
                tmp = gen_load(s, OS_LONG, addr, 0);
                tcg_gen_mov_i32(reg, tmp);
            } else {
                gen_store(s, OS_LONG, addr, reg);
            }
            if (mask != 1)
                tcg_gen_addi_i32(addr, addr, 4);
        }
    }
}
DISAS_INSN(bitop_im)
{
    int opsize;
    int op;
    TCGv src1;
    uint32_t mask;
    int bitnum;
    TCGv tmp;
    TCGv addr;

    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;

    bitnum = read_im16(env, s);
    if (bitnum & 0xff00) {
        disas_undef(env, s, insn);
        return;
    }

    SRC_EA(env, src1, opsize, 0, op ? &addr : NULL);

    gen_flush_flags(s);
    if (opsize == OS_BYTE)
        bitnum &= 7;
    else
        bitnum &= 31;
    mask = 1 << bitnum;

    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);

    if (op) {
        tmp = tcg_temp_new();
        switch (op) {
        case 1: /* bchg */
            tcg_gen_xori_i32(tmp, src1, mask);
            break;
        case 2: /* bclr */
            tcg_gen_andi_i32(tmp, src1, ~mask);
            break;
        case 3: /* bset */
            tcg_gen_ori_i32(tmp, src1, mask);
            break;
        default: /* btst */
            break;
        }
        DEST_EA(env, insn, opsize, tmp, &addr);
        tcg_temp_free(tmp);
    }
}
DISAS_INSN(arith_im)
{
    int op;
    uint32_t im;
    TCGv src1;
    TCGv dest;
    TCGv addr;

    op = (insn >> 9) & 7;
    SRC_EA(env, src1, OS_LONG, 0, (op == 6) ? NULL : &addr);
    im = read_im32(env, s);
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_ori_i32(dest, src1, im);
        gen_logic_cc(s, dest, OS_LONG);
        break;
    case 1: /* andi */
        tcg_gen_andi_i32(dest, src1, im);
        gen_logic_cc(s, dest, OS_LONG);
        break;
    case 2: /* subi */
        tcg_gen_mov_i32(dest, src1);
        tcg_gen_setcondi_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
        tcg_gen_subi_i32(dest, dest, im);
        gen_update_cc_add(dest, tcg_const_i32(im));
        set_cc_op(s, CC_OP_SUB);
        break;
    case 3: /* addi */
        tcg_gen_mov_i32(dest, src1);
        tcg_gen_addi_i32(dest, dest, im);
        gen_update_cc_add(dest, tcg_const_i32(im));
        tcg_gen_setcondi_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
        set_cc_op(s, CC_OP_ADD);
        break;
    case 5: /* eori */
        tcg_gen_xori_i32(dest, src1, im);
        gen_logic_cc(s, dest, OS_LONG);
        break;
    case 6: /* cmpi */
        gen_update_cc_add(src1, tcg_const_i32(im));
        set_cc_op(s, CC_OP_CMP);
        break;
    default:
        abort();
    }
    if (op != 6) {
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    }
}
DISAS_INSN(byterev)
{
    TCGv reg;

    reg = DREG(insn, 0);
    tcg_gen_bswap32_i32(reg, reg);
}
DISAS_INSN(move)
{
    TCGv src;
    TCGv dest;
    int op;
    int opsize;

    switch (insn >> 12) {
    case 1: /* move.b */
        opsize = OS_BYTE;
        break;
    case 2: /* move.l */
        opsize = OS_LONG;
        break;
    case 3: /* move.w */
        opsize = OS_WORD;
        break;
    default:
        abort();
    }
    SRC_EA(env, src, opsize, 1, NULL);
    op = (insn >> 6) & 7;
    if (op == 1) {
        /* movea */
        /* The value will already have been sign extended.  */
        dest = AREG(insn, 9);
        tcg_gen_mov_i32(dest, src);
    } else {
        /* normal move */
        uint16_t dest_ea;
        dest_ea = ((insn >> 9) & 7) | (op << 3);
        DEST_EA(env, dest_ea, opsize, src, NULL);
        /* This will be correct because loads sign extend.  */
        gen_logic_cc(s, src, opsize);
    }
}
DISAS_INSN(negx)
{
    TCGv reg;

    gen_flush_flags(s);
    reg = DREG(insn, 0);
    gen_helper_subx_cc(reg, cpu_env, tcg_const_i32(0), reg);
}
DISAS_INSN(lea)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 9);
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    tcg_gen_mov_i32(reg, tmp);
}
DISAS_INSN(clr)
{
    int opsize;

    opsize = insn_opsize(insn);
    DEST_EA(env, insn, opsize, tcg_const_i32(0), NULL);
    gen_logic_cc(s, tcg_const_i32(0), opsize);
}
static TCGv gen_get_ccr(DisasContext *s)
{
    TCGv dest;

    gen_flush_flags(s);
    update_cc_op(s);
    dest = tcg_temp_new();
    gen_helper_get_ccr(dest, cpu_env);
    return dest;
}
DISAS_INSN(move_from_ccr)
{
    TCGv ccr;

    ccr = gen_get_ccr(s);
    DEST_EA(env, insn, OS_WORD, ccr, NULL);
}
DISAS_INSN(neg)
{
    TCGv reg;
    TCGv src1;

    reg = DREG(insn, 0);
    src1 = tcg_temp_new();
    tcg_gen_mov_i32(src1, reg);
    tcg_gen_neg_i32(reg, src1);
    gen_update_cc_add(reg, src1);
    tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, src1, 0);
    set_cc_op(s, CC_OP_SUB);
}
static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
{
    if (ccr_only) {
        tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
        tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
        tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
    } else {
        gen_helper_set_sr(cpu_env, tcg_const_i32(val));
    }
    set_cc_op(s, CC_OP_FLAGS);
}
static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
                       int ccr_only)
{
    if ((insn & 0x38) == 0) {
        if (ccr_only) {
            gen_helper_set_ccr(cpu_env, DREG(insn, 0));
        } else {
            gen_helper_set_sr(cpu_env, DREG(insn, 0));
        }
        set_cc_op(s, CC_OP_FLAGS);
    } else if ((insn & 0x3f) == 0x3c) {
        uint16_t val;
        val = read_im16(env, s);
        gen_set_sr_im(s, val, ccr_only);
    } else {
        disas_undef(env, s, insn);
    }
}
DISAS_INSN(move_to_ccr)
{
    gen_set_sr(env, s, insn, 1);
}
DISAS_INSN(not)
{
    TCGv reg;

    reg = DREG(insn, 0);
    tcg_gen_not_i32(reg, reg);
    gen_logic_cc(s, reg, OS_LONG);
}
DISAS_INSN(swap)
{
    TCGv src1;
    TCGv src2;
    TCGv reg;

    src1 = tcg_temp_new();
    src2 = tcg_temp_new();
    reg = DREG(insn, 0);
    tcg_gen_shli_i32(src1, reg, 16);
    tcg_gen_shri_i32(src2, reg, 16);
    tcg_gen_or_i32(reg, src1, src2);
    gen_logic_cc(s, reg, OS_LONG);
}
DISAS_INSN(pea)
{
    TCGv tmp;

    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    gen_push(s, tmp);
}
DISAS_INSN(ext)
{
    int op;
    TCGv reg;
    TCGv tmp;

    reg = DREG(insn, 0);
    op = (insn >> 6) & 7;
    tmp = tcg_temp_new();
    if (op == 3)
        tcg_gen_ext16s_i32(tmp, reg);
    else
        tcg_gen_ext8s_i32(tmp, reg);
    if (op == 2)
        gen_partset_reg(OS_WORD, reg, tmp);
    else
        tcg_gen_mov_i32(reg, tmp);
    gen_logic_cc(s, tmp, OS_LONG);
}
DISAS_INSN(tst)
{
    int opsize;
    TCGv tmp;

    opsize = insn_opsize(insn);
    SRC_EA(env, tmp, opsize, 1, NULL);
    gen_logic_cc(s, tmp, opsize);
}
DISAS_INSN(pulse)
{
    /* Implemented as a NOP.  */
}
DISAS_INSN(illegal)
{
    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
}
/* ??? This should be atomic.  */
DISAS_INSN(tas)
{
    TCGv dest;
    TCGv src1;
    TCGv addr;

    dest = tcg_temp_new();
    SRC_EA(env, src1, OS_BYTE, 1, &addr);
    gen_logic_cc(s, src1, OS_BYTE);
    tcg_gen_ori_i32(dest, src1, 0x80);
    DEST_EA(env, insn, OS_BYTE, dest, &addr);
}
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv reg;
    TCGv src1;
    TCGv dest;

    /* The upper 32 bits of the product are discarded, so
       muls.l and mulu.l are functionally equivalent.  */
    ext = read_im16(env, s);
    if (ext & 0x87ff) {
        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
        return;
    }
    reg = DREG(ext, 12);
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    dest = tcg_temp_new();
    tcg_gen_mul_i32(dest, src1, reg);
    tcg_gen_mov_i32(reg, dest);
    /* Unlike m68k, coldfire always clears the overflow bit.  */
    gen_logic_cc(s, dest, OS_LONG);
}
DISAS_INSN(link)
{
    int16_t offset;
    TCGv reg;
    TCGv tmp;

    offset = cpu_ldsw_code(env, s->pc);
    s->pc += 2;
    reg = AREG(insn, 0);
    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg);
    if ((insn & 7) != 7)
        tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, tmp, offset);
}
DISAS_INSN(unlk)
{
    TCGv src;
    TCGv reg;
    TCGv tmp;

    src = tcg_temp_new();
    reg = AREG(insn, 0);
    tcg_gen_mov_i32(src, reg);
    tmp = gen_load(s, OS_LONG, src, 0);
    tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, src, 4);
}
DISAS_INSN(rts)
{
    TCGv tmp;

    tmp = gen_load(s, OS_LONG, QREG_SP, 0);
    tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
    gen_jmp(s, tmp);
}
DISAS_INSN(jump)
{
    TCGv tmp;

    /* Load the target address first to ensure correct exception
       behavior.  */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    if ((insn & 0x40) == 0) {
        /* jsr */
        gen_push(s, tcg_const_i32(s->pc));
    }
    gen_jmp(s, tmp);
}
DISAS_INSN(addsubq)
{
    TCGv src1;
    TCGv src2;
    TCGv dest;
    int val;
    TCGv addr;

    SRC_EA(env, src1, OS_LONG, 0, &addr);
    val = (insn >> 9) & 7;
    if (val == 0)
        val = 8;
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src1);
    if ((insn & 0x38) == 0x08) {
        /* Don't update condition codes if the destination is an
           address register.  */
        if (insn & 0x0100) {
            tcg_gen_subi_i32(dest, dest, val);
        } else {
            tcg_gen_addi_i32(dest, dest, val);
        }
    } else {
        src2 = tcg_const_i32(val);
        if (insn & 0x0100) {
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src2);
            tcg_gen_sub_i32(dest, dest, src2);
            set_cc_op(s, CC_OP_SUB);
        } else {
            tcg_gen_add_i32(dest, dest, src2);
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src2);
            set_cc_op(s, CC_OP_ADD);
        }
        gen_update_cc_add(dest, src2);
    }
    DEST_EA(env, insn, OS_LONG, dest, &addr);
}
DISAS_INSN(tpf)
{
    switch (insn & 7) {
    case 2: /* One extension word.  */
        s->pc += 2;
        break;
    case 3: /* Two extension words.  */
        s->pc += 4;
        break;
    case 4: /* No extension words.  */
        break;
    default:
        disas_undef(env, s, insn);
    }
}
DISAS_INSN(branch)
{
    int32_t offset;
    uint32_t base;
    int op;
    TCGLabel *l1;

    base = s->pc;
    op = (insn >> 8) & 0xf;
    offset = (int8_t)insn;
    if (offset == 0) {
        offset = (int16_t)read_im16(env, s);
    } else if (offset == -1) {
        offset = read_im32(env, s);
    }
    if (op == 1) {
        /* bsr */
        gen_push(s, tcg_const_i32(s->pc));
    }
    if (op > 1) {
        /* Bcc */
        l1 = gen_new_label();
        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
        gen_jmp_tb(s, 1, base + offset);
        gen_set_label(l1);
        gen_jmp_tb(s, 0, s->pc);
    } else {
        /* Unconditional branch.  */
        gen_jmp_tb(s, 0, base + offset);
    }
}
DISAS_INSN(moveq)
{
    uint32_t val;

    val = (int8_t)insn;
    tcg_gen_movi_i32(DREG(insn, 9), val);
    gen_logic_cc(s, tcg_const_i32(val), OS_LONG);
}
DISAS_INSN(mvzs)
{
    int opsize;
    TCGv src;
    TCGv reg;

    if (insn & 0x40)
        opsize = OS_WORD;
    else
        opsize = OS_BYTE;
    SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
    reg = DREG(insn, 9);
    tcg_gen_mov_i32(reg, src);
    gen_logic_cc(s, src, opsize);
}
DISAS_INSN(or)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv addr;

    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, src, OS_LONG, 0, &addr);
        tcg_gen_or_i32(dest, src, reg);
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    } else {
        SRC_EA(env, src, OS_LONG, 0, NULL);
        tcg_gen_or_i32(dest, src, reg);
        tcg_gen_mov_i32(reg, dest);
    }
    gen_logic_cc(s, dest, OS_LONG);
}
DISAS_INSN(suba)
{
    TCGv src;
    TCGv reg;

    SRC_EA(env, src, OS_LONG, 0, NULL);
    reg = AREG(insn, 9);
    tcg_gen_sub_i32(reg, reg, src);
}
DISAS_INSN(subx)
{
    TCGv reg;
    TCGv src;

    gen_flush_flags(s);
    reg = DREG(insn, 9);
    src = DREG(insn, 0);
    gen_helper_subx_cc(reg, cpu_env, reg, src);
}
DISAS_INSN(mov3q)
{
    TCGv src;
    int val;

    val = (insn >> 9) & 7;
    if (val == 0)
        val = -1;
    src = tcg_const_i32(val);
    gen_logic_cc(s, src, OS_LONG);
    DEST_EA(env, insn, OS_LONG, src, NULL);
}
DISAS_INSN(cmp)
{
    TCGv src;
    TCGv reg;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, -1, NULL);
    reg = DREG(insn, 9);
    gen_update_cc_add(reg, src);
    set_cc_op(s, CC_OP_CMP);
}
DISAS_INSN(cmpa)
{
    int opsize;
    TCGv src;
    TCGv reg;

    if (insn & 0x100) {
        opsize = OS_LONG;
    } else {
        opsize = OS_WORD;
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = AREG(insn, 9);
    gen_update_cc_add(reg, src);
    set_cc_op(s, CC_OP_CMP);
}
DISAS_INSN(eor)
{
    TCGv src;
    TCGv dest;
    TCGv reg;
    TCGv addr;

    SRC_EA(env, src, OS_LONG, 0, &addr);
    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    tcg_gen_xor_i32(dest, src, reg);
    gen_logic_cc(s, dest, OS_LONG);
    DEST_EA(env, insn, OS_LONG, dest, &addr);
}
DISAS_INSN(and)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;

    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, src, OS_LONG, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    } else {
        SRC_EA(env, src, OS_LONG, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        tcg_gen_mov_i32(reg, dest);
    }
    gen_logic_cc(s, dest, OS_LONG);
}
DISAS_INSN(adda)
{
    TCGv src;
    TCGv reg;

    SRC_EA(env, src, OS_LONG, 0, NULL);
    reg = AREG(insn, 9);
    tcg_gen_add_i32(reg, reg, src);
}
DISAS_INSN(addx)
{
    TCGv reg;
    TCGv src;

    gen_flush_flags(s);
    reg = DREG(insn, 9);
    src = DREG(insn, 0);
    gen_helper_addx_cc(reg, cpu_env, reg, src);
}
/* TODO: This could be implemented without helper functions.  */
DISAS_INSN(shift_im)
{
    TCGv reg;
    int tmp;
    TCGv shift;

    set_cc_op(s, CC_OP_FLAGS);

    reg = DREG(insn, 0);
    tmp = (insn >> 9) & 7;
    if (tmp == 0)
        tmp = 8;
    shift = tcg_const_i32(tmp);
    /* No need to flush flags because we know we will set C flag.  */
    if (insn & 0x100) {
        gen_helper_shl_cc(reg, cpu_env, reg, shift);
    } else {
        if (insn & 8) {
            gen_helper_shr_cc(reg, cpu_env, reg, shift);
        } else {
            gen_helper_sar_cc(reg, cpu_env, reg, shift);
        }
    }
}
DISAS_INSN(shift_reg)
{
    TCGv reg;
    TCGv shift;

    reg = DREG(insn, 0);
    shift = DREG(insn, 9);
    if (insn & 0x100) {
        gen_helper_shl_cc(reg, cpu_env, reg, shift);
    } else {
        if (insn & 8) {
            gen_helper_shr_cc(reg, cpu_env, reg, shift);
        } else {
            gen_helper_sar_cc(reg, cpu_env, reg, shift);
        }
    }
    set_cc_op(s, CC_OP_FLAGS);
}
DISAS_INSN(ff1)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_logic_cc(s, reg, OS_LONG);
    gen_helper_ff1(reg, reg);
}
static TCGv gen_get_sr(DisasContext *s)
{
    TCGv ccr;
    TCGv sr;

    ccr = gen_get_ccr(s);
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
    tcg_gen_or_i32(sr, sr, ccr);
    return sr;
}
DISAS_INSN(strldsr)
{
    uint16_t ext;
    uint32_t addr;

    addr = s->pc - 2;
    ext = read_im16(env, s);
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_UNSUPPORTED);
        return;
    }
    ext = read_im16(env, s);
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
        return;
    }
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
}
DISAS_INSN(move_from_sr)
{
    TCGv sr;

    if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    sr = gen_get_sr(s);
    DEST_EA(env, insn, OS_WORD, sr, NULL);
}
DISAS_INSN(move_to_sr)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    gen_set_sr(env, s, insn, 0);
    gen_lookup_tb(s);
}
DISAS_INSN(move_from_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}

DISAS_INSN(move_to_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_st_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
DISAS_INSN(halt)
{
    gen_exception(s, s->pc, EXCP_HALT_INSN);
}
DISAS_INSN(stop)
{
    uint16_t ext;

    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    gen_set_sr_im(s, ext, 0);
    tcg_gen_movi_i32(cpu_halted, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}
DISAS_INSN(rte)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    gen_exception(s, s->pc - 2, EXCP_RTE);
}
DISAS_INSN(movec)
{
    uint16_t ext;
    TCGv reg;

    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    if (ext & 0x8000) {
        reg = AREG(ext, 12);
    } else {
        reg = DREG(ext, 12);
    }
    gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
    gen_lookup_tb(s);
}
DISAS_INSN(intouch)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* ICache fetch.  Implement as no-op.  */
}

DISAS_INSN(cpushl)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}

DISAS_INSN(wddata)
{
    gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
}
DISAS_INSN(wdebug)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);

    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug.  */
    cpu_abort(CPU(cpu), "WDEBUG not implemented");
}
DISAS_INSN(trap)
{
    gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
}
/* ??? FP exceptions are not implemented.  Most exceptions are deferred until
   immediately before the next FP instruction is executed.  */
DISAS_INSN(fpu)
{
    uint16_t ext;
    int32_t offset;
    int opmode;
    TCGv_i64 src;
    TCGv_i64 dest;
    TCGv_i64 res;
    TCGv tmp32;
    int round;
    int set_dest;
    int opsize;

    ext = read_im16(env, s);
    opmode = ext & 0x7f;
    switch ((ext >> 13) & 7) {
    case 0: case 2:
        break;
    case 1:
        goto undef;
    case 3: /* fmove out */
        src = FREG(ext, 7);
        tmp32 = tcg_temp_new_i32();
        /* fmove */
        /* ??? TODO: Proper behavior on overflow.  */
        switch ((ext >> 10) & 7) {
        case 0: /* OS_LONG */
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
            opsize = OS_LONG;
            break;
        case 1: /* OS_SINGLE */
            gen_helper_f64_to_f32(tmp32, cpu_env, src);
            opsize = OS_SINGLE;
            break;
        case 4: /* OS_WORD */
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
            opsize = OS_WORD;
            break;
        case 5: /* OS_DOUBLE */
            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
            switch ((insn >> 3) & 7) {
            case 2:
            case 3:
                break;
            case 4:
                tcg_gen_addi_i32(tmp32, tmp32, -8);
                break;
            case 5:
                offset = cpu_ldsw_code(env, s->pc);
                s->pc += 2;
                tcg_gen_addi_i32(tmp32, tmp32, offset);
                break;
            default:
                goto undef;
            }
            gen_store64(s, tmp32, src);
            switch ((insn >> 3) & 7) {
            case 3:
                tcg_gen_addi_i32(tmp32, tmp32, 8);
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            case 4:
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            }
            tcg_temp_free_i32(tmp32);
            return;
        case 6: /* OS_BYTE */
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
            opsize = OS_BYTE;
            break;
        default:
            goto undef;
        }
        DEST_EA(env, insn, opsize, tmp32, NULL);
        tcg_temp_free_i32(tmp32);
        return;
    case 4: /* fmove to control register.  */
        switch ((ext >> 10) & 7) {
        case 4: /* FPCR */
            /* Not implemented.  Ignore writes.  */
            break;
        default:
            cpu_abort(NULL, "Unimplemented: fmove to control %d",
                      (ext >> 10) & 7);
        }
        break;
    case 5: /* fmove from control register.  */
        switch ((ext >> 10) & 7) {
        case 4: /* FPCR */
            /* Not implemented.  Always return zero.  */
            tmp32 = tcg_const_i32(0);
            break;
        default:
            cpu_abort(NULL, "Unimplemented: fmove from control %d",
                      (ext >> 10) & 7);
            goto undef;
        }
        DEST_EA(env, insn, OS_LONG, tmp32, NULL);
        break;
    case 6: /* fmovem */
    case 7:
        {
            TCGv addr;
            uint16_t mask;
            int i;
            if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
                goto undef;
            tmp32 = gen_lea(env, s, insn, OS_LONG);
            if (IS_NULL_QREG(tmp32)) {
                gen_addr_fault(s);
                return;
            }
            addr = tcg_temp_new_i32();
            tcg_gen_mov_i32(addr, tmp32);
            mask = 0x80;
            for (i = 0; i < 8; i++) {
                if (ext & mask) {
                    dest = FREG(i, 0);
                    if (ext & (1 << 13)) {
                        /* store */
                        tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
                    } else {
                        /* load */
                        tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
                    }
                    if (ext & (mask - 1))
                        tcg_gen_addi_i32(addr, addr, 8);
                }
                mask >>= 1;
            }
            tcg_temp_free_i32(addr);
        }
        return;
    }
    if (ext & (1 << 14)) {
        /* Source effective address.  */
        switch ((ext >> 10) & 7) {
        case 0: opsize = OS_LONG; break;
        case 1: opsize = OS_SINGLE; break;
        case 4: opsize = OS_WORD; break;
        case 5: opsize = OS_DOUBLE; break;
        case 6: opsize = OS_BYTE; break;
        default:
            goto undef;
        }
        if (opsize == OS_DOUBLE) {
            tmp32 = tcg_temp_new_i32();
            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
            switch ((insn >> 3) & 7) {
            case 2:
            case 3:
                break;
            case 4:
                tcg_gen_addi_i32(tmp32, tmp32, -8);
                break;
            case 5:
                offset = cpu_ldsw_code(env, s->pc);
                s->pc += 2;
                tcg_gen_addi_i32(tmp32, tmp32, offset);
                break;
            case 7:
                offset = cpu_ldsw_code(env, s->pc);
                offset += s->pc - 2;
                s->pc += 2;
                tcg_gen_addi_i32(tmp32, tmp32, offset);
                break;
            default:
                goto undef;
            }
            src = gen_load64(s, tmp32);
            switch ((insn >> 3) & 7) {
            case 3:
                tcg_gen_addi_i32(tmp32, tmp32, 8);
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            case 4:
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            }
            tcg_temp_free_i32(tmp32);
        } else {
            SRC_EA(env, tmp32, opsize, 1, NULL);
            src = tcg_temp_new_i64();
            switch (opsize) {
            case OS_LONG:
            case OS_WORD:
            case OS_BYTE:
                gen_helper_i32_to_f64(src, cpu_env, tmp32);
                break;
            case OS_SINGLE:
                gen_helper_f32_to_f64(src, cpu_env, tmp32);
                break;
            }
        }
    } else {
        /* Source register.  */
        src = FREG(ext, 10);
    }
    dest = FREG(ext, 7);
    res = tcg_temp_new_i64();
    if (opmode != 0x3a)
        tcg_gen_mov_f64(res, dest);
    round = 1;
    set_dest = 1;
    switch (opmode) {
    case 0: case 0x40: case 0x44: /* fmove */
        tcg_gen_mov_f64(res, src);
        break;
    case 1: /* fint */
        gen_helper_iround_f64(res, cpu_env, src);
        round = 0;
        break;
    case 3: /* fintrz */
        gen_helper_itrunc_f64(res, cpu_env, src);
        round = 0;
        break;
    case 4: case 0x41: case 0x45: /* fsqrt */
        gen_helper_sqrt_f64(res, cpu_env, src);
        break;
    case 0x18: case 0x58: case 0x5c: /* fabs */
        gen_helper_abs_f64(res, src);
        break;
    case 0x1a: case 0x5a: case 0x5e: /* fneg */
        gen_helper_chs_f64(res, src);
        break;
    case 0x20: case 0x60: case 0x64: /* fdiv */
        gen_helper_div_f64(res, cpu_env, res, src);
        break;
    case 0x22: case 0x62: case 0x66: /* fadd */
        gen_helper_add_f64(res, cpu_env, res, src);
        break;
    case 0x23: case 0x63: case 0x67: /* fmul */
        gen_helper_mul_f64(res, cpu_env, res, src);
        break;
    case 0x28: case 0x68: case 0x6c: /* fsub */
        gen_helper_sub_f64(res, cpu_env, res, src);
        break;
    case 0x38: /* fcmp */
        gen_helper_sub_cmp_f64(res, cpu_env, res, src);
        set_dest = 0;
        round = 0;
        break;
    case 0x3a: /* ftst */
        tcg_gen_mov_f64(res, src);
        set_dest = 0;
        round = 0;
        break;
    default:
        goto undef;
    }
    if (ext & (1 << 14)) {
        tcg_temp_free_i64(src);
    }
    if (round) {
        if (opmode & 0x40) {
            if ((opmode & 0x4) != 0)
                round = 0;
        } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
            round = 0;
        }
    }
    if (round) {
        TCGv tmp = tcg_temp_new_i32();
        gen_helper_f64_to_f32(tmp, cpu_env, res);
        gen_helper_f32_to_f64(res, cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_mov_f64(QREG_FP_RESULT, res);
    if (set_dest) {
        tcg_gen_mov_f64(dest, res);
    }
    tcg_temp_free_i64(res);
    return;
undef:
    /* FIXME: Is this right for offset addressing modes?  */
    s->pc -= 2;
    disas_undef_fpu(env, s, insn);
}
DISAS_INSN(fbcc)
{
    int32_t offset;
    uint32_t addr;
    TCGv flag;
    TCGLabel *l1;

    addr = s->pc;
    offset = cpu_ldsw_code(env, s->pc);
    s->pc += 2;
    if (insn & (1 << 6)) {
        offset = (offset << 16) | read_im16(env, s);
    }

    l1 = gen_new_label();
    /* TODO: Raise BSUN exception.  */
    flag = tcg_temp_new();
    gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
    /* Jump to l1 if condition is true.  */
    switch (insn & 0xf) {
    case 0: /* f */
        break;
    case 1: /* eq (=0) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
        break;
    case 2: /* ogt (=1) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
        break;
    case 3: /* oge (=0 or =1) */
        tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
        break;
    case 4: /* olt (=-1) */
        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
        break;
    case 5: /* ole (=-1 or =0) */
        tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
        break;
    case 6: /* ogl (=-1 or =1) */
        tcg_gen_andi_i32(flag, flag, 1);
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
        break;
    case 7: /* or (=2) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
        break;
    case 8: /* un (<2) */
        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
        break;
    case 9: /* ueq (=0 or =2) */
        tcg_gen_andi_i32(flag, flag, 1);
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
        break;
    case 10: /* ugt (>0) */
        tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
        break;
    case 11: /* uge (>=0) */
        tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
        break;
    case 12: /* ult (=-1 or =2) */
        tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
        break;
    case 13: /* ule (!=1) */
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
        break;
    case 14: /* ne (!=0) */
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
        break;
    case 15: /* t */
        tcg_gen_br(l1);
        break;
    }
    gen_jmp_tb(s, 0, s->pc);
    gen_set_label(l1);
    gen_jmp_tb(s, 1, addr + offset);
}
DISAS_INSN(frestore)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);

    /* TODO: Implement frestore.  */
    cpu_abort(CPU(cpu), "FRESTORE not implemented");
}

DISAS_INSN(fsave)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);

    /* TODO: Implement fsave.  */
    cpu_abort(CPU(cpu), "FSAVE not implemented");
}
static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
{
    TCGv tmp = tcg_temp_new();
    if (s->env->macsr & MACSR_FI) {
        if (upper)
            tcg_gen_andi_i32(tmp, val, 0xffff0000);
        else
            tcg_gen_shli_i32(tmp, val, 16);
    } else if (s->env->macsr & MACSR_SU) {
        if (upper)
            tcg_gen_sari_i32(tmp, val, 16);
        else
            tcg_gen_ext16s_i32(tmp, val);
    } else {
        if (upper)
            tcg_gen_shri_i32(tmp, val, 16);
        else
            tcg_gen_ext16u_i32(tmp, val);
    }
    return tmp;
}
static void gen_mac_clear_flags(void)
{
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
}
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load.  */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /* Load the value now to ensure correct exception behavior.
           Perform writeback after reading the MAC inputs.  */
        loadval = gen_load(s, OS_LONG, addr, 0);

        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word.  */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply.  */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant.  */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier.  */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated.  */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));

    if (insn & 0x30) {
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /* FIXME: Should address writeback happen with the masked or
           unmasked value?  */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
)
2839 rx
= (insn
& 8) ? AREG(insn
, 0) : DREG(insn
, 0);
2840 accnum
= (insn
>> 9) & 3;
2841 acc
= MACREG(accnum
);
2842 if (s
->env
->macsr
& MACSR_FI
) {
2843 gen_helper_get_macf(rx
, cpu_env
, acc
);
2844 } else if ((s
->env
->macsr
& MACSR_OMC
) == 0) {
2845 tcg_gen_extrl_i64_i32(rx
, acc
);
2846 } else if (s
->env
->macsr
& MACSR_SU
) {
2847 gen_helper_get_macs(rx
, acc
);
2849 gen_helper_get_macu(rx
, acc
);
2852 tcg_gen_movi_i64(acc
, 0);
2853 tcg_gen_andi_i32(QREG_MACSR
, QREG_MACSR
, ~(MACSR_PAV0
<< accnum
));
/* FIXME: This can be done without a helper.  */
DISAS_INSN(move_mac)
{
    int src;
    TCGv dest;
    src = insn & 3;
    dest = tcg_const_i32((insn >> 9) & 3);
    gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, dest);
}
DISAS_INSN(from_macsr)
{
    TCGv reg;

    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    tcg_gen_mov_i32(reg, QREG_MACSR);
}

DISAS_INSN(from_mask)
{
    TCGv reg;
    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    tcg_gen_mov_i32(reg, QREG_MAC_MASK);
}

DISAS_INSN(from_mext)
{
    TCGv reg;
    TCGv acc;
    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
    if (s->env->macsr & MACSR_FI)
        gen_helper_get_mac_extf(reg, cpu_env, acc);
    else
        gen_helper_get_mac_exti(reg, cpu_env, acc);
}
DISAS_INSN(macsr_to_ccr)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
    gen_helper_set_sr(cpu_env, tmp);
    tcg_temp_free(tmp);
    set_cc_op(s, CC_OP_FLAGS);
}
DISAS_INSN(to_mac)
{
    TCGv_i64 acc;
    TCGv val;
    int accnum;
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    SRC_EA(env, val, OS_LONG, 0, NULL);
    if (s->env->macsr & MACSR_FI) {
        tcg_gen_ext_i32_i64(acc, val);
        tcg_gen_shli_i64(acc, acc, 8);
    } else if (s->env->macsr & MACSR_SU) {
        tcg_gen_ext_i32_i64(acc, val);
    } else {
        tcg_gen_extu_i32_i64(acc, val);
    }
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
}
DISAS_INSN(to_macsr)
{
    TCGv val;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    gen_helper_set_macsr(cpu_env, val);
    gen_lookup_tb(s);
}

DISAS_INSN(to_mask)
{
    TCGv val;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
}

DISAS_INSN(to_mext)
{
    TCGv val;
    TCGv acc;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
    if (s->env->macsr & MACSR_FI)
        gen_helper_set_mac_extf(cpu_env, val, acc);
    else if (s->env->macsr & MACSR_SU)
        gen_helper_set_mac_exts(cpu_env, val, acc);
    else
        gen_helper_set_mac_extu(cpu_env, val, acc);
}
static disas_proc opcode_table[65536];

static void
register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
{
    int i;
    int from;
    int to;

    /* Sanity check.  All set bits must be included in the mask.  */
    if (opcode & ~mask) {
        fprintf(stderr,
                "qemu internal error: bogus opcode definition %04x/%04x\n",
                opcode, mask);
        abort();
    }
    /* This could probably be cleverer.  For now just optimize the case where
       the top bits are known.  */
    /* Find the first zero bit in the mask.  */
    i = 0x8000;
    while ((i & mask) != 0)
        i >>= 1;
    /* Iterate over all combinations of this and lower bits.  */
    if (i == 0)
        i = 1;
    else
        i <<= 1;
    from = opcode & ~(i - 1);
    to = from + i;
    for (i = from; i < to; i++) {
        if ((i & mask) == opcode)
            opcode_table[i] = proc;
    }
}
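/*
 * Example: register_opcode(disas_pea, 0x4840, 0xffc0) fills the 64 table
 * slots 0x4840-0x487f, since only the low six bits fall outside the
 * mask.  Later registrations (e.g. swap at 4840/fff8) deliberately
 * overwrite part of that range, which is why registration order matters.
 */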
/* Register m68k opcode handlers.  Order is important.
   Later insns override earlier ones.  */
void register_m68k_insns (CPUM68KState *env)
{
    /* Build the opcode table only once to avoid
       multithreading issues.  */
    if (opcode_table[0] != NULL) {
        return;
    }

    /* use BASE() for instruction available
     * for CF_ISA_A and M68000.
     */
#define BASE(name, opcode, mask) \
    register_opcode(disas_##name, 0x##opcode, 0x##mask)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        BASE(name, opcode, mask); \
    } while(0)
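    /*
     * Example: INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC) registers
     * disas_bitrev for opcodes 0x00c0-0x00c7 (the low three bits select
     * Dn), but only when the CPU advertises the ColdFire ISA A+ feature.
     */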
    BASE(undef, 0000, 0000);
    INSN(arith_im, 0080, fff8, CF_ISA_A);
    INSN(arith_im, 0000, ff00, M68000);
    INSN(undef, 00c0, ffc0, M68000);
    INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
    BASE(bitop_reg, 0100, f1c0);
    BASE(bitop_reg, 0140, f1c0);
    BASE(bitop_reg, 0180, f1c0);
    BASE(bitop_reg, 01c0, f1c0);
    INSN(arith_im, 0280, fff8, CF_ISA_A);
    INSN(arith_im, 0200, ff00, M68000);
    INSN(undef, 02c0, ffc0, M68000);
    INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im, 0480, fff8, CF_ISA_A);
    INSN(arith_im, 0400, ff00, M68000);
    INSN(undef, 04c0, ffc0, M68000);
    INSN(arith_im, 0600, ff00, M68000);
    INSN(undef, 06c0, ffc0, M68000);
    INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im, 0680, fff8, CF_ISA_A);
    INSN(arith_im, 0c00, ff38, CF_ISA_A);
    INSN(arith_im, 0c00, ff00, M68000);
    BASE(bitop_im, 0800, ffc0);
    BASE(bitop_im, 0840, ffc0);
    BASE(bitop_im, 0880, ffc0);
    BASE(bitop_im, 08c0, ffc0);
    INSN(arith_im, 0a80, fff8, CF_ISA_A);
    INSN(arith_im, 0a00, ff00, M68000);
    BASE(move, 1000, f000);
    BASE(move, 2000, f000);
    BASE(move, 3000, f000);
    INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
    INSN(negx, 4080, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, ffc0, M68000);
    BASE(lea, 41c0, f1c0);
    BASE(clr, 4200, ff00);
    BASE(undef, 42c0, ffc0);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(move_from_ccr, 42c0, ffc0, M68000);
    INSN(neg, 4480, fff8, CF_ISA_A);
    INSN(neg, 4400, ff00, M68000);
    INSN(undef, 44c0, ffc0, M68000);
    BASE(move_to_ccr, 44c0, ffc0);
    INSN(not, 4680, fff8, CF_ISA_A);
    INSN(not, 4600, ff00, M68000);
    INSN(undef, 46c0, ffc0, M68000);
    INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
    BASE(pea, 4840, ffc0);
    BASE(swap, 4840, fff8);
    BASE(movem, 48c0, fbc0);
    BASE(ext, 4880, fff8);
    BASE(ext, 48c0, fff8);
    BASE(ext, 49c0, fff8);
    BASE(tst, 4a00, ff00);
    INSN(tas, 4ac0, ffc0, CF_ISA_B);
    INSN(tas, 4ac0, ffc0, M68000);
    INSN(halt, 4ac8, ffff, CF_ISA_A);
    INSN(pulse, 4acc, ffff, CF_ISA_A);
    BASE(illegal, 4afc, ffff);
    INSN(mull, 4c00, ffc0, CF_ISA_A);
    INSN(mull, 4c00, ffc0, LONG_MULDIV);
    INSN(divl, 4c40, ffc0, CF_ISA_A);
    INSN(divl, 4c40, ffc0, LONG_MULDIV);
    INSN(sats, 4c80, fff8, CF_ISA_B);
    BASE(trap, 4e40, fff0);
    BASE(link, 4e50, fff8);
    BASE(unlk, 4e58, fff8);
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    BASE(nop, 4e71, ffff);
    BASE(stop, 4e72, ffff);
    BASE(rte, 4e73, ffff);
    BASE(rts, 4e75, ffff);
    INSN(movec, 4e7b, ffff, CF_ISA_A);
    BASE(jump, 4e80, ffc0);
    INSN(jump, 4ec0, ffc0, CF_ISA_A);
    INSN(addsubq, 5180, f1c0, CF_ISA_A);
    INSN(jump, 4ec0, ffc0, M68000);
    INSN(addsubq, 5000, f080, M68000);
    INSN(addsubq, 5080, f0c0, M68000);
    INSN(scc, 50c0, f0f8, CF_ISA_A);
    INSN(addsubq, 5080, f1c0, CF_ISA_A);
    INSN(tpf, 51f8, fff8, CF_ISA_A);

    /* Branch instructions.  */
    BASE(branch, 6000, f000);
    /* Disable long branch instructions, then add back the ones we want.  */
    BASE(undef, 60ff, f0ff); /* All long branches.  */
    INSN(branch, 60ff, f0ff, CF_ISA_B);
    INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch, 60ff, ffff, BRAL);
    INSN(branch, 60ff, f0ff, BCCL);

    BASE(moveq, 7000, f100);
    INSN(mvzs, 7100, f100, CF_ISA_B);
    BASE(or, 8000, f000);
    BASE(divw, 80c0, f0c0);
    BASE(addsub, 9000, f000);
    INSN(subx, 9180, f1f8, CF_ISA_A);
    INSN(suba, 91c0, f1c0, CF_ISA_A);

    BASE(undef_mac, a000, f000);
    INSN(mac, a000, f100, CF_EMAC);
    INSN(from_mac, a180, f9b0, CF_EMAC);
    INSN(move_mac, a110, f9fc, CF_EMAC);
    INSN(from_macsr, a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac, a100, f9c0, CF_EMAC);
    INSN(to_macsr, a900, ffc0, CF_EMAC);
    INSN(to_mext, ab00, fbc0, CF_EMAC);
    INSN(to_mask, ad00, ffc0, CF_EMAC);

    INSN(mov3q, a140, f1c0, CF_ISA_B);
    INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp, b080, f1c0, CF_ISA_A);
    INSN(cmpa, b1c0, f1c0, CF_ISA_A);
    INSN(cmp, b000, f100, M68000);
    INSN(eor, b100, f100, M68000);
    INSN(cmpa, b0c0, f0c0, M68000);
    INSN(eor, b180, f1c0, CF_ISA_A);
    BASE(and, c000, f000);
    BASE(mulw, c0c0, f0c0);
    BASE(addsub, d000, f000);
    INSN(addx, d180, f1f8, CF_ISA_A);
    INSN(adda, d1c0, f1c0, CF_ISA_A);
    INSN(adda, d0c0, f0c0, M68000);
    INSN(shift_im, e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(undef_fpu, f000, f000, CF_ISA_A);
    INSN(fpu, f200, ffc0, CF_FPU);
    INSN(fbcc, f280, ffc0, CF_FPU);
    INSN(frestore, f340, ffc0, CF_FPU);
    INSN(fsave, f340, ffc0, CF_FPU);
    INSN(intouch, f340, ffc0, CF_ISA_A);
    INSN(cpushl, f428, ff38, CF_ISA_A);
    INSN(wddata, fb00, ff00, CF_ISA_A);
    INSN(wdebug, fbc0, ffc0, CF_ISA_A);
#undef INSN
#undef BASE
}
/* ??? Some of this implementation is not exception safe.  We should always
   write back the result to memory before setting the condition codes.  */
static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
{
    uint16_t insn;

    insn = read_im16(env, s);
    opcode_table[insn](env, s, insn);
}
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    int pc_offset;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    dc->env = env;
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_synced = 1;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->fpcr = env->fpcr;
    dc->user = (env->sr & SR_S) == 0;
    dc->done_mac = 0;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do {
        pc_offset = dc->pc - pc_start;
        gen_throws_exception = NULL;
        tcg_gen_insn_start(dc->pc, dc->cc_op);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            gen_exception(dc, dc->pc, EXCP_DEBUG);
            dc->is_jmp = DISAS_JUMP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 2;
            break;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->insn_pc = dc->pc;
        disas_m68k_insn(env, dc);
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (!dc->is_jmp) {
            update_cc_op(dc);
            tcg_gen_movi_i32(QREG_PC, dc->pc);
        }
        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
    } else {
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            update_cc_op(dc);
            gen_jmp_tb(dc, 0, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            update_cc_op(dc);
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;
    int i;
    uint16_t sr;
    CPU_DoubleU u;

    for (i = 0; i < 8; i++) {
        u.d = env->fregs[i];
        cpu_fprintf(f, "D%d = %08x   A%d = %08x   F%d = %08x%08x (%12g)\n",
                    i, env->dregs[i], i, env->aregs[i],
                    i, u.l.upper, u.l.lower, *(double *)&u.d);
    }
    cpu_fprintf (f, "PC = %08x   ", env->pc);
    sr = env->sr | cpu_m68k_get_ccr(env);
    cpu_fprintf(f, "SR = %04x %c%c%c%c%c ", sr, (sr & CCF_X) ? 'X' : '-',
                (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
                (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
    cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
}
void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->pc = data[0];
    if (cc_op != CC_OP_DYNAMIC) {
        env->cc_op = cc_op;
    }
}