 * Copyright (c) 2005-2007 CodeSourcery
 * Written by Paul Brook
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
#include "qemu/osdep.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"


//#define DEBUG_DISPATCH 1

/* Fake floating point. */
#define tcg_gen_mov_f64 tcg_gen_mov_i64
#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64

#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#define DEFF64(name, offset) static TCGv_i64 QREG_##name;
static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;

static TCGv_env cpu_env;

static char cpu_reg_names[3*8*3 + 5*4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
static TCGv_i64 cpu_fregs[8];
static TCGv_i64 cpu_macc[4];

#define DREG(insn, pos) cpu_dregs[((insn) >> (pos)) & 7]
#define AREG(insn, pos) cpu_aregs[((insn) >> (pos)) & 7]
#define FREG(insn, pos) cpu_fregs[((insn) >> (pos)) & 7]
#define MACREG(acc) cpu_macc[acc]
#define QREG_SP cpu_aregs[7]
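/* Note: the DREG/AREG/FREG macros above pick a register operand out of a
   3-bit field of an instruction or extension word; for example,
   DREG(insn, 9) is the data register selected by bits 11:9 of insn. */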
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
/* Used to distinguish stores from bad addressing modes. */
static TCGv store_dummy;

#include "exec/gen-icount.h"
void m68k_tcg_init(void)
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFF64(name, offset) DEFO64(name, offset)

    cpu_halted = tcg_global_mem_new_i32(cpu_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),

    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        sprintf(p, "F%d", i);
        cpu_fregs[i] = tcg_global_mem_new_i64(cpu_env,
                                              offsetof(CPUM68KState, fregs[i]), p);

    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUM68KState, macc[i]), p);

    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
/* internal defines */
typedef struct DisasContext {
    target_ulong insn_pc; /* Start of the current instruction. */
    struct TranslationBlock *tb;
    int singlestep_enabled;

#define DISAS_JUMP_NEXT 4

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) s->user

/* XXX: move that elsewhere */
/* ??? Fix exceptions. */
static void *gen_throws_exception;
#define gen_last_qop NULL

typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);

#ifdef DEBUG_DISPATCH
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,  \
    static void disas_##name(CPUM68KState *env, DisasContext *s,       \
        qemu_log("Dispatch " #name "\n");                               \
        real_disas_##name(env, s, insn);                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,  \
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,       \
/* Generate a load from the specified address. Narrow values are
   sign extended to full register width. */
static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
    int index = IS_USER(s);
    tmp = tcg_temp_new_i32();
        tcg_gen_qemu_ld8s(tmp, addr, index);
        tcg_gen_qemu_ld8u(tmp, addr, index);
        tcg_gen_qemu_ld16s(tmp, addr, index);
        tcg_gen_qemu_ld16u(tmp, addr, index);
        tcg_gen_qemu_ld32u(tmp, addr, index);
        g_assert_not_reached();
    gen_throws_exception = gen_last_qop;

static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)
    int index = IS_USER(s);
    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ldf64(tmp, addr, index);
    gen_throws_exception = gen_last_qop;
/* Generate a store. */
static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
    int index = IS_USER(s);
        tcg_gen_qemu_st8(val, addr, index);
        tcg_gen_qemu_st16(val, addr, index);
        tcg_gen_qemu_st32(val, addr, index);
        g_assert_not_reached();
    gen_throws_exception = gen_last_qop;

static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)
    int index = IS_USER(s);
    tcg_gen_qemu_stf64(val, addr, index);
    gen_throws_exception = gen_last_qop;

/* Generate an unsigned load if VAL is 0 a signed load if val is -1,
   otherwise generate a store. */
static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
    if (what == EA_STORE) {
        gen_store(s, opsize, addr, val);
        return gen_load(s, opsize, addr, what == EA_LOADS);
/* Read a 16-bit immediate constant */
static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
    im = cpu_lduw_code(env, s->pc);

/* Read an 8-bit immediate constant */
static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
    return read_im16(env, s);

/* Read a 32-bit immediate constant. */
static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
    im = read_im16(env, s) << 16;
    im |= 0xffff & read_im16(env, s);
/* Calculate an address index. */
static TCGv gen_addr_index(uint16_t ext, TCGv tmp)
    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        tcg_gen_ext16s_i32(tmp, add);
    scale = (ext >> 9) & 3;
        tcg_gen_shli_i32(tmp, add, scale);
/* Handle a base + index + displacement effective address.
   A NULL_QREG base means pc-relative. */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
    ext = read_im16(env, s);

    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))

    if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {

        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))

        if ((ext & 0x30) > 0x10) {
            /* base displacement */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)read_im16(env, s);
                bd = read_im32(env, s);
        tmp = tcg_temp_new();
        if ((ext & 0x44) == 0) {
            add = gen_addr_index(ext, tmp);
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                base = tcg_const_i32(offset + bd);
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
            if (!IS_NULL_QREG(add)) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tcg_const_i32(bd);
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = gen_load(s, OS_LONG, add, 0);
            if ((ext & 0x44) == 4) {
                add = gen_addr_index(ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
            /* outer displacement */
            if ((ext & 3) == 2) {
                od = (int16_t)read_im16(env, s);
                od = read_im32(env, s);
            tcg_gen_addi_i32(tmp, add, od);
        /* brief extension word format */
        tmp = tcg_temp_new();
        add = gen_addr_index(ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
/* Update the CPU env CC_OP state. */
static inline void gen_flush_cc_op(DisasContext *s)
    if (s->cc_op != CC_OP_DYNAMIC)
        tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);

/* Evaluate all the CC flags. */
static inline void gen_flush_flags(DisasContext *s)
    if (s->cc_op == CC_OP_FLAGS)
    gen_helper_flush_flags(cpu_env, QREG_CC_OP);
    s->cc_op = CC_OP_FLAGS;

static void gen_logic_cc(DisasContext *s, TCGv val)
    tcg_gen_mov_i32(QREG_CC_DEST, val);
    s->cc_op = CC_OP_LOGIC;

static void gen_update_cc_add(TCGv dest, TCGv src)
    tcg_gen_mov_i32(QREG_CC_DEST, dest);
    tcg_gen_mov_i32(QREG_CC_SRC, src);

static inline int opsize_bytes(int opsize)
    case OS_BYTE: return 1;
    case OS_WORD: return 2;
    case OS_LONG: return 4;
    case OS_SINGLE: return 4;
    case OS_DOUBLE: return 8;
        g_assert_not_reached();
/* Assign value to a register. If the width is less than the register width
   only the low part of the register is set. */
static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
        tcg_gen_andi_i32(reg, reg, 0xffffff00);
        tmp = tcg_temp_new();
        tcg_gen_ext8u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        tcg_gen_andi_i32(reg, reg, 0xffff0000);
        tmp = tcg_temp_new();
        tcg_gen_ext16u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        tcg_gen_mov_i32(reg, val);
        g_assert_not_reached();

/* Sign or zero extend a value. */
static inline TCGv gen_extend(TCGv val, int opsize, int sign)
        tmp = tcg_temp_new();
            tcg_gen_ext8s_i32(tmp, val);
            tcg_gen_ext8u_i32(tmp, val);
        tmp = tcg_temp_new();
            tcg_gen_ext16s_i32(tmp, val);
            tcg_gen_ext16u_i32(tmp, val);
        g_assert_not_reached();
/* Generate code for an "effective address". Does not adjust the base
   register for autoincrement addressing modes. */
static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct. */
    case 1: /* Address register direct. */
    case 2: /* Indirect register */
    case 3: /* Indirect postincrement. */
        return AREG(insn, 0);
    case 4: /* Indirect predecrement. */
        tmp = tcg_temp_new();
        tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
    case 5: /* Indirect displacement. */
        tmp = tcg_temp_new();
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
    case 6: /* Indirect index + displacement. */
        return gen_lea_indexed(env, s, reg);
        case 0: /* Absolute short. */
            offset = (int16_t)read_im16(env, s);
            return tcg_const_i32(offset);
        case 1: /* Absolute long. */
            offset = read_im32(env, s);
            return tcg_const_i32(offset);
        case 2: /* pc displacement */
            offset += (int16_t)read_im16(env, s);
            return tcg_const_i32(offset);
        case 3: /* pc index+displacement. */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate. */
    /* Should never happen. */
/* Helper function for gen_ea. Reuse the computed address between the
   read/write operands. */
static inline TCGv gen_ea_once(CPUM68KState *env, DisasContext *s,
                               uint16_t insn, int opsize, TCGv val,
                               TCGv *addrp, ea_what what)
    if (addrp && what == EA_STORE) {
        tmp = gen_lea(env, s, insn, opsize);
        if (IS_NULL_QREG(tmp))
    return gen_ldst(s, opsize, tmp, val, what);

/* Generate code to load/store a value from/into an EA. If VAL > 0 this is
   a write otherwise it is a read (0 == sign extend, -1 == zero extend).
   ADDRP is non-null for readwrite operands. */
static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
                   int opsize, TCGv val, TCGv *addrp, ea_what what)
    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct. */
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return gen_extend(reg, opsize, what == EA_LOADS);
    case 1: /* Address register direct. */
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return gen_extend(reg, opsize, what == EA_LOADS);
    case 2: /* Indirect register */
        return gen_ldst(s, opsize, reg, val, what);
    case 3: /* Indirect postincrement. */
        result = gen_ldst(s, opsize, reg, val, what);
        /* ??? This is not exception safe. The instruction may still
           fault after this point. */
        if (what == EA_STORE || !addrp)
            tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
    case 4: /* Indirect predecrement. */
        if (addrp && what == EA_STORE) {
            tmp = gen_lea(env, s, insn, opsize);
            if (IS_NULL_QREG(tmp))
        result = gen_ldst(s, opsize, tmp, val, what);
        /* ??? This is not exception safe. The instruction may still
           fault after this point. */
        if (what == EA_STORE || !addrp) {
            tcg_gen_mov_i32(reg, tmp);
    case 5: /* Indirect displacement. */
    case 6: /* Indirect index + displacement. */
        return gen_ea_once(env, s, insn, opsize, val, addrp, what);
        case 0: /* Absolute short. */
        case 1: /* Absolute long. */
        case 2: /* pc displacement */
        case 3: /* pc index+displacement. */
            return gen_ea_once(env, s, insn, opsize, val, addrp, what);
        case 4: /* Immediate. */
            /* Sign extend values for consistency. */
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                    offset = read_im8(env, s);
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                    offset = read_im16(env, s);
                offset = read_im32(env, s);
                g_assert_not_reached();
            return tcg_const_i32(offset);
    /* Should never happen. */
/* This generates a conditional branch, clobbering all temporaries. */
static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
    /* TODO: Optimize compare/branch pairs rather than always flushing
       flag state to CC_OP_FLAGS. */
    case 2: /* HI (!C && !Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
    case 3: /* LS (C || Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 4: /* CC (!C) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 6: /* NE (!Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 8: /* VC (!V) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 10: /* PL (!N) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
    case 11: /* MI (N) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 12: /* GE (!(N ^ V)) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
    case 13: /* LT (N ^ V) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 14: /* GT (!(Z || (N ^ V))) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_shri_i32(tmp, tmp, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
    case 15: /* LE (Z || (N ^ V)) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_shri_i32(tmp, tmp, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        /* Should never happen. */

    l1 = gen_new_label();
    cond = (insn >> 8) & 0xf;
    tcg_gen_andi_i32(reg, reg, 0xffffff00);
    /* This is safe because we modify the reg directly, with no other values
    gen_jmpcc(s, cond ^ 1, l1);
    tcg_gen_ori_i32(reg, reg, 0xff);
/* Force a TB lookup after an instruction that changes the CPU state. */
static void gen_lookup_tb(DisasContext *s)
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->is_jmp = DISAS_UPDATE;

/* Generate a jump to an immediate address. */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
    tcg_gen_movi_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;

/* Generate a jump to the address in qreg DEST. */
static void gen_jmp(DisasContext *s, TCGv dest)
    tcg_gen_mov_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;

static void gen_exception(DisasContext *s, uint32_t where, int nr)
    gen_jmp_im(s, where);
    gen_helper_raise_exception(cpu_env, tcg_const_i32(nr));

static inline void gen_addr_fault(DisasContext *s)
    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
    result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,             \
                    op_sign ? EA_LOADS : EA_LOADU);                     \
    if (IS_NULL_QREG(result)) {                                         \

#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
    TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
    if (IS_NULL_QREG(ea_result)) {                                      \
static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
#ifndef CONFIG_USER_ONLY
    return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);

/* Generate a jump to an immediate address. */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
    if (unlikely(s->singlestep_enabled)) {
        gen_exception(s, dest, EXCP_DEBUG);
    } else if (use_goto_tb(s, dest)) {
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb + n);
    s->is_jmp = DISAS_TB_JUMP;
DISAS_INSN(undef_mac)
    gen_exception(s, s->pc - 2, EXCP_LINEA);

DISAS_INSN(undef_fpu)
    gen_exception(s, s->pc - 2, EXCP_LINEF);

    M68kCPU *cpu = m68k_env_get_cpu(env);

    gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
    cpu_abort(CPU(cpu), "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
918 sign
= (insn
& 0x100) != 0;
920 tmp
= tcg_temp_new();
922 tcg_gen_ext16s_i32(tmp
, reg
);
924 tcg_gen_ext16u_i32(tmp
, reg
);
925 SRC_EA(env
, src
, OS_WORD
, sign
, NULL
);
926 tcg_gen_mul_i32(tmp
, tmp
, src
);
927 tcg_gen_mov_i32(reg
, tmp
);
928 /* Unlike m68k, coldfire always clears the overflow bit. */
929 gen_logic_cc(s
, tmp
);
939 sign
= (insn
& 0x100) != 0;
942 tcg_gen_ext16s_i32(QREG_DIV1
, reg
);
944 tcg_gen_ext16u_i32(QREG_DIV1
, reg
);
946 SRC_EA(env
, src
, OS_WORD
, sign
, NULL
);
947 tcg_gen_mov_i32(QREG_DIV2
, src
);
949 gen_helper_divs(cpu_env
, tcg_const_i32(1));
951 gen_helper_divu(cpu_env
, tcg_const_i32(1));
954 tmp
= tcg_temp_new();
955 src
= tcg_temp_new();
956 tcg_gen_ext16u_i32(tmp
, QREG_DIV1
);
957 tcg_gen_shli_i32(src
, QREG_DIV2
, 16);
958 tcg_gen_or_i32(reg
, tmp
, src
);
959 s
->cc_op
= CC_OP_FLAGS
;
969 ext
= read_im16(env
, s
);
971 gen_exception(s
, s
->pc
- 4, EXCP_UNSUPPORTED
);
976 tcg_gen_mov_i32(QREG_DIV1
, num
);
977 SRC_EA(env
, den
, OS_LONG
, 0, NULL
);
978 tcg_gen_mov_i32(QREG_DIV2
, den
);
980 gen_helper_divs(cpu_env
, tcg_const_i32(0));
982 gen_helper_divu(cpu_env
, tcg_const_i32(0));
984 if ((ext
& 7) == ((ext
>> 12) & 7)) {
986 tcg_gen_mov_i32 (reg
, QREG_DIV1
);
989 tcg_gen_mov_i32 (reg
, QREG_DIV2
);
991 s
->cc_op
= CC_OP_FLAGS
;
1003 add
= (insn
& 0x4000) != 0;
1004 reg
= DREG(insn
, 9);
1005 dest
= tcg_temp_new();
1007 SRC_EA(env
, tmp
, OS_LONG
, 0, &addr
);
1011 SRC_EA(env
, src
, OS_LONG
, 0, NULL
);
1014 tcg_gen_add_i32(dest
, tmp
, src
);
1015 gen_helper_xflag_lt(QREG_CC_X
, dest
, src
);
1016 s
->cc_op
= CC_OP_ADD
;
1018 gen_helper_xflag_lt(QREG_CC_X
, tmp
, src
);
1019 tcg_gen_sub_i32(dest
, tmp
, src
);
1020 s
->cc_op
= CC_OP_SUB
;
1022 gen_update_cc_add(dest
, src
);
1024 DEST_EA(env
, insn
, OS_LONG
, dest
, &addr
);
1026 tcg_gen_mov_i32(reg
, dest
);
1031 /* Reverse the order of the bits in REG. */
1035 reg
= DREG(insn
, 0);
1036 gen_helper_bitrev(reg
, reg
);
1039 DISAS_INSN(bitop_reg
)
1049 if ((insn
& 0x38) != 0)
1053 op
= (insn
>> 6) & 3;
1054 SRC_EA(env
, src1
, opsize
, 0, op
? &addr
: NULL
);
1055 src2
= DREG(insn
, 9);
1056 dest
= tcg_temp_new();
1059 tmp
= tcg_temp_new();
1060 if (opsize
== OS_BYTE
)
1061 tcg_gen_andi_i32(tmp
, src2
, 7);
1063 tcg_gen_andi_i32(tmp
, src2
, 31);
1065 tmp
= tcg_temp_new();
1066 tcg_gen_shr_i32(tmp
, src1
, src2
);
1067 tcg_gen_andi_i32(tmp
, tmp
, 1);
1068 tcg_gen_shli_i32(tmp
, tmp
, 2);
1069 /* Clear CCF_Z if bit set. */
1070 tcg_gen_ori_i32(QREG_CC_DEST
, QREG_CC_DEST
, CCF_Z
);
1071 tcg_gen_xor_i32(QREG_CC_DEST
, QREG_CC_DEST
, tmp
);
1073 tcg_gen_shl_i32(tmp
, tcg_const_i32(1), src2
);
1076 tcg_gen_xor_i32(dest
, src1
, tmp
);
1079 tcg_gen_not_i32(tmp
, tmp
);
1080 tcg_gen_and_i32(dest
, src1
, tmp
);
1083 tcg_gen_or_i32(dest
, src1
, tmp
);
1089 DEST_EA(env
, insn
, opsize
, dest
, &addr
);
1095 reg
= DREG(insn
, 0);
1097 gen_helper_sats(reg
, reg
, QREG_CC_DEST
);
1098 gen_logic_cc(s
, reg
);
1101 static void gen_push(DisasContext
*s
, TCGv val
)
1105 tmp
= tcg_temp_new();
1106 tcg_gen_subi_i32(tmp
, QREG_SP
, 4);
1107 gen_store(s
, OS_LONG
, tmp
, val
);
1108 tcg_gen_mov_i32(QREG_SP
, tmp
);
1120 mask
= read_im16(env
, s
);
1121 tmp
= gen_lea(env
, s
, insn
, OS_LONG
);
1122 if (IS_NULL_QREG(tmp
)) {
1126 addr
= tcg_temp_new();
1127 tcg_gen_mov_i32(addr
, tmp
);
1128 is_load
= ((insn
& 0x0400) != 0);
1129 for (i
= 0; i
< 16; i
++, mask
>>= 1) {
1136 tmp
= gen_load(s
, OS_LONG
, addr
, 0);
1137 tcg_gen_mov_i32(reg
, tmp
);
1139 gen_store(s
, OS_LONG
, addr
, reg
);
1142 tcg_gen_addi_i32(addr
, addr
, 4);
1147 DISAS_INSN(bitop_im
)
1157 if ((insn
& 0x38) != 0)
1161 op
= (insn
>> 6) & 3;
1163 bitnum
= read_im16(env
, s
);
1164 if (bitnum
& 0xff00) {
1165 disas_undef(env
, s
, insn
);
1169 SRC_EA(env
, src1
, opsize
, 0, op
? &addr
: NULL
);
1172 if (opsize
== OS_BYTE
)
1178 tmp
= tcg_temp_new();
1179 assert (CCF_Z
== (1 << 2));
1181 tcg_gen_shri_i32(tmp
, src1
, bitnum
- 2);
1182 else if (bitnum
< 2)
1183 tcg_gen_shli_i32(tmp
, src1
, 2 - bitnum
);
1185 tcg_gen_mov_i32(tmp
, src1
);
1186 tcg_gen_andi_i32(tmp
, tmp
, CCF_Z
);
1187 /* Clear CCF_Z if bit set. */
1188 tcg_gen_ori_i32(QREG_CC_DEST
, QREG_CC_DEST
, CCF_Z
);
1189 tcg_gen_xor_i32(QREG_CC_DEST
, QREG_CC_DEST
, tmp
);
1193 tcg_gen_xori_i32(tmp
, src1
, mask
);
1196 tcg_gen_andi_i32(tmp
, src1
, ~mask
);
1199 tcg_gen_ori_i32(tmp
, src1
, mask
);
1204 DEST_EA(env
, insn
, opsize
, tmp
, &addr
);
1208 DISAS_INSN(arith_im
)
1216 op
= (insn
>> 9) & 7;
1217 SRC_EA(env
, src1
, OS_LONG
, 0, (op
== 6) ? NULL
: &addr
);
1218 im
= read_im32(env
, s
);
1219 dest
= tcg_temp_new();
1222 tcg_gen_ori_i32(dest
, src1
, im
);
1223 gen_logic_cc(s
, dest
);
1226 tcg_gen_andi_i32(dest
, src1
, im
);
1227 gen_logic_cc(s
, dest
);
1230 tcg_gen_mov_i32(dest
, src1
);
1231 gen_helper_xflag_lt(QREG_CC_X
, dest
, tcg_const_i32(im
));
1232 tcg_gen_subi_i32(dest
, dest
, im
);
1233 gen_update_cc_add(dest
, tcg_const_i32(im
));
1234 s
->cc_op
= CC_OP_SUB
;
1237 tcg_gen_mov_i32(dest
, src1
);
1238 tcg_gen_addi_i32(dest
, dest
, im
);
1239 gen_update_cc_add(dest
, tcg_const_i32(im
));
1240 gen_helper_xflag_lt(QREG_CC_X
, dest
, tcg_const_i32(im
));
1241 s
->cc_op
= CC_OP_ADD
;
1244 tcg_gen_xori_i32(dest
, src1
, im
);
1245 gen_logic_cc(s
, dest
);
1248 tcg_gen_mov_i32(dest
, src1
);
1249 tcg_gen_subi_i32(dest
, dest
, im
);
1250 gen_update_cc_add(dest
, tcg_const_i32(im
));
1251 s
->cc_op
= CC_OP_SUB
;
1257 DEST_EA(env
, insn
, OS_LONG
, dest
, &addr
);
1265 reg
= DREG(insn
, 0);
1266 tcg_gen_bswap32_i32(reg
, reg
);
1276 switch (insn
>> 12) {
1277 case 1: /* move.b */
1280 case 2: /* move.l */
1283 case 3: /* move.w */
1289 SRC_EA(env
, src
, opsize
, 1, NULL
);
1290 op
= (insn
>> 6) & 7;
1293 /* The value will already have been sign extended. */
1294 dest
= AREG(insn
, 9);
1295 tcg_gen_mov_i32(dest
, src
);
1299 dest_ea
= ((insn
>> 9) & 7) | (op
<< 3);
1300 DEST_EA(env
, dest_ea
, opsize
, src
, NULL
);
1301 /* This will be correct because loads sign extend. */
1302 gen_logic_cc(s
, src
);
1311 reg
= DREG(insn
, 0);
1312 gen_helper_subx_cc(reg
, cpu_env
, tcg_const_i32(0), reg
);
1320 reg
= AREG(insn
, 9);
1321 tmp
= gen_lea(env
, s
, insn
, OS_LONG
);
1322 if (IS_NULL_QREG(tmp
)) {
1326 tcg_gen_mov_i32(reg
, tmp
);
1333 switch ((insn
>> 6) & 3) {
1346 DEST_EA(env
, insn
, opsize
, tcg_const_i32(0), NULL
);
1347 gen_logic_cc(s
, tcg_const_i32(0));
1350 static TCGv
gen_get_ccr(DisasContext
*s
)
1355 dest
= tcg_temp_new();
1356 tcg_gen_shli_i32(dest
, QREG_CC_X
, 4);
1357 tcg_gen_or_i32(dest
, dest
, QREG_CC_DEST
);
1361 DISAS_INSN(move_from_ccr
)
1366 ccr
= gen_get_ccr(s
);
1367 reg
= DREG(insn
, 0);
1368 gen_partset_reg(OS_WORD
, reg
, ccr
);
1376 reg
= DREG(insn
, 0);
1377 src1
= tcg_temp_new();
1378 tcg_gen_mov_i32(src1
, reg
);
1379 tcg_gen_neg_i32(reg
, src1
);
1380 s
->cc_op
= CC_OP_SUB
;
1381 gen_update_cc_add(reg
, src1
);
1382 gen_helper_xflag_lt(QREG_CC_X
, tcg_const_i32(0), src1
);
1383 s
->cc_op
= CC_OP_SUB
;
1386 static void gen_set_sr_im(DisasContext
*s
, uint16_t val
, int ccr_only
)
1388 tcg_gen_movi_i32(QREG_CC_DEST
, val
& 0xf);
1389 tcg_gen_movi_i32(QREG_CC_X
, (val
& 0x10) >> 4);
1391 gen_helper_set_sr(cpu_env
, tcg_const_i32(val
& 0xff00));
1395 static void gen_set_sr(CPUM68KState
*env
, DisasContext
*s
, uint16_t insn
,
1401 s
->cc_op
= CC_OP_FLAGS
;
1402 if ((insn
& 0x38) == 0)
1404 tmp
= tcg_temp_new();
1405 reg
= DREG(insn
, 0);
1406 tcg_gen_andi_i32(QREG_CC_DEST
, reg
, 0xf);
1407 tcg_gen_shri_i32(tmp
, reg
, 4);
1408 tcg_gen_andi_i32(QREG_CC_X
, tmp
, 1);
1410 gen_helper_set_sr(cpu_env
, reg
);
1413 else if ((insn
& 0x3f) == 0x3c)
1416 val
= read_im16(env
, s
);
1417 gen_set_sr_im(s
, val
, ccr_only
);
1420 disas_undef(env
, s
, insn
);
1423 DISAS_INSN(move_to_ccr
)
1425 gen_set_sr(env
, s
, insn
, 1);
1432 reg
= DREG(insn
, 0);
1433 tcg_gen_not_i32(reg
, reg
);
1434 gen_logic_cc(s
, reg
);
1443 src1
= tcg_temp_new();
1444 src2
= tcg_temp_new();
1445 reg
= DREG(insn
, 0);
1446 tcg_gen_shli_i32(src1
, reg
, 16);
1447 tcg_gen_shri_i32(src2
, reg
, 16);
1448 tcg_gen_or_i32(reg
, src1
, src2
);
1449 gen_logic_cc(s
, reg
);
1456 tmp
= gen_lea(env
, s
, insn
, OS_LONG
);
1457 if (IS_NULL_QREG(tmp
)) {
1470 reg
= DREG(insn
, 0);
1471 op
= (insn
>> 6) & 7;
1472 tmp
= tcg_temp_new();
1474 tcg_gen_ext16s_i32(tmp
, reg
);
1476 tcg_gen_ext8s_i32(tmp
, reg
);
1478 gen_partset_reg(OS_WORD
, reg
, tmp
);
1480 tcg_gen_mov_i32(reg
, tmp
);
1481 gen_logic_cc(s
, tmp
);
1489 switch ((insn
>> 6) & 3) {
1502 SRC_EA(env
, tmp
, opsize
, 1, NULL
);
1503 gen_logic_cc(s
, tmp
);
1508 /* Implemented as a NOP. */
1513 gen_exception(s
, s
->pc
- 2, EXCP_ILLEGAL
);
1516 /* ??? This should be atomic. */
1523 dest
= tcg_temp_new();
1524 SRC_EA(env
, src1
, OS_BYTE
, 1, &addr
);
1525 gen_logic_cc(s
, src1
);
1526 tcg_gen_ori_i32(dest
, src1
, 0x80);
1527 DEST_EA(env
, insn
, OS_BYTE
, dest
, &addr
);
1537 /* The upper 32 bits of the product are discarded, so
1538 muls.l and mulu.l are functionally equivalent. */
1539 ext
= read_im16(env
, s
);
1541 gen_exception(s
, s
->pc
- 4, EXCP_UNSUPPORTED
);
1544 reg
= DREG(ext
, 12);
1545 SRC_EA(env
, src1
, OS_LONG
, 0, NULL
);
1546 dest
= tcg_temp_new();
1547 tcg_gen_mul_i32(dest
, src1
, reg
);
1548 tcg_gen_mov_i32(reg
, dest
);
1549 /* Unlike m68k, coldfire always clears the overflow bit. */
1550 gen_logic_cc(s
, dest
);
1559 offset
= cpu_ldsw_code(env
, s
->pc
);
1561 reg
= AREG(insn
, 0);
1562 tmp
= tcg_temp_new();
1563 tcg_gen_subi_i32(tmp
, QREG_SP
, 4);
1564 gen_store(s
, OS_LONG
, tmp
, reg
);
1565 if ((insn
& 7) != 7)
1566 tcg_gen_mov_i32(reg
, tmp
);
1567 tcg_gen_addi_i32(QREG_SP
, tmp
, offset
);
1576 src
= tcg_temp_new();
1577 reg
= AREG(insn
, 0);
1578 tcg_gen_mov_i32(src
, reg
);
1579 tmp
= gen_load(s
, OS_LONG
, src
, 0);
1580 tcg_gen_mov_i32(reg
, tmp
);
1581 tcg_gen_addi_i32(QREG_SP
, src
, 4);
1592 tmp
= gen_load(s
, OS_LONG
, QREG_SP
, 0);
1593 tcg_gen_addi_i32(QREG_SP
, QREG_SP
, 4);
1601 /* Load the target address first to ensure correct exception
1603 tmp
= gen_lea(env
, s
, insn
, OS_LONG
);
1604 if (IS_NULL_QREG(tmp
)) {
1608 if ((insn
& 0x40) == 0) {
1610 gen_push(s
, tcg_const_i32(s
->pc
));
1623 SRC_EA(env
, src1
, OS_LONG
, 0, &addr
);
1624 val
= (insn
>> 9) & 7;
1627 dest
= tcg_temp_new();
1628 tcg_gen_mov_i32(dest
, src1
);
1629 if ((insn
& 0x38) == 0x08) {
1630 /* Don't update condition codes if the destination is an
1631 address register. */
1632 if (insn
& 0x0100) {
1633 tcg_gen_subi_i32(dest
, dest
, val
);
1635 tcg_gen_addi_i32(dest
, dest
, val
);
1638 src2
= tcg_const_i32(val
);
1639 if (insn
& 0x0100) {
1640 gen_helper_xflag_lt(QREG_CC_X
, dest
, src2
);
1641 tcg_gen_subi_i32(dest
, dest
, val
);
1642 s
->cc_op
= CC_OP_SUB
;
1644 tcg_gen_addi_i32(dest
, dest
, val
);
1645 gen_helper_xflag_lt(QREG_CC_X
, dest
, src2
);
1646 s
->cc_op
= CC_OP_ADD
;
1648 gen_update_cc_add(dest
, src2
);
1650 DEST_EA(env
, insn
, OS_LONG
, dest
, &addr
);
1656 case 2: /* One extension word. */
1659 case 3: /* Two extension words. */
1662 case 4: /* No extension words. */
1665 disas_undef(env
, s
, insn
);
1677 op
= (insn
>> 8) & 0xf;
1678 offset
= (int8_t)insn
;
1680 offset
= (int16_t)read_im16(env
, s
);
1681 } else if (offset
== -1) {
1682 offset
= read_im32(env
, s
);
1686 gen_push(s
, tcg_const_i32(s
->pc
));
1691 l1
= gen_new_label();
1692 gen_jmpcc(s
, ((insn
>> 8) & 0xf) ^ 1, l1
);
1693 gen_jmp_tb(s
, 1, base
+ offset
);
1695 gen_jmp_tb(s
, 0, s
->pc
);
1697 /* Unconditional branch. */
1698 gen_jmp_tb(s
, 0, base
+ offset
);
1707 tcg_gen_movi_i32(DREG(insn
, 9), val
);
1708 gen_logic_cc(s
, tcg_const_i32(val
));
1721 SRC_EA(env
, src
, opsize
, (insn
& 0x80) == 0, NULL
);
1722 reg
= DREG(insn
, 9);
1723 tcg_gen_mov_i32(reg
, src
);
1724 gen_logic_cc(s
, src
);
1734 reg
= DREG(insn
, 9);
1735 dest
= tcg_temp_new();
1737 SRC_EA(env
, src
, OS_LONG
, 0, &addr
);
1738 tcg_gen_or_i32(dest
, src
, reg
);
1739 DEST_EA(env
, insn
, OS_LONG
, dest
, &addr
);
1741 SRC_EA(env
, src
, OS_LONG
, 0, NULL
);
1742 tcg_gen_or_i32(dest
, src
, reg
);
1743 tcg_gen_mov_i32(reg
, dest
);
1745 gen_logic_cc(s
, dest
);
1753 SRC_EA(env
, src
, OS_LONG
, 0, NULL
);
1754 reg
= AREG(insn
, 9);
1755 tcg_gen_sub_i32(reg
, reg
, src
);
1764 reg
= DREG(insn
, 9);
1765 src
= DREG(insn
, 0);
1766 gen_helper_subx_cc(reg
, cpu_env
, reg
, src
);
1774 val
= (insn
>> 9) & 7;
1777 src
= tcg_const_i32(val
);
1778 gen_logic_cc(s
, src
);
1779 DEST_EA(env
, insn
, OS_LONG
, src
, NULL
);
1790 op
= (insn
>> 6) & 3;
1794 s
->cc_op
= CC_OP_CMPB
;
1798 s
->cc_op
= CC_OP_CMPW
;
1802 s
->cc_op
= CC_OP_SUB
;
1807 SRC_EA(env
, src
, opsize
, 1, NULL
);
1808 reg
= DREG(insn
, 9);
1809 dest
= tcg_temp_new();
1810 tcg_gen_sub_i32(dest
, reg
, src
);
1811 gen_update_cc_add(dest
, src
);
1826 SRC_EA(env
, src
, opsize
, 1, NULL
);
1827 reg
= AREG(insn
, 9);
1828 dest
= tcg_temp_new();
1829 tcg_gen_sub_i32(dest
, reg
, src
);
1830 gen_update_cc_add(dest
, src
);
1831 s
->cc_op
= CC_OP_SUB
;
1841 SRC_EA(env
, src
, OS_LONG
, 0, &addr
);
1842 reg
= DREG(insn
, 9);
1843 dest
= tcg_temp_new();
1844 tcg_gen_xor_i32(dest
, src
, reg
);
1845 gen_logic_cc(s
, dest
);
1846 DEST_EA(env
, insn
, OS_LONG
, dest
, &addr
);
1856 reg
= DREG(insn
, 9);
1857 dest
= tcg_temp_new();
1859 SRC_EA(env
, src
, OS_LONG
, 0, &addr
);
1860 tcg_gen_and_i32(dest
, src
, reg
);
1861 DEST_EA(env
, insn
, OS_LONG
, dest
, &addr
);
1863 SRC_EA(env
, src
, OS_LONG
, 0, NULL
);
1864 tcg_gen_and_i32(dest
, src
, reg
);
1865 tcg_gen_mov_i32(reg
, dest
);
1867 gen_logic_cc(s
, dest
);
1875 SRC_EA(env
, src
, OS_LONG
, 0, NULL
);
1876 reg
= AREG(insn
, 9);
1877 tcg_gen_add_i32(reg
, reg
, src
);
1886 reg
= DREG(insn
, 9);
1887 src
= DREG(insn
, 0);
1888 gen_helper_addx_cc(reg
, cpu_env
, reg
, src
);
1889 s
->cc_op
= CC_OP_FLAGS
;
1892 /* TODO: This could be implemented without helper functions. */
1893 DISAS_INSN(shift_im
)
1899 reg
= DREG(insn
, 0);
1900 tmp
= (insn
>> 9) & 7;
1903 shift
= tcg_const_i32(tmp
);
1904 /* No need to flush flags becuse we know we will set C flag. */
1906 gen_helper_shl_cc(reg
, cpu_env
, reg
, shift
);
1909 gen_helper_shr_cc(reg
, cpu_env
, reg
, shift
);
1911 gen_helper_sar_cc(reg
, cpu_env
, reg
, shift
);
1914 s
->cc_op
= CC_OP_SHIFT
;
1917 DISAS_INSN(shift_reg
)
1922 reg
= DREG(insn
, 0);
1923 shift
= DREG(insn
, 9);
1924 /* Shift by zero leaves C flag unmodified. */
1927 gen_helper_shl_cc(reg
, cpu_env
, reg
, shift
);
1930 gen_helper_shr_cc(reg
, cpu_env
, reg
, shift
);
1932 gen_helper_sar_cc(reg
, cpu_env
, reg
, shift
);
1935 s
->cc_op
= CC_OP_SHIFT
;
1941 reg
= DREG(insn
, 0);
1942 gen_logic_cc(s
, reg
);
1943 gen_helper_ff1(reg
, reg
);
1946 static TCGv
gen_get_sr(DisasContext
*s
)
1951 ccr
= gen_get_ccr(s
);
1952 sr
= tcg_temp_new();
1953 tcg_gen_andi_i32(sr
, QREG_SR
, 0xffe0);
1954 tcg_gen_or_i32(sr
, sr
, ccr
);
1964 ext
= read_im16(env
, s
);
1965 if (ext
!= 0x46FC) {
1966 gen_exception(s
, addr
, EXCP_UNSUPPORTED
);
1969 ext
= read_im16(env
, s
);
1970 if (IS_USER(s
) || (ext
& SR_S
) == 0) {
1971 gen_exception(s
, addr
, EXCP_PRIVILEGE
);
1974 gen_push(s
, gen_get_sr(s
));
1975 gen_set_sr_im(s
, ext
, 0);
1978 DISAS_INSN(move_from_sr
)
1984 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1988 reg
= DREG(insn
, 0);
1989 gen_partset_reg(OS_WORD
, reg
, sr
);
1992 DISAS_INSN(move_to_sr
)
1995 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1998 gen_set_sr(env
, s
, insn
, 0);
2002 DISAS_INSN(move_from_usp
)
2005 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2008 tcg_gen_ld_i32(AREG(insn
, 0), cpu_env
,
2009 offsetof(CPUM68KState
, sp
[M68K_USP
]));
2012 DISAS_INSN(move_to_usp
)
2015 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2018 tcg_gen_st_i32(AREG(insn
, 0), cpu_env
,
2019 offsetof(CPUM68KState
, sp
[M68K_USP
]));
2024 gen_exception(s
, s
->pc
, EXCP_HALT_INSN
);
2032 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2036 ext
= read_im16(env
, s
);
2038 gen_set_sr_im(s
, ext
, 0);
2039 tcg_gen_movi_i32(cpu_halted
, 1);
2040 gen_exception(s
, s
->pc
, EXCP_HLT
);
2046 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2049 gen_exception(s
, s
->pc
- 2, EXCP_RTE
);
2058 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2062 ext
= read_im16(env
, s
);
2065 reg
= AREG(ext
, 12);
2067 reg
= DREG(ext
, 12);
2069 gen_helper_movec(cpu_env
, tcg_const_i32(ext
& 0xfff), reg
);
2076 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2079 /* ICache fetch. Implement as no-op. */
2085 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2088 /* Cache push/invalidate. Implement as no-op. */
2093 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2098 M68kCPU
*cpu
= m68k_env_get_cpu(env
);
2101 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2104 /* TODO: Implement wdebug. */
2105 cpu_abort(CPU(cpu
), "WDEBUG not implemented");
2110 gen_exception(s
, s
->pc
- 2, EXCP_TRAP0
+ (insn
& 0xf));
2113 /* ??? FP exceptions are not implemented. Most exceptions are deferred until
2114 immediately before the next FP instruction is executed. */
2128 ext
= read_im16(env
, s
);
2129 opmode
= ext
& 0x7f;
2130 switch ((ext
>> 13) & 7) {
2135 case 3: /* fmove out */
2137 tmp32
= tcg_temp_new_i32();
2139 /* ??? TODO: Proper behavior on overflow. */
2140 switch ((ext
>> 10) & 7) {
2143 gen_helper_f64_to_i32(tmp32
, cpu_env
, src
);
2147 gen_helper_f64_to_f32(tmp32
, cpu_env
, src
);
2151 gen_helper_f64_to_i32(tmp32
, cpu_env
, src
);
2153 case 5: /* OS_DOUBLE */
2154 tcg_gen_mov_i32(tmp32
, AREG(insn
, 0));
2155 switch ((insn
>> 3) & 7) {
2160 tcg_gen_addi_i32(tmp32
, tmp32
, -8);
2163 offset
= cpu_ldsw_code(env
, s
->pc
);
2165 tcg_gen_addi_i32(tmp32
, tmp32
, offset
);
2170 gen_store64(s
, tmp32
, src
);
2171 switch ((insn
>> 3) & 7) {
2173 tcg_gen_addi_i32(tmp32
, tmp32
, 8);
2174 tcg_gen_mov_i32(AREG(insn
, 0), tmp32
);
2177 tcg_gen_mov_i32(AREG(insn
, 0), tmp32
);
2180 tcg_temp_free_i32(tmp32
);
2184 gen_helper_f64_to_i32(tmp32
, cpu_env
, src
);
2189 DEST_EA(env
, insn
, opsize
, tmp32
, NULL
);
2190 tcg_temp_free_i32(tmp32
);
2192 case 4: /* fmove to control register. */
2193 switch ((ext
>> 10) & 7) {
2195 /* Not implemented. Ignore writes. */
2200 cpu_abort(NULL
, "Unimplemented: fmove to control %d",
2204 case 5: /* fmove from control register. */
2205 switch ((ext
>> 10) & 7) {
2207 /* Not implemented. Always return zero. */
2208 tmp32
= tcg_const_i32(0);
2213 cpu_abort(NULL
, "Unimplemented: fmove from control %d",
2217 DEST_EA(env
, insn
, OS_LONG
, tmp32
, NULL
);
2219 case 6: /* fmovem */
2225 if ((ext
& 0x1f00) != 0x1000 || (ext
& 0xff) == 0)
2227 tmp32
= gen_lea(env
, s
, insn
, OS_LONG
);
2228 if (IS_NULL_QREG(tmp32
)) {
2232 addr
= tcg_temp_new_i32();
2233 tcg_gen_mov_i32(addr
, tmp32
);
2235 for (i
= 0; i
< 8; i
++) {
2238 if (ext
& (1 << 13)) {
2240 tcg_gen_qemu_stf64(dest
, addr
, IS_USER(s
));
2243 tcg_gen_qemu_ldf64(dest
, addr
, IS_USER(s
));
2245 if (ext
& (mask
- 1))
2246 tcg_gen_addi_i32(addr
, addr
, 8);
2250 tcg_temp_free_i32(addr
);
2254 if (ext
& (1 << 14)) {
2255 /* Source effective address. */
2256 switch ((ext
>> 10) & 7) {
2257 case 0: opsize
= OS_LONG
; break;
2258 case 1: opsize
= OS_SINGLE
; break;
2259 case 4: opsize
= OS_WORD
; break;
2260 case 5: opsize
= OS_DOUBLE
; break;
2261 case 6: opsize
= OS_BYTE
; break;
2265 if (opsize
== OS_DOUBLE
) {
2266 tmp32
= tcg_temp_new_i32();
2267 tcg_gen_mov_i32(tmp32
, AREG(insn
, 0));
2268 switch ((insn
>> 3) & 7) {
2273 tcg_gen_addi_i32(tmp32
, tmp32
, -8);
2276 offset
= cpu_ldsw_code(env
, s
->pc
);
2278 tcg_gen_addi_i32(tmp32
, tmp32
, offset
);
2281 offset
= cpu_ldsw_code(env
, s
->pc
);
2282 offset
+= s
->pc
- 2;
2284 tcg_gen_addi_i32(tmp32
, tmp32
, offset
);
2289 src
= gen_load64(s
, tmp32
);
2290 switch ((insn
>> 3) & 7) {
2292 tcg_gen_addi_i32(tmp32
, tmp32
, 8);
2293 tcg_gen_mov_i32(AREG(insn
, 0), tmp32
);
2296 tcg_gen_mov_i32(AREG(insn
, 0), tmp32
);
2299 tcg_temp_free_i32(tmp32
);
2301 SRC_EA(env
, tmp32
, opsize
, 1, NULL
);
2302 src
= tcg_temp_new_i64();
2307 gen_helper_i32_to_f64(src
, cpu_env
, tmp32
);
2310 gen_helper_f32_to_f64(src
, cpu_env
, tmp32
);
2315 /* Source register. */
2316 src
= FREG(ext
, 10);
2318 dest
= FREG(ext
, 7);
2319 res
= tcg_temp_new_i64();
2321 tcg_gen_mov_f64(res
, dest
);
2325 case 0: case 0x40: case 0x44: /* fmove */
2326 tcg_gen_mov_f64(res
, src
);
2329 gen_helper_iround_f64(res
, cpu_env
, src
);
2332 case 3: /* fintrz */
2333 gen_helper_itrunc_f64(res
, cpu_env
, src
);
2336 case 4: case 0x41: case 0x45: /* fsqrt */
2337 gen_helper_sqrt_f64(res
, cpu_env
, src
);
2339 case 0x18: case 0x58: case 0x5c: /* fabs */
2340 gen_helper_abs_f64(res
, src
);
2342 case 0x1a: case 0x5a: case 0x5e: /* fneg */
2343 gen_helper_chs_f64(res
, src
);
2345 case 0x20: case 0x60: case 0x64: /* fdiv */
2346 gen_helper_div_f64(res
, cpu_env
, res
, src
);
2348 case 0x22: case 0x62: case 0x66: /* fadd */
2349 gen_helper_add_f64(res
, cpu_env
, res
, src
);
2351 case 0x23: case 0x63: case 0x67: /* fmul */
2352 gen_helper_mul_f64(res
, cpu_env
, res
, src
);
2354 case 0x28: case 0x68: case 0x6c: /* fsub */
2355 gen_helper_sub_f64(res
, cpu_env
, res
, src
);
2357 case 0x38: /* fcmp */
2358 gen_helper_sub_cmp_f64(res
, cpu_env
, res
, src
);
2362 case 0x3a: /* ftst */
2363 tcg_gen_mov_f64(res
, src
);
2370 if (ext
& (1 << 14)) {
2371 tcg_temp_free_i64(src
);
2374 if (opmode
& 0x40) {
2375 if ((opmode
& 0x4) != 0)
2377 } else if ((s
->fpcr
& M68K_FPCR_PREC
) == 0) {
2382 TCGv tmp
= tcg_temp_new_i32();
2383 gen_helper_f64_to_f32(tmp
, cpu_env
, res
);
2384 gen_helper_f32_to_f64(res
, cpu_env
, tmp
);
2385 tcg_temp_free_i32(tmp
);
2387 tcg_gen_mov_f64(QREG_FP_RESULT
, res
);
2389 tcg_gen_mov_f64(dest
, res
);
2391 tcg_temp_free_i64(res
);
2394 /* FIXME: Is this right for offset addressing modes? */
2396 disas_undef_fpu(env
, s
, insn
);
2407 offset
= cpu_ldsw_code(env
, s
->pc
);
2409 if (insn
& (1 << 6)) {
2410 offset
= (offset
<< 16) | read_im16(env
, s
);
2413 l1
= gen_new_label();
2414 /* TODO: Raise BSUN exception. */
2415 flag
= tcg_temp_new();
2416 gen_helper_compare_f64(flag
, cpu_env
, QREG_FP_RESULT
);
2417 /* Jump to l1 if condition is true. */
2418 switch (insn
& 0xf) {
2421 case 1: /* eq (=0) */
2422 tcg_gen_brcond_i32(TCG_COND_EQ
, flag
, tcg_const_i32(0), l1
);
2424 case 2: /* ogt (=1) */
2425 tcg_gen_brcond_i32(TCG_COND_EQ
, flag
, tcg_const_i32(1), l1
);
2427 case 3: /* oge (=0 or =1) */
2428 tcg_gen_brcond_i32(TCG_COND_LEU
, flag
, tcg_const_i32(1), l1
);
2430 case 4: /* olt (=-1) */
2431 tcg_gen_brcond_i32(TCG_COND_LT
, flag
, tcg_const_i32(0), l1
);
2433 case 5: /* ole (=-1 or =0) */
2434 tcg_gen_brcond_i32(TCG_COND_LE
, flag
, tcg_const_i32(0), l1
);
2436 case 6: /* ogl (=-1 or =1) */
2437 tcg_gen_andi_i32(flag
, flag
, 1);
2438 tcg_gen_brcond_i32(TCG_COND_NE
, flag
, tcg_const_i32(0), l1
);
2440 case 7: /* or (=2) */
2441 tcg_gen_brcond_i32(TCG_COND_EQ
, flag
, tcg_const_i32(2), l1
);
2443 case 8: /* un (<2) */
2444 tcg_gen_brcond_i32(TCG_COND_LT
, flag
, tcg_const_i32(2), l1
);
2446 case 9: /* ueq (=0 or =2) */
2447 tcg_gen_andi_i32(flag
, flag
, 1);
2448 tcg_gen_brcond_i32(TCG_COND_EQ
, flag
, tcg_const_i32(0), l1
);
2450 case 10: /* ugt (>0) */
2451 tcg_gen_brcond_i32(TCG_COND_GT
, flag
, tcg_const_i32(0), l1
);
2453 case 11: /* uge (>=0) */
2454 tcg_gen_brcond_i32(TCG_COND_GE
, flag
, tcg_const_i32(0), l1
);
2456 case 12: /* ult (=-1 or =2) */
2457 tcg_gen_brcond_i32(TCG_COND_GEU
, flag
, tcg_const_i32(2), l1
);
2459 case 13: /* ule (!=1) */
2460 tcg_gen_brcond_i32(TCG_COND_NE
, flag
, tcg_const_i32(1), l1
);
2462 case 14: /* ne (!=0) */
2463 tcg_gen_brcond_i32(TCG_COND_NE
, flag
, tcg_const_i32(0), l1
);
2469 gen_jmp_tb(s
, 0, s
->pc
);
2471 gen_jmp_tb(s
, 1, addr
+ offset
);
2474 DISAS_INSN(frestore
)
2476 M68kCPU
*cpu
= m68k_env_get_cpu(env
);
2478 /* TODO: Implement frestore. */
2479 cpu_abort(CPU(cpu
), "FRESTORE not implemented");
2484 M68kCPU
*cpu
= m68k_env_get_cpu(env
);
2486 /* TODO: Implement fsave. */
2487 cpu_abort(CPU(cpu
), "FSAVE not implemented");
2490 static inline TCGv
gen_mac_extract_word(DisasContext
*s
, TCGv val
, int upper
)
2492 TCGv tmp
= tcg_temp_new();
2493 if (s
->env
->macsr
& MACSR_FI
) {
2495 tcg_gen_andi_i32(tmp
, val
, 0xffff0000);
2497 tcg_gen_shli_i32(tmp
, val
, 16);
2498 } else if (s
->env
->macsr
& MACSR_SU
) {
2500 tcg_gen_sari_i32(tmp
, val
, 16);
2502 tcg_gen_ext16s_i32(tmp
, val
);
2505 tcg_gen_shri_i32(tmp
, val
, 16);
2507 tcg_gen_ext16u_i32(tmp
, val
);
2512 static void gen_mac_clear_flags(void)
2514 tcg_gen_andi_i32(QREG_MACSR
, QREG_MACSR
,
2515 ~(MACSR_V
| MACSR_Z
| MACSR_N
| MACSR_EV
));
2531 s
->mactmp
= tcg_temp_new_i64();
2535 ext
= read_im16(env
, s
);
2537 acc
= ((insn
>> 7) & 1) | ((ext
>> 3) & 2);
2538 dual
= ((insn
& 0x30) != 0 && (ext
& 3) != 0);
2539 if (dual
&& !m68k_feature(s
->env
, M68K_FEATURE_CF_EMAC_B
)) {
2540 disas_undef(env
, s
, insn
);
2544 /* MAC with load. */
2545 tmp
= gen_lea(env
, s
, insn
, OS_LONG
);
2546 addr
= tcg_temp_new();
2547 tcg_gen_and_i32(addr
, tmp
, QREG_MAC_MASK
);
2548 /* Load the value now to ensure correct exception behavior.
2549 Perform writeback after reading the MAC inputs. */
2550 loadval
= gen_load(s
, OS_LONG
, addr
, 0);
2553 rx
= (ext
& 0x8000) ? AREG(ext
, 12) : DREG(insn
, 12);
2554 ry
= (ext
& 8) ? AREG(ext
, 0) : DREG(ext
, 0);
2556 loadval
= addr
= NULL_QREG
;
2557 rx
= (insn
& 0x40) ? AREG(insn
, 9) : DREG(insn
, 9);
2558 ry
= (insn
& 8) ? AREG(insn
, 0) : DREG(insn
, 0);
2561 gen_mac_clear_flags();
2564 /* Disabled because conditional branches clobber temporary vars. */
2565 if ((s
->env
->macsr
& MACSR_OMC
) != 0 && !dual
) {
2566 /* Skip the multiply if we know we will ignore it. */
2567 l1
= gen_new_label();
2568 tmp
= tcg_temp_new();
2569 tcg_gen_andi_i32(tmp
, QREG_MACSR
, 1 << (acc
+ 8));
2570 gen_op_jmp_nz32(tmp
, l1
);
2574 if ((ext
& 0x0800) == 0) {
2576 rx
= gen_mac_extract_word(s
, rx
, (ext
& 0x80) != 0);
2577 ry
= gen_mac_extract_word(s
, ry
, (ext
& 0x40) != 0);
2579 if (s
->env
->macsr
& MACSR_FI
) {
2580 gen_helper_macmulf(s
->mactmp
, cpu_env
, rx
, ry
);
2582 if (s
->env
->macsr
& MACSR_SU
)
2583 gen_helper_macmuls(s
->mactmp
, cpu_env
, rx
, ry
);
2585 gen_helper_macmulu(s
->mactmp
, cpu_env
, rx
, ry
);
2586 switch ((ext
>> 9) & 3) {
2588 tcg_gen_shli_i64(s
->mactmp
, s
->mactmp
, 1);
2591 tcg_gen_shri_i64(s
->mactmp
, s
->mactmp
, 1);
2597 /* Save the overflow flag from the multiply. */
2598 saved_flags
= tcg_temp_new();
2599 tcg_gen_mov_i32(saved_flags
, QREG_MACSR
);
2601 saved_flags
= NULL_QREG
;
2605 /* Disabled because conditional branches clobber temporary vars. */
2606 if ((s
->env
->macsr
& MACSR_OMC
) != 0 && dual
) {
2607 /* Skip the accumulate if the value is already saturated. */
2608 l1
= gen_new_label();
2609 tmp
= tcg_temp_new();
2610 gen_op_and32(tmp
, QREG_MACSR
, tcg_const_i32(MACSR_PAV0
<< acc
));
2611 gen_op_jmp_nz32(tmp
, l1
);
2616 tcg_gen_sub_i64(MACREG(acc
), MACREG(acc
), s
->mactmp
);
2618 tcg_gen_add_i64(MACREG(acc
), MACREG(acc
), s
->mactmp
);
2620 if (s
->env
->macsr
& MACSR_FI
)
2621 gen_helper_macsatf(cpu_env
, tcg_const_i32(acc
));
2622 else if (s
->env
->macsr
& MACSR_SU
)
2623 gen_helper_macsats(cpu_env
, tcg_const_i32(acc
));
2625 gen_helper_macsatu(cpu_env
, tcg_const_i32(acc
));
2628 /* Disabled because conditional branches clobber temporary vars. */
2634 /* Dual accumulate variant. */
2635 acc
= (ext
>> 2) & 3;
2636 /* Restore the overflow flag from the multiplier. */
2637 tcg_gen_mov_i32(QREG_MACSR
, saved_flags
);
2639 /* Disabled because conditional branches clobber temporary vars. */
2640 if ((s
->env
->macsr
& MACSR_OMC
) != 0) {
2641 /* Skip the accumulate if the value is already saturated. */
2642 l1
= gen_new_label();
2643 tmp
= tcg_temp_new();
2644 gen_op_and32(tmp
, QREG_MACSR
, tcg_const_i32(MACSR_PAV0
<< acc
));
2645 gen_op_jmp_nz32(tmp
, l1
);
2649 tcg_gen_sub_i64(MACREG(acc
), MACREG(acc
), s
->mactmp
);
2651 tcg_gen_add_i64(MACREG(acc
), MACREG(acc
), s
->mactmp
);
2652 if (s
->env
->macsr
& MACSR_FI
)
2653 gen_helper_macsatf(cpu_env
, tcg_const_i32(acc
));
2654 else if (s
->env
->macsr
& MACSR_SU
)
2655 gen_helper_macsats(cpu_env
, tcg_const_i32(acc
));
2657 gen_helper_macsatu(cpu_env
, tcg_const_i32(acc
));
2659 /* Disabled because conditional branches clobber temporary vars. */
2664 gen_helper_mac_set_flags(cpu_env
, tcg_const_i32(acc
));
2668 rw
= (insn
& 0x40) ? AREG(insn
, 9) : DREG(insn
, 9);
2669 tcg_gen_mov_i32(rw
, loadval
);
2670 /* FIXME: Should address writeback happen with the masked or
2672 switch ((insn
>> 3) & 7) {
2673 case 3: /* Post-increment. */
2674 tcg_gen_addi_i32(AREG(insn
, 0), addr
, 4);
2676 case 4: /* Pre-decrement. */
2677 tcg_gen_mov_i32(AREG(insn
, 0), addr
);
2682 DISAS_INSN(from_mac
)
2688 rx
= (insn
& 8) ? AREG(insn
, 0) : DREG(insn
, 0);
2689 accnum
= (insn
>> 9) & 3;
2690 acc
= MACREG(accnum
);
2691 if (s
->env
->macsr
& MACSR_FI
) {
2692 gen_helper_get_macf(rx
, cpu_env
, acc
);
2693 } else if ((s
->env
->macsr
& MACSR_OMC
) == 0) {
2694 tcg_gen_extrl_i64_i32(rx
, acc
);
2695 } else if (s
->env
->macsr
& MACSR_SU
) {
2696 gen_helper_get_macs(rx
, acc
);
2698 gen_helper_get_macu(rx
, acc
);
2701 tcg_gen_movi_i64(acc
, 0);
2702 tcg_gen_andi_i32(QREG_MACSR
, QREG_MACSR
, ~(MACSR_PAV0
<< accnum
));
2706 DISAS_INSN(move_mac
)
2708 /* FIXME: This can be done without a helper. */
2712 dest
= tcg_const_i32((insn
>> 9) & 3);
2713 gen_helper_mac_move(cpu_env
, dest
, tcg_const_i32(src
));
2714 gen_mac_clear_flags();
2715 gen_helper_mac_set_flags(cpu_env
, dest
);
2718 DISAS_INSN(from_macsr
)
2722 reg
= (insn
& 8) ? AREG(insn
, 0) : DREG(insn
, 0);
2723 tcg_gen_mov_i32(reg
, QREG_MACSR
);
2726 DISAS_INSN(from_mask
)
2729 reg
= (insn
& 8) ? AREG(insn
, 0) : DREG(insn
, 0);
2730 tcg_gen_mov_i32(reg
, QREG_MAC_MASK
);
2733 DISAS_INSN(from_mext
)
2737 reg
= (insn
& 8) ? AREG(insn
, 0) : DREG(insn
, 0);
2738 acc
= tcg_const_i32((insn
& 0x400) ? 2 : 0);
2739 if (s
->env
->macsr
& MACSR_FI
)
2740 gen_helper_get_mac_extf(reg
, cpu_env
, acc
);
2742 gen_helper_get_mac_exti(reg
, cpu_env
, acc
);
2745 DISAS_INSN(macsr_to_ccr
)
2747 tcg_gen_movi_i32(QREG_CC_X
, 0);
2748 tcg_gen_andi_i32(QREG_CC_DEST
, QREG_MACSR
, 0xf);
2749 s
->cc_op
= CC_OP_FLAGS
;
2757 accnum
= (insn
>> 9) & 3;
2758 acc
= MACREG(accnum
);
2759 SRC_EA(env
, val
, OS_LONG
, 0, NULL
);
2760 if (s
->env
->macsr
& MACSR_FI
) {
2761 tcg_gen_ext_i32_i64(acc
, val
);
2762 tcg_gen_shli_i64(acc
, acc
, 8);
2763 } else if (s
->env
->macsr
& MACSR_SU
) {
2764 tcg_gen_ext_i32_i64(acc
, val
);
2766 tcg_gen_extu_i32_i64(acc
, val
);
2768 tcg_gen_andi_i32(QREG_MACSR
, QREG_MACSR
, ~(MACSR_PAV0
<< accnum
));
2769 gen_mac_clear_flags();
2770 gen_helper_mac_set_flags(cpu_env
, tcg_const_i32(accnum
));
2773 DISAS_INSN(to_macsr
)
2776 SRC_EA(env
, val
, OS_LONG
, 0, NULL
);
2777 gen_helper_set_macsr(cpu_env
, val
);
2784 SRC_EA(env
, val
, OS_LONG
, 0, NULL
);
2785 tcg_gen_ori_i32(QREG_MAC_MASK
, val
, 0xffff0000);
2792 SRC_EA(env
, val
, OS_LONG
, 0, NULL
);
2793 acc
= tcg_const_i32((insn
& 0x400) ? 2 : 0);
2794 if (s
->env
->macsr
& MACSR_FI
)
2795 gen_helper_set_mac_extf(cpu_env
, val
, acc
);
2796 else if (s
->env
->macsr
& MACSR_SU
)
2797 gen_helper_set_mac_exts(cpu_env
, val
, acc
);
2799 gen_helper_set_mac_extu(cpu_env
, val
, acc
);
2802 static disas_proc opcode_table
[65536];
2805 register_opcode (disas_proc proc
, uint16_t opcode
, uint16_t mask
)
2811 /* Sanity check. All set bits must be included in the mask. */
2812 if (opcode
& ~mask
) {
2814 "qemu internal error: bogus opcode definition %04x/%04x\n",
2818 /* This could probably be cleverer. For now just optimize the case where
2819 the top bits are known. */
2820 /* Find the first zero bit in the mask. */
2822 while ((i
& mask
) != 0)
2824 /* Iterate over all combinations of this and lower bits. */
2829 from
= opcode
& ~(i
- 1);
2831 for (i
= from
; i
< to
; i
++) {
2832 if ((i
& mask
) == opcode
)
2833 opcode_table
[i
] = proc
;
2837 /* Register m68k opcode handlers. Order is important.
2838 Later insn override earlier ones. */
2839 void register_m68k_insns (CPUM68KState
*env
)
2841 /* Build the opcode table only once to avoid
2842 multithreading issues. */
2843 if (opcode_table
[0] != NULL
) {
2847 /* use BASE() for instruction available
2848 * for CF_ISA_A and M68000.
2850 #define BASE(name, opcode, mask) \
2851 register_opcode(disas_##name, 0x##opcode, 0x##mask)
2852 #define INSN(name, opcode, mask, feature) do { \
2853 if (m68k_feature(env, M68K_FEATURE_##feature)) \
2854 BASE(name, opcode, mask); \
2856 BASE(undef
, 0000, 0000);
2857 INSN(arith_im
, 0080, fff8
, CF_ISA_A
);
2858 INSN(arith_im
, 0000, ff00
, M68000
);
2859 INSN(undef
, 00c0
, ffc0
, M68000
);
2860 INSN(bitrev
, 00c0
, fff8
, CF_ISA_APLUSC
);
2861 BASE(bitop_reg
, 0100, f1c0
);
2862 BASE(bitop_reg
, 0140, f1c0
);
2863 BASE(bitop_reg
, 0180, f1c0
);
2864 BASE(bitop_reg
, 01c0
, f1c0
);
2865 INSN(arith_im
, 0280, fff8
, CF_ISA_A
);
2866 INSN(arith_im
, 0200, ff00
, M68000
);
2867 INSN(undef
, 02c0
, ffc0
, M68000
);
2868 INSN(byterev
, 02c0
, fff8
, CF_ISA_APLUSC
);
2869 INSN(arith_im
, 0480, fff8
, CF_ISA_A
);
2870 INSN(arith_im
, 0400, ff00
, M68000
);
2871 INSN(undef
, 04c0
, ffc0
, M68000
);
2872 INSN(arith_im
, 0600, ff00
, M68000
);
2873 INSN(undef
, 06c0
, ffc0
, M68000
);
2874 INSN(ff1
, 04c0
, fff8
, CF_ISA_APLUSC
);
2875 INSN(arith_im
, 0680, fff8
, CF_ISA_A
);
2876 INSN(arith_im
, 0c00
, ff38
, CF_ISA_A
);
2877 INSN(arith_im
, 0c00
, ff00
, M68000
);
2878 BASE(bitop_im
, 0800, ffc0
);
2879 BASE(bitop_im
, 0840, ffc0
);
2880 BASE(bitop_im
, 0880, ffc0
);
2881 BASE(bitop_im
, 08c0
, ffc0
);
2882 INSN(arith_im
, 0a80
, fff8
, CF_ISA_A
);
2883 INSN(arith_im
, 0a00
, ff00
, M68000
);
2884 BASE(move
, 1000, f000
);
2885 BASE(move
, 2000, f000
);
2886 BASE(move
, 3000, f000
);
2887 INSN(strldsr
, 40e7
, ffff
, CF_ISA_APLUSC
);
2888 INSN(negx
, 4080, fff8
, CF_ISA_A
);
2889 INSN(move_from_sr
, 40c0
, fff8
, CF_ISA_A
);
2890 INSN(move_from_sr
, 40c0
, ffc0
, M68000
);
2891 BASE(lea
, 41c0
, f1c0
);
2892 BASE(clr
, 4200, ff00
);
2893 BASE(undef
, 42c0
, ffc0
);
2894 INSN(move_from_ccr
, 42c0
, fff8
, CF_ISA_A
);
2895 INSN(neg
, 4480, fff8
, CF_ISA_A
);
2896 INSN(neg
, 4400, ff00
, M68000
);
2897 INSN(undef
, 44c0
, ffc0
, M68000
);
2898 BASE(move_to_ccr
, 44c0
, ffc0
);
2899 INSN(not, 4680, fff8
, CF_ISA_A
);
2900 INSN(not, 4600, ff00
, M68000
);
2901 INSN(undef
, 46c0
, ffc0
, M68000
);
2902 INSN(move_to_sr
, 46c0
, ffc0
, CF_ISA_A
);
2903 BASE(pea
, 4840, ffc0
);
2904 BASE(swap
, 4840, fff8
);
2905 BASE(movem
, 48c0
, fbc0
);
2906 BASE(ext
, 4880, fff8
);
2907 BASE(ext
, 48c0
, fff8
);
2908 BASE(ext
, 49c0
, fff8
);
2909 BASE(tst
, 4a00
, ff00
);
2910 INSN(tas
, 4ac0
, ffc0
, CF_ISA_B
);
2911 INSN(tas
, 4ac0
, ffc0
, M68000
);
2912 INSN(halt
, 4ac8
, ffff
, CF_ISA_A
);
2913 INSN(pulse
, 4acc
, ffff
, CF_ISA_A
);
2914 BASE(illegal
, 4afc
, ffff
);
2915 INSN(mull
, 4c00
, ffc0
, CF_ISA_A
);
2916 INSN(mull
, 4c00
, ffc0
, LONG_MULDIV
);
2917 INSN(divl
, 4c40
, ffc0
, CF_ISA_A
);
2918 INSN(divl
, 4c40
, ffc0
, LONG_MULDIV
);
2919 INSN(sats
, 4c80
, fff8
, CF_ISA_B
);
2920 BASE(trap
, 4e40
, fff0
);
2921 BASE(link
, 4e50
, fff8
);
2922 BASE(unlk
, 4e58
, fff8
);
2923 INSN(move_to_usp
, 4e60
, fff8
, USP
);
2924 INSN(move_from_usp
, 4e68
, fff8
, USP
);
2925 BASE(nop
, 4e71
, ffff
);
2926 BASE(stop
, 4e72
, ffff
);
2927 BASE(rte
, 4e73
, ffff
);
2928 BASE(rts
, 4e75
, ffff
);
2929 INSN(movec
, 4e7b
, ffff
, CF_ISA_A
);
2930 BASE(jump
, 4e80
, ffc0
);
2931 INSN(jump
, 4ec0
, ffc0
, CF_ISA_A
);
2932 INSN(addsubq
, 5180, f1c0
, CF_ISA_A
);
2933 INSN(jump
, 4ec0
, ffc0
, M68000
);
2934 INSN(addsubq
, 5000, f080
, M68000
);
2935 INSN(addsubq
, 5080, f0c0
, M68000
);
2936 INSN(scc
, 50c0
, f0f8
, CF_ISA_A
);
2937 INSN(addsubq
, 5080, f1c0
, CF_ISA_A
);
2938 INSN(tpf
, 51f8
, fff8
, CF_ISA_A
);
2940 /* Branch instructions. */
2941 BASE(branch
, 6000, f000
);
2942 /* Disable long branch instructions, then add back the ones we want. */
2943 BASE(undef
, 60ff
, f0ff
); /* All long branches. */
2944 INSN(branch
, 60ff
, f0ff
, CF_ISA_B
);
2945 INSN(undef
, 60ff
, ffff
, CF_ISA_B
); /* bra.l */
2946 INSN(branch
, 60ff
, ffff
, BRAL
);
2947 INSN(branch
, 60ff
, f0ff
, BCCL
);
2949 BASE(moveq
, 7000, f100
);
2950 INSN(mvzs
, 7100, f100
, CF_ISA_B
);
2951 BASE(or, 8000, f000
);
2952 BASE(divw
, 80c0
, f0c0
);
2953 BASE(addsub
, 9000, f000
);
2954 INSN(subx
, 9180, f1f8
, CF_ISA_A
);
2955 INSN(suba
, 91c0
, f1c0
, CF_ISA_A
);
2957 BASE(undef_mac
, a000
, f000
);
2958 INSN(mac
, a000
, f100
, CF_EMAC
);
2959 INSN(from_mac
, a180
, f9b0
, CF_EMAC
);
2960 INSN(move_mac
, a110
, f9fc
, CF_EMAC
);
2961 INSN(from_macsr
,a980
, f9f0
, CF_EMAC
);
2962 INSN(from_mask
, ad80
, fff0
, CF_EMAC
);
2963 INSN(from_mext
, ab80
, fbf0
, CF_EMAC
);
2964 INSN(macsr_to_ccr
, a9c0
, ffff
, CF_EMAC
);
2965 INSN(to_mac
, a100
, f9c0
, CF_EMAC
);
2966 INSN(to_macsr
, a900
, ffc0
, CF_EMAC
);
2967 INSN(to_mext
, ab00
, fbc0
, CF_EMAC
);
2968 INSN(to_mask
, ad00
, ffc0
, CF_EMAC
);
2970 INSN(mov3q
, a140
, f1c0
, CF_ISA_B
);
2971 INSN(cmp
, b000
, f1c0
, CF_ISA_B
); /* cmp.b */
2972 INSN(cmp
, b040
, f1c0
, CF_ISA_B
); /* cmp.w */
2973 INSN(cmpa
, b0c0
, f1c0
, CF_ISA_B
); /* cmpa.w */
2974 INSN(cmp
, b080
, f1c0
, CF_ISA_A
);
2975 INSN(cmpa
, b1c0
, f1c0
, CF_ISA_A
);
2976 INSN(cmp
, b000
, f100
, M68000
);
2977 INSN(eor
, b100
, f100
, M68000
);
2978 INSN(cmpa
, b0c0
, f0c0
, M68000
);
2979 INSN(eor
, b180
, f1c0
, CF_ISA_A
);
2980 BASE(and, c000
, f000
);
2981 BASE(mulw
, c0c0
, f0c0
);
2982 BASE(addsub
, d000
, f000
);
2983 INSN(addx
, d180
, f1f8
, CF_ISA_A
);
2984 INSN(adda
, d1c0
, f1c0
, CF_ISA_A
);
2985 INSN(adda
, d0c0
, f0c0
, M68000
);
2986 INSN(shift_im
, e080
, f0f0
, CF_ISA_A
);
2987 INSN(shift_reg
, e0a0
, f0f0
, CF_ISA_A
);
2988 INSN(undef_fpu
, f000
, f000
, CF_ISA_A
);
2989 INSN(fpu
, f200
, ffc0
, CF_FPU
);
2990 INSN(fbcc
, f280
, ffc0
, CF_FPU
);
2991 INSN(frestore
, f340
, ffc0
, CF_FPU
);
2992 INSN(fsave
, f340
, ffc0
, CF_FPU
);
2993 INSN(intouch
, f340
, ffc0
, CF_ISA_A
);
2994 INSN(cpushl
, f428
, ff38
, CF_ISA_A
);
2995 INSN(wddata
, fb00
, ff00
, CF_ISA_A
);
2996 INSN(wdebug
, fbc0
, ffc0
, CF_ISA_A
);
/* ??? Some of this implementation is not exception safe. We should always
   write back the result to memory before setting the condition codes. */
static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
    insn = read_im16(env, s);
    opcode_table[insn](env, s, insn);
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
    M68kCPU *cpu = m68k_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;

    /* generate intermediate code */
    dc->is_jmp = DISAS_NEXT;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->fpcr = env->fpcr;
    dc->user = (env->sr & SR_S) == 0;

    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;

        pc_offset = dc->pc - pc_start;
        gen_throws_exception = NULL;
        tcg_gen_insn_start(dc->pc);

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            gen_exception(dc, dc->pc, EXCP_DEBUG);
            dc->is_jmp = DISAS_JUMP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {

        dc->insn_pc = dc->pc;
        disas_m68k_insn(env, dc);
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO)
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
            gen_flush_cc_op(dc);
            tcg_gen_movi_i32(QREG_PC, dc->pc);
        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
        switch(dc->is_jmp) {
            gen_flush_cc_op(dc);
            gen_jmp_tb(dc, 0, dc->pc);
            gen_flush_cc_op(dc);
            /* indicate that the hash table must be used to find the next TB */
            /* nothing more to generate */
    gen_tb_end(tb, num_insns);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
3119 M68kCPU
*cpu
= M68K_CPU(cs
);
3120 CPUM68KState
*env
= &cpu
->env
;
3124 for (i
= 0; i
< 8; i
++)
3126 u
.d
= env
->fregs
[i
];
3127 cpu_fprintf (f
, "D%d = %08x A%d = %08x F%d = %08x%08x (%12g)\n",
3128 i
, env
->dregs
[i
], i
, env
->aregs
[i
],
3129 i
, u
.l
.upper
, u
.l
.lower
, *(double *)&u
.d
);
3131 cpu_fprintf (f
, "PC = %08x ", env
->pc
);
3133 cpu_fprintf (f
, "SR = %04x %c%c%c%c%c ", sr
, (sr
& 0x10) ? 'X' : '-',
3134 (sr
& CCF_N
) ? 'N' : '-', (sr
& CCF_Z
) ? 'Z' : '-',
3135 (sr
& CCF_V
) ? 'V' : '-', (sr
& CCF_C
) ? 'C' : '-');
3136 cpu_fprintf (f
, "FPRESULT = %12g\n", *(double *)&env
->fp_result
);
3139 void restore_state_to_opc(CPUM68KState
*env
, TranslationBlock
*tb
,