 * Copyright (c) 2005-2007 CodeSourcery
 * Written by Paul Brook
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
//#define DEBUG_DISPATCH 1

/* Fake floating point.  */
#define tcg_gen_mov_f64 tcg_gen_mov_i64
#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64

#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#define DEFF64(name, offset) static TCGv_i64 QREG_##name;
static TCGv_ptr cpu_env;

static char cpu_reg_names[3*8*3 + 5*4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
static TCGv_i64 cpu_fregs[8];
static TCGv_i64 cpu_macc[4];
#define DREG(insn, pos) cpu_dregs[((insn) >> (pos)) & 7]
#define AREG(insn, pos) cpu_aregs[((insn) >> (pos)) & 7]
#define FREG(insn, pos) cpu_fregs[((insn) >> (pos)) & 7]
#define MACREG(acc) cpu_macc[acc]
#define QREG_SP cpu_aregs[7]
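/* Illustrative note (not in the original source): these macros select a
   register via the 3-bit field at bit POS of the instruction word.  For
   example, with insn = 0xd280 (add.l %d0,%d1):

       DREG(insn, 9)  ->  cpu_dregs[(0xd280 >> 9) & 7]  ->  cpu_dregs[1]
       DREG(insn, 0)  ->  cpu_dregs[(0xd280 >> 0) & 7]  ->  cpu_dregs[0]
*/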
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
/* Used to distinguish stores from bad addressing modes.  */
static TCGv store_dummy;
#include "gen-icount.h"

void m68k_tcg_init(void)

#define DEFO32(name, offset) QREG_##name = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, offset), #name);
#define DEFO64(name, offset) QREG_##name = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, offset), #name);
#define DEFF64(name, offset) DEFO64(name, offset)
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 8; i++) {
        cpu_dregs[i] = tcg_global_mem_new(TCG_AREG0,
                                          offsetof(CPUM68KState, dregs[i]), p);
        cpu_aregs[i] = tcg_global_mem_new(TCG_AREG0,
                                          offsetof(CPUM68KState, aregs[i]), p);
        cpu_fregs[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUM68KState, fregs[i]), p);
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUM68KState, macc[i]), p);
    }

    NULL_QREG = tcg_global_mem_new(TCG_AREG0, -4, "NULL");
    store_dummy = tcg_global_mem_new(TCG_AREG0, -8, "NULL");
static inline void qemu_assert(int cond, const char *msg)

    fprintf (stderr, "badness: %s\n", msg);
/* internal defines */
typedef struct DisasContext {
    target_ulong insn_pc; /* Start of the current instruction.  */
    struct TranslationBlock *tb;
    int singlestep_enabled;

#define DISAS_JUMP_NEXT 4
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) s->user

/* XXX: move that elsewhere */
/* ??? Fix exceptions.  */
static void *gen_throws_exception;
#define gen_last_qop NULL
typedef void (*disas_proc)(DisasContext *, uint16_t);

#ifdef DEBUG_DISPATCH
#define DISAS_INSN(name) \
  static void real_disas_##name (DisasContext *s, uint16_t insn); \
  static void disas_##name (DisasContext *s, uint16_t insn) { \
    qemu_log("Dispatch " #name "\n"); \
    real_disas_##name(s, insn); } \
  static void real_disas_##name (DisasContext *s, uint16_t insn)
#else
#define DISAS_INSN(name) \
  static void disas_##name (DisasContext *s, uint16_t insn)
#endif
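/* Illustrative usage (assumption, mirroring the handlers below): each
   handler body is introduced by the macro and later wired into the
   dispatch table by register_opcode()/INSN():

       DISAS_INSN(nop)
       {
           // decode "insn" and emit TCG ops through the context "s"
       }
*/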
/* Generate a load from the specified address.  Narrow values are
   sign extended to full register width.  */
static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)

    int index = IS_USER(s);

    tmp = tcg_temp_new_i32();
        tcg_gen_qemu_ld8s(tmp, addr, index);
        tcg_gen_qemu_ld8u(tmp, addr, index);
        tcg_gen_qemu_ld16s(tmp, addr, index);
        tcg_gen_qemu_ld16u(tmp, addr, index);
        tcg_gen_qemu_ld32u(tmp, addr, index);
        qemu_assert(0, "bad load size");
    gen_throws_exception = gen_last_qop;
static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)

    int index = IS_USER(s);

    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ldf64(tmp, addr, index);
    gen_throws_exception = gen_last_qop;
/* Generate a store.  */
static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)

    int index = IS_USER(s);

        tcg_gen_qemu_st8(val, addr, index);
        tcg_gen_qemu_st16(val, addr, index);
        tcg_gen_qemu_st32(val, addr, index);
        qemu_assert(0, "bad store size");
    gen_throws_exception = gen_last_qop;
static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)

    int index = IS_USER(s);

    tcg_gen_qemu_stf64(val, addr, index);
    gen_throws_exception = gen_last_qop;
/* Generate an unsigned load if VAL is 0, a signed load if VAL is -1,
   otherwise generate a store.  */
static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
                     ea_what what)

    if (what == EA_STORE) {
        gen_store(s, opsize, addr, val);
    return gen_load(s, opsize, addr, what == EA_LOADS);
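/* Illustrative calls (assumption, matching the SRC_EA/DEST_EA macros
   below): a sign-extending word read is
       gen_ldst(s, OS_WORD, addr, NULL_QREG, EA_LOADS);
   while a store of "val" through the same interface is
       gen_ldst(s, OS_WORD, addr, val, EA_STORE);  */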
/* Read a 32-bit immediate constant.  */
static inline uint32_t read_im32(DisasContext *s)

    im = ((uint32_t)lduw_code(s->pc)) << 16;
    im |= lduw_code(s->pc);
/* Calculate an address index.  */
static TCGv gen_addr_index(uint16_t ext, TCGv tmp)

    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        tcg_gen_ext16s_i32(tmp, add);
    scale = (ext >> 9) & 3;
    tcg_gen_shli_i32(tmp, add, scale);
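/* Worked example (illustrative): for ext = 0x3e00, bit 15 is clear so the
   index register is DREG(ext, 12) == %d3; bit 11 is set so the full 32-bit
   value is used; scale = (0x3e00 >> 9) & 3 == 3, so the index is shifted
   left by 3, i.e. multiplied by 8.  */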
/* Handle a base + index + displacement effective address.
   A NULL_QREG base means pc-relative.  */
static TCGv gen_lea_indexed(DisasContext *s, int opsize, TCGv base)

    ext = lduw_code(s->pc);

    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))

        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))

        if ((ext & 0x30) > 0x10) {
            /* base displacement */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)lduw_code(s->pc);
        tmp = tcg_temp_new();
        if ((ext & 0x44) == 0) {
            add = gen_addr_index(ext, tmp);
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                base = tcg_const_i32(offset + bd);
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
        if (!IS_NULL_QREG(add)) {
            tcg_gen_addi_i32(tmp, add, bd);
            add = tcg_const_i32(bd);
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = gen_load(s, OS_LONG, add, 0);
            if ((ext & 0x44) == 4) {
                add = gen_addr_index(ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
            /* outer displacement */
            if ((ext & 3) == 2) {
                od = (int16_t)lduw_code(s->pc);
            tcg_gen_addi_i32(tmp, add, od);
        /* brief extension word format */
        tmp = tcg_temp_new();
        add = gen_addr_index(ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
/* Update the CPU env CC_OP state.  */
static inline void gen_flush_cc_op(DisasContext *s)

    if (s->cc_op != CC_OP_DYNAMIC)
        tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
/* Evaluate all the CC flags.  */
static inline void gen_flush_flags(DisasContext *s)

    if (s->cc_op == CC_OP_FLAGS)

    gen_helper_flush_flags(cpu_env, QREG_CC_OP);
    s->cc_op = CC_OP_FLAGS;
static void gen_logic_cc(DisasContext *s, TCGv val)

    tcg_gen_mov_i32(QREG_CC_DEST, val);
    s->cc_op = CC_OP_LOGIC;

static void gen_update_cc_add(TCGv dest, TCGv src)

    tcg_gen_mov_i32(QREG_CC_DEST, dest);
    tcg_gen_mov_i32(QREG_CC_SRC, src);
static inline int opsize_bytes(int opsize)

    case OS_BYTE: return 1;
    case OS_WORD: return 2;
    case OS_LONG: return 4;
    case OS_SINGLE: return 4;
    case OS_DOUBLE: return 8;
        qemu_assert(0, "bad operand size");
/* Assign value to a register.  If the width is less than the register width
   only the low part of the register is set.  */
static void gen_partset_reg(int opsize, TCGv reg, TCGv val)

        tcg_gen_andi_i32(reg, reg, 0xffffff00);
        tmp = tcg_temp_new();
        tcg_gen_ext8u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        tcg_gen_andi_i32(reg, reg, 0xffff0000);
        tmp = tcg_temp_new();
        tcg_gen_ext16u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        tcg_gen_mov_i32(reg, val);
        qemu_assert(0, "Bad operand size");
/* Sign or zero extend a value.  */
static inline TCGv gen_extend(TCGv val, int opsize, int sign)

        tmp = tcg_temp_new();
            tcg_gen_ext8s_i32(tmp, val);
            tcg_gen_ext8u_i32(tmp, val);
        tmp = tcg_temp_new();
            tcg_gen_ext16s_i32(tmp, val);
            tcg_gen_ext16u_i32(tmp, val);
        qemu_assert(0, "Bad operand size");
/* Generate code for an "effective address".  Does not adjust the base
   register for autoincrement addressing modes.  */
static TCGv gen_lea(DisasContext *s, uint16_t insn, int opsize)

    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct.  */
    case 1: /* Address register direct.  */
    case 2: /* Indirect register */
    case 3: /* Indirect postincrement.  */
        return AREG(insn, 0);
    case 4: /* Indirect predecrement.  */
        tmp = tcg_temp_new();
        tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
    case 5: /* Indirect displacement.  */
        tmp = tcg_temp_new();
        ext = lduw_code(s->pc);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
    case 6: /* Indirect index + displacement.  */
        return gen_lea_indexed(s, opsize, reg);
        case 0: /* Absolute short.  */
            offset = ldsw_code(s->pc);
            return tcg_const_i32(offset);
        case 1: /* Absolute long.  */
            offset = read_im32(s);
            return tcg_const_i32(offset);
        case 2: /* pc displacement  */
            offset += ldsw_code(s->pc);
            return tcg_const_i32(offset);
        case 3: /* pc index+displacement.  */
            return gen_lea_indexed(s, opsize, NULL_QREG);
        case 4: /* Immediate.  */
    /* Should never happen.  */
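/* Worked example (illustrative): for insn = 0x2050 (movea.l (%a0),%a0) the
   mode field (insn >> 3) & 7 == 2, so gen_lea simply returns AREG(insn, 0),
   i.e. %a0, as the effective address.  */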
/* Helper function for gen_ea.  Reuse the computed address for the
   read/write operands.  */
static inline TCGv gen_ea_once(DisasContext *s, uint16_t insn, int opsize,
                               TCGv val, TCGv *addrp, ea_what what)

    if (addrp && what == EA_STORE) {
    tmp = gen_lea(s, insn, opsize);
    if (IS_NULL_QREG(tmp))
    return gen_ldst(s, opsize, tmp, val, what);
/* Generate code to load/store a value to/from an EA.  If VAL > 0 this is
   a write otherwise it is a read (0 == sign extend, -1 == zero extend).
   ADDRP is non-null for readwrite operands.  */
static TCGv gen_ea(DisasContext *s, uint16_t insn, int opsize, TCGv val,
                   TCGv *addrp, ea_what what)

    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct.  */
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
        return gen_extend(reg, opsize, what == EA_LOADS);
    case 1: /* Address register direct.  */
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
        return gen_extend(reg, opsize, what == EA_LOADS);
    case 2: /* Indirect register */
        return gen_ldst(s, opsize, reg, val, what);
    case 3: /* Indirect postincrement.  */
        result = gen_ldst(s, opsize, reg, val, what);
        /* ??? This is not exception safe.  The instruction may still
           fault after this point.  */
        if (what == EA_STORE || !addrp)
            tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
    case 4: /* Indirect predecrement.  */
        if (addrp && what == EA_STORE) {
        tmp = gen_lea(s, insn, opsize);
        if (IS_NULL_QREG(tmp))
        result = gen_ldst(s, opsize, tmp, val, what);
        /* ??? This is not exception safe.  The instruction may still
           fault after this point.  */
        if (what == EA_STORE || !addrp) {
            tcg_gen_mov_i32(reg, tmp);
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
        return gen_ea_once(s, insn, opsize, val, addrp, what);
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            return gen_ea_once(s, insn, opsize, val, addrp, what);
        case 4: /* Immediate.  */
            /* Sign extend values for consistency.  */
                if (what == EA_LOADS)
                    offset = ldsb_code(s->pc + 1);
                    offset = ldub_code(s->pc + 1);
                if (what == EA_LOADS)
                    offset = ldsw_code(s->pc);
                    offset = lduw_code(s->pc);
                offset = read_im32(s);
                qemu_assert(0, "Bad immediate operand");
            return tcg_const_i32(offset);
    /* Should never happen.  */
/* This generates a conditional branch, clobbering all temporaries.  */
static void gen_jmpcc(DisasContext *s, int cond, int l1)

    /* TODO: Optimize compare/branch pairs rather than always flushing
       flag state to CC_OP_FLAGS.  */
    case 2: /* HI (!C && !Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
    case 3: /* LS (C || Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 4: /* CC (!C) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 6: /* NE (!Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 8: /* VC (!V) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 10: /* PL (!N) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
    case 11: /* MI (N) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 12: /* GE (!(N ^ V)) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
    case 13: /* LT (N ^ V) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 14: /* GT (!(Z || (N ^ V))) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_shri_i32(tmp, tmp, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
    case 15: /* LE (Z || (N ^ V)) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_shri_i32(tmp, tmp, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    /* Should never happen.  */
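/* Note on the GE/LT/GT/LE cases above (illustrative): because
   CCF_V == (CCF_N >> 2), shifting CC_DEST right by 2 aligns the N flag
   with the V flag, so the xor leaves N ^ V in the CCF_V bit position,
   which the final brcond then tests.  */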
    l1 = gen_new_label();
    cond = (insn >> 8) & 0xf;
    tcg_gen_andi_i32(reg, reg, 0xffffff00);
    /* This is safe because we modify the reg directly, with no other values
       live.  */
    gen_jmpcc(s, cond ^ 1, l1);
    tcg_gen_ori_i32(reg, reg, 0xff);
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_lookup_tb(DisasContext *s)

    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->is_jmp = DISAS_UPDATE;

/* Generate a jump to an immediate address.  */
static void gen_jmp_im(DisasContext *s, uint32_t dest)

    tcg_gen_movi_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
/* Generate a jump to the address in qreg DEST.  */
static void gen_jmp(DisasContext *s, TCGv dest)

    tcg_gen_mov_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;

static void gen_exception(DisasContext *s, uint32_t where, int nr)

    gen_jmp_im(s, where);
    gen_helper_raise_exception(tcg_const_i32(nr));

static inline void gen_addr_fault(DisasContext *s)

    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
#define SRC_EA(result, opsize, op_sign, addrp) do { \
    result = gen_ea(s, insn, opsize, NULL_QREG, addrp, op_sign ? EA_LOADS : EA_LOADU); \
    if (IS_NULL_QREG(result)) { \

#define DEST_EA(insn, opsize, val, addrp) do { \
    TCGv ea_result = gen_ea(s, insn, opsize, val, addrp, EA_STORE); \
    if (IS_NULL_QREG(ea_result)) { \
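/* Illustrative use (assumption, matching the handlers below):

       SRC_EA(src, OS_WORD, 1, NULL);        // sign-extending read
       DEST_EA(insn, OS_LONG, dest, &addr);  // write back through the EA

   Passing a non-NULL addrp lets a read-modify-write handler reuse the
   computed address for the store.  */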
/* Generate a jump to an immediate address.  */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)

    TranslationBlock *tb;

    if (unlikely(s->singlestep_enabled)) {
        gen_exception(s, dest, EXCP_DEBUG);
    } else if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
               (s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    s->is_jmp = DISAS_TB_JUMP;
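/* Note (illustrative): tcg_gen_exit_tb((tcg_target_long)tb + n) is the
   usual TCG direct-chaining idiom; the jump-slot index "n" is encoded in
   the low bits of the returned TB pointer so the caller can later patch
   that slot to chain the two blocks.  */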
DISAS_INSN(undef_mac)

    gen_exception(s, s->pc - 2, EXCP_LINEA);

DISAS_INSN(undef_fpu)

    gen_exception(s, s->pc - 2, EXCP_LINEF);

    gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
    cpu_abort(cpu_single_env, "Illegal instruction: %04x @ %08x",
    sign = (insn & 0x100) != 0;
    tmp = tcg_temp_new();
        tcg_gen_ext16s_i32(tmp, reg);
        tcg_gen_ext16u_i32(tmp, reg);
    SRC_EA(src, OS_WORD, sign, NULL);
    tcg_gen_mul_i32(tmp, tmp, src);
    tcg_gen_mov_i32(reg, tmp);
    /* Unlike m68k, coldfire always clears the overflow bit.  */
    gen_logic_cc(s, tmp);
    sign = (insn & 0x100) != 0;
        tcg_gen_ext16s_i32(QREG_DIV1, reg);
        tcg_gen_ext16u_i32(QREG_DIV1, reg);
    SRC_EA(src, OS_WORD, sign, NULL);
    tcg_gen_mov_i32(QREG_DIV2, src);
        gen_helper_divs(cpu_env, tcg_const_i32(1));
        gen_helper_divu(cpu_env, tcg_const_i32(1));

    tmp = tcg_temp_new();
    src = tcg_temp_new();
    tcg_gen_ext16u_i32(tmp, QREG_DIV1);
    tcg_gen_shli_i32(src, QREG_DIV2, 16);
    tcg_gen_or_i32(reg, tmp, src);
    s->cc_op = CC_OP_FLAGS;
    ext = lduw_code(s->pc);
        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
    tcg_gen_mov_i32(QREG_DIV1, num);
    SRC_EA(den, OS_LONG, 0, NULL);
    tcg_gen_mov_i32(QREG_DIV2, den);
        gen_helper_divs(cpu_env, tcg_const_i32(0));
        gen_helper_divu(cpu_env, tcg_const_i32(0));
    if ((ext & 7) == ((ext >> 12) & 7)) {
        tcg_gen_mov_i32 (reg, QREG_DIV1);
        tcg_gen_mov_i32 (reg, QREG_DIV2);
    s->cc_op = CC_OP_FLAGS;
    add = (insn & 0x4000) != 0;
    dest = tcg_temp_new();
        SRC_EA(tmp, OS_LONG, 0, &addr);
        SRC_EA(src, OS_LONG, 0, NULL);
        tcg_gen_add_i32(dest, tmp, src);
        gen_helper_xflag_lt(QREG_CC_X, dest, src);
        s->cc_op = CC_OP_ADD;
        gen_helper_xflag_lt(QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        s->cc_op = CC_OP_SUB;
    gen_update_cc_add(dest, src);
        DEST_EA(insn, OS_LONG, dest, &addr);
        tcg_gen_mov_i32(reg, dest);
/* Reverse the order of the bits in REG.  */
    reg = DREG(insn, 0);
    gen_helper_bitrev(reg, reg);
DISAS_INSN(bitop_reg)

    if ((insn & 0x38) != 0)

    op = (insn >> 6) & 3;
    SRC_EA(src1, opsize, 0, op ? &addr : NULL);
    src2 = DREG(insn, 9);
    dest = tcg_temp_new();

    tmp = tcg_temp_new();
    if (opsize == OS_BYTE)
        tcg_gen_andi_i32(tmp, src2, 7);
        tcg_gen_andi_i32(tmp, src2, 31);
    tmp = tcg_temp_new();
    tcg_gen_shr_i32(tmp, src1, src2);
    tcg_gen_andi_i32(tmp, tmp, 1);
    tcg_gen_shli_i32(tmp, tmp, 2);
    /* Clear CCF_Z if bit set.  */
    tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
    tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
    tcg_gen_shl_i32(tmp, tcg_const_i32(1), src2);
        tcg_gen_xor_i32(dest, src1, tmp);
        tcg_gen_not_i32(tmp, tmp);
        tcg_gen_and_i32(dest, src1, tmp);
        tcg_gen_or_i32(dest, src1, tmp);
        DEST_EA(insn, opsize, dest, &addr);
    reg = DREG(insn, 0);
    gen_helper_sats(reg, reg, QREG_CC_DEST);
    gen_logic_cc(s, reg);
static void gen_push(DisasContext *s, TCGv val)

    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, val);
    tcg_gen_mov_i32(QREG_SP, tmp);
    mask = lduw_code(s->pc);
    tmp = gen_lea(s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    is_load = ((insn & 0x0400) != 0);
    for (i = 0; i < 16; i++, mask >>= 1) {
            tmp = gen_load(s, OS_LONG, addr, 0);
            tcg_gen_mov_i32(reg, tmp);
            gen_store(s, OS_LONG, addr, reg);
            tcg_gen_addi_i32(addr, addr, 4);
DISAS_INSN(bitop_im)

    if ((insn & 0x38) != 0)

    op = (insn >> 6) & 3;

    bitnum = lduw_code(s->pc);
    if (bitnum & 0xff00) {
        disas_undef(s, insn);

    SRC_EA(src1, opsize, 0, op ? &addr : NULL);

    if (opsize == OS_BYTE)

    tmp = tcg_temp_new();
    assert (CCF_Z == (1 << 2));
        tcg_gen_shri_i32(tmp, src1, bitnum - 2);
    else if (bitnum < 2)
        tcg_gen_shli_i32(tmp, src1, 2 - bitnum);
        tcg_gen_mov_i32(tmp, src1);
    tcg_gen_andi_i32(tmp, tmp, CCF_Z);
    /* Clear CCF_Z if bit set.  */
    tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
    tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
            tcg_gen_xori_i32(tmp, src1, mask);
            tcg_gen_andi_i32(tmp, src1, ~mask);
            tcg_gen_ori_i32(tmp, src1, mask);
        DEST_EA(insn, opsize, tmp, &addr);
DISAS_INSN(arith_im)

    op = (insn >> 9) & 7;
    SRC_EA(src1, OS_LONG, 0, (op == 6) ? NULL : &addr);
    dest = tcg_temp_new();
        tcg_gen_ori_i32(dest, src1, im);
        gen_logic_cc(s, dest);
        tcg_gen_andi_i32(dest, src1, im);
        gen_logic_cc(s, dest);
        tcg_gen_mov_i32(dest, src1);
        gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im));
        tcg_gen_subi_i32(dest, dest, im);
        gen_update_cc_add(dest, tcg_const_i32(im));
        s->cc_op = CC_OP_SUB;
        tcg_gen_mov_i32(dest, src1);
        tcg_gen_addi_i32(dest, dest, im);
        gen_update_cc_add(dest, tcg_const_i32(im));
        gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im));
        s->cc_op = CC_OP_ADD;
        tcg_gen_xori_i32(dest, src1, im);
        gen_logic_cc(s, dest);
        tcg_gen_mov_i32(dest, src1);
        tcg_gen_subi_i32(dest, dest, im);
        gen_update_cc_add(dest, tcg_const_i32(im));
        s->cc_op = CC_OP_SUB;
        DEST_EA(insn, OS_LONG, dest, &addr);
    reg = DREG(insn, 0);
    tcg_gen_bswap32_i32(reg, reg);
    switch (insn >> 12) {
    case 1: /* move.b */
    case 2: /* move.l */
    case 3: /* move.w */
    SRC_EA(src, opsize, 1, NULL);
    op = (insn >> 6) & 7;
        /* The value will already have been sign extended.  */
        dest = AREG(insn, 9);
        tcg_gen_mov_i32(dest, src);
        dest_ea = ((insn >> 9) & 7) | (op << 3);
        DEST_EA(dest_ea, opsize, src, NULL);
        /* This will be correct because loads sign extend.  */
        gen_logic_cc(s, src);
    reg = DREG(insn, 0);
    gen_helper_subx_cc(reg, cpu_env, tcg_const_i32(0), reg);
    reg = AREG(insn, 9);
    tmp = gen_lea(s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
    tcg_gen_mov_i32(reg, tmp);
    switch ((insn >> 6) & 3) {
    DEST_EA(insn, opsize, tcg_const_i32(0), NULL);
    gen_logic_cc(s, tcg_const_i32(0));
static TCGv gen_get_ccr(DisasContext *s)

    dest = tcg_temp_new();
    tcg_gen_shli_i32(dest, QREG_CC_X, 4);
    tcg_gen_or_i32(dest, dest, QREG_CC_DEST);
DISAS_INSN(move_from_ccr)

    ccr = gen_get_ccr(s);
    reg = DREG(insn, 0);
    gen_partset_reg(OS_WORD, reg, ccr);
    reg = DREG(insn, 0);
    src1 = tcg_temp_new();
    tcg_gen_mov_i32(src1, reg);
    tcg_gen_neg_i32(reg, src1);
    s->cc_op = CC_OP_SUB;
    gen_update_cc_add(reg, src1);
    gen_helper_xflag_lt(QREG_CC_X, tcg_const_i32(0), src1);
    s->cc_op = CC_OP_SUB;
static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)

    tcg_gen_movi_i32(QREG_CC_DEST, val & 0xf);
    tcg_gen_movi_i32(QREG_CC_X, (val & 0x10) >> 4);
        gen_helper_set_sr(cpu_env, tcg_const_i32(val & 0xff00));
static void gen_set_sr(DisasContext *s, uint16_t insn, int ccr_only)

    s->cc_op = CC_OP_FLAGS;
    if ((insn & 0x38) == 0)
        tmp = tcg_temp_new();
        reg = DREG(insn, 0);
        tcg_gen_andi_i32(QREG_CC_DEST, reg, 0xf);
        tcg_gen_shri_i32(tmp, reg, 4);
        tcg_gen_andi_i32(QREG_CC_X, tmp, 1);
        gen_helper_set_sr(cpu_env, reg);
    else if ((insn & 0x3f) == 0x3c)
        val = lduw_code(s->pc);
        gen_set_sr_im(s, val, ccr_only);
        disas_undef(s, insn);
DISAS_INSN(move_to_ccr)

    gen_set_sr(s, insn, 1);
    reg = DREG(insn, 0);
    tcg_gen_not_i32(reg, reg);
    gen_logic_cc(s, reg);
    src1 = tcg_temp_new();
    src2 = tcg_temp_new();
    reg = DREG(insn, 0);
    tcg_gen_shli_i32(src1, reg, 16);
    tcg_gen_shri_i32(src2, reg, 16);
    tcg_gen_or_i32(reg, src1, src2);
    gen_logic_cc(s, reg);
    tmp = gen_lea(s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
    reg = DREG(insn, 0);
    op = (insn >> 6) & 7;
    tmp = tcg_temp_new();
        tcg_gen_ext16s_i32(tmp, reg);
        tcg_gen_ext8s_i32(tmp, reg);
        gen_partset_reg(OS_WORD, reg, tmp);
        tcg_gen_mov_i32(reg, tmp);
    gen_logic_cc(s, tmp);
    switch ((insn >> 6) & 3) {
    SRC_EA(tmp, opsize, 1, NULL);
    gen_logic_cc(s, tmp);
    /* Implemented as a NOP.  */

    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
/* ??? This should be atomic.  */
    dest = tcg_temp_new();
    SRC_EA(src1, OS_BYTE, 1, &addr);
    gen_logic_cc(s, src1);
    tcg_gen_ori_i32(dest, src1, 0x80);
    DEST_EA(insn, OS_BYTE, dest, &addr);
    /* The upper 32 bits of the product are discarded, so
       muls.l and mulu.l are functionally equivalent.  */
    ext = lduw_code(s->pc);
        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
    reg = DREG(ext, 12);
    SRC_EA(src1, OS_LONG, 0, NULL);
    dest = tcg_temp_new();
    tcg_gen_mul_i32(dest, src1, reg);
    tcg_gen_mov_i32(reg, dest);
    /* Unlike m68k, coldfire always clears the overflow bit.  */
    gen_logic_cc(s, dest);
    offset = ldsw_code(s->pc);
    reg = AREG(insn, 0);
    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg);
    if ((insn & 7) != 7)
        tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, tmp, offset);
    src = tcg_temp_new();
    reg = AREG(insn, 0);
    tcg_gen_mov_i32(src, reg);
    tmp = gen_load(s, OS_LONG, src, 0);
    tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, src, 4);
    tmp = gen_load(s, OS_LONG, QREG_SP, 0);
    tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
    /* Load the target address first to ensure correct exception
       behavior.  */
    tmp = gen_lea(s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
    if ((insn & 0x40) == 0) {
        gen_push(s, tcg_const_i32(s->pc));
    SRC_EA(src1, OS_LONG, 0, &addr);
    val = (insn >> 9) & 7;
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src1);
    if ((insn & 0x38) == 0x08) {
        /* Don't update condition codes if the destination is an
           address register.  */
        if (insn & 0x0100) {
            tcg_gen_subi_i32(dest, dest, val);
            tcg_gen_addi_i32(dest, dest, val);
        src2 = tcg_const_i32(val);
        if (insn & 0x0100) {
            gen_helper_xflag_lt(QREG_CC_X, dest, src2);
            tcg_gen_subi_i32(dest, dest, val);
            s->cc_op = CC_OP_SUB;
            tcg_gen_addi_i32(dest, dest, val);
            gen_helper_xflag_lt(QREG_CC_X, dest, src2);
            s->cc_op = CC_OP_ADD;
        gen_update_cc_add(dest, src2);
    DEST_EA(insn, OS_LONG, dest, &addr);
    case 2: /* One extension word.  */
    case 3: /* Two extension words.  */
    case 4: /* No extension words.  */
        disas_undef(s, insn);
    op = (insn >> 8) & 0xf;
    offset = (int8_t)insn;
        offset = ldsw_code(s->pc);
    } else if (offset == -1) {
        offset = read_im32(s);
        gen_push(s, tcg_const_i32(s->pc));
        l1 = gen_new_label();
        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
        gen_jmp_tb(s, 1, base + offset);
        gen_jmp_tb(s, 0, s->pc);
        /* Unconditional branch.  */
        gen_jmp_tb(s, 0, base + offset);
    tcg_gen_movi_i32(DREG(insn, 9), val);
    gen_logic_cc(s, tcg_const_i32(val));
    SRC_EA(src, opsize, (insn & 0x80) == 0, NULL);
    reg = DREG(insn, 9);
    tcg_gen_mov_i32(reg, src);
    gen_logic_cc(s, src);
    reg = DREG(insn, 9);
    dest = tcg_temp_new();
        SRC_EA(src, OS_LONG, 0, &addr);
        tcg_gen_or_i32(dest, src, reg);
        DEST_EA(insn, OS_LONG, dest, &addr);
        SRC_EA(src, OS_LONG, 0, NULL);
        tcg_gen_or_i32(dest, src, reg);
        tcg_gen_mov_i32(reg, dest);
    gen_logic_cc(s, dest);
    SRC_EA(src, OS_LONG, 0, NULL);
    reg = AREG(insn, 9);
    tcg_gen_sub_i32(reg, reg, src);
    reg = DREG(insn, 9);
    src = DREG(insn, 0);
    gen_helper_subx_cc(reg, cpu_env, reg, src);
    val = (insn >> 9) & 7;
    src = tcg_const_i32(val);
    gen_logic_cc(s, src);
    DEST_EA(insn, OS_LONG, src, NULL);
    op = (insn >> 6) & 3;
        s->cc_op = CC_OP_CMPB;
        s->cc_op = CC_OP_CMPW;
        s->cc_op = CC_OP_SUB;
    SRC_EA(src, opsize, 1, NULL);
    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    tcg_gen_sub_i32(dest, reg, src);
    gen_update_cc_add(dest, src);
    SRC_EA(src, opsize, 1, NULL);
    reg = AREG(insn, 9);
    dest = tcg_temp_new();
    tcg_gen_sub_i32(dest, reg, src);
    gen_update_cc_add(dest, src);
    s->cc_op = CC_OP_SUB;
    SRC_EA(src, OS_LONG, 0, &addr);
    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    tcg_gen_xor_i32(dest, src, reg);
    gen_logic_cc(s, dest);
    DEST_EA(insn, OS_LONG, dest, &addr);
    reg = DREG(insn, 9);
    dest = tcg_temp_new();
        SRC_EA(src, OS_LONG, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(insn, OS_LONG, dest, &addr);
        SRC_EA(src, OS_LONG, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        tcg_gen_mov_i32(reg, dest);
    gen_logic_cc(s, dest);
    SRC_EA(src, OS_LONG, 0, NULL);
    reg = AREG(insn, 9);
    tcg_gen_add_i32(reg, reg, src);
    reg = DREG(insn, 9);
    src = DREG(insn, 0);
    gen_helper_addx_cc(reg, cpu_env, reg, src);
    s->cc_op = CC_OP_FLAGS;
/* TODO: This could be implemented without helper functions.  */
DISAS_INSN(shift_im)

    reg = DREG(insn, 0);
    tmp = (insn >> 9) & 7;
    shift = tcg_const_i32(tmp);
    /* No need to flush flags because we know we will set C flag.  */
        gen_helper_shl_cc(reg, cpu_env, reg, shift);
        gen_helper_shr_cc(reg, cpu_env, reg, shift);
        gen_helper_sar_cc(reg, cpu_env, reg, shift);
    s->cc_op = CC_OP_SHIFT;
DISAS_INSN(shift_reg)

    reg = DREG(insn, 0);
    shift = DREG(insn, 9);
    /* Shift by zero leaves C flag unmodified.  */
        gen_helper_shl_cc(reg, cpu_env, reg, shift);
        gen_helper_shr_cc(reg, cpu_env, reg, shift);
        gen_helper_sar_cc(reg, cpu_env, reg, shift);
    s->cc_op = CC_OP_SHIFT;
    reg = DREG(insn, 0);
    gen_logic_cc(s, reg);
    gen_helper_ff1(reg, reg);
static TCGv gen_get_sr(DisasContext *s)

    ccr = gen_get_ccr(s);
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
    tcg_gen_or_i32(sr, sr, ccr);
    ext = lduw_code(s->pc);
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_UNSUPPORTED);
    ext = lduw_code(s->pc);
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
DISAS_INSN(move_from_sr)

        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    reg = DREG(insn, 0);
    gen_partset_reg(OS_WORD, reg, sr);
DISAS_INSN(move_to_sr)

        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    gen_set_sr(s, insn, 0);
DISAS_INSN(move_from_usp)

        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    /* TODO: Implement USP.  */
    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);

DISAS_INSN(move_to_usp)

        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    /* TODO: Implement USP.  */
    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
    gen_exception(s, s->pc, EXCP_HALT_INSN);

        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    ext = lduw_code(s->pc);
    gen_set_sr_im(s, ext, 0);
    tcg_gen_movi_i32(QREG_HALTED, 1);
    gen_exception(s, s->pc, EXCP_HLT);
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    gen_exception(s, s->pc - 2, EXCP_RTE);
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    ext = lduw_code(s->pc);
        reg = AREG(ext, 12);
        reg = DREG(ext, 12);
    gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    /* ICache fetch.  Implement as no-op.  */

        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    /* Cache push/invalidate.  Implement as no-op.  */

        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);

        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    /* TODO: Implement wdebug.  */
    qemu_assert(0, "WDEBUG not implemented");

    gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
/* ??? FP exceptions are not implemented.  Most exceptions are deferred until
   immediately before the next FP instruction is executed.  */
    ext = lduw_code(s->pc);
    opmode = ext & 0x7f;
    switch ((ext >> 13) & 7) {
    case 3: /* fmove out */
        tmp32 = tcg_temp_new_i32();
        /* ??? TODO: Proper behavior on overflow.  */
        switch ((ext >> 10) & 7) {
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
            gen_helper_f64_to_f32(tmp32, cpu_env, src);
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
        case 5: /* OS_DOUBLE */
            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
            switch ((insn >> 3) & 7) {
                tcg_gen_addi_i32(tmp32, tmp32, -8);
                offset = ldsw_code(s->pc);
                tcg_gen_addi_i32(tmp32, tmp32, offset);
            gen_store64(s, tmp32, src);
            switch ((insn >> 3) & 7) {
                tcg_gen_addi_i32(tmp32, tmp32, 8);
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
            tcg_temp_free_i32(tmp32);
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
        DEST_EA(insn, opsize, tmp32, NULL);
        tcg_temp_free_i32(tmp32);
    case 4: /* fmove to control register.  */
        switch ((ext >> 10) & 7) {
            /* Not implemented.  Ignore writes.  */
            cpu_abort(NULL, "Unimplemented: fmove to control %d",
    case 5: /* fmove from control register.  */
        switch ((ext >> 10) & 7) {
            /* Not implemented.  Always return zero.  */
            tmp32 = tcg_const_i32(0);
            cpu_abort(NULL, "Unimplemented: fmove from control %d",
        DEST_EA(insn, OS_LONG, tmp32, NULL);
    case 6: /* fmovem */
        if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
        tmp32 = gen_lea(s, insn, OS_LONG);
        if (IS_NULL_QREG(tmp32)) {
        addr = tcg_temp_new_i32();
        tcg_gen_mov_i32(addr, tmp32);
        for (i = 0; i < 8; i++) {
            if (ext & (1 << 13)) {
                tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
                tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
            if (ext & (mask - 1))
                tcg_gen_addi_i32(addr, addr, 8);
        tcg_temp_free_i32(addr);
    if (ext & (1 << 14)) {
        /* Source effective address.  */
        switch ((ext >> 10) & 7) {
        case 0: opsize = OS_LONG; break;
        case 1: opsize = OS_SINGLE; break;
        case 4: opsize = OS_WORD; break;
        case 5: opsize = OS_DOUBLE; break;
        case 6: opsize = OS_BYTE; break;
        if (opsize == OS_DOUBLE) {
            tmp32 = tcg_temp_new_i32();
            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
            switch ((insn >> 3) & 7) {
                tcg_gen_addi_i32(tmp32, tmp32, -8);
                offset = ldsw_code(s->pc);
                tcg_gen_addi_i32(tmp32, tmp32, offset);
                offset = ldsw_code(s->pc);
                offset += s->pc - 2;
                tcg_gen_addi_i32(tmp32, tmp32, offset);
            src = gen_load64(s, tmp32);
            switch ((insn >> 3) & 7) {
                tcg_gen_addi_i32(tmp32, tmp32, 8);
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
            tcg_temp_free_i32(tmp32);
            SRC_EA(tmp32, opsize, 1, NULL);
            src = tcg_temp_new_i64();
                gen_helper_i32_to_f64(src, cpu_env, tmp32);
                gen_helper_f32_to_f64(src, cpu_env, tmp32);
        /* Source register.  */
        src = FREG(ext, 10);
    dest = FREG(ext, 7);
    res = tcg_temp_new_i64();
        tcg_gen_mov_f64(res, dest);
    case 0: case 0x40: case 0x44: /* fmove */
        tcg_gen_mov_f64(res, src);
        gen_helper_iround_f64(res, cpu_env, src);
    case 3: /* fintrz */
        gen_helper_itrunc_f64(res, cpu_env, src);
    case 4: case 0x41: case 0x45: /* fsqrt */
        gen_helper_sqrt_f64(res, cpu_env, src);
    case 0x18: case 0x58: case 0x5c: /* fabs */
        gen_helper_abs_f64(res, src);
    case 0x1a: case 0x5a: case 0x5e: /* fneg */
        gen_helper_chs_f64(res, src);
    case 0x20: case 0x60: case 0x64: /* fdiv */
        gen_helper_div_f64(res, cpu_env, res, src);
    case 0x22: case 0x62: case 0x66: /* fadd */
        gen_helper_add_f64(res, cpu_env, res, src);
    case 0x23: case 0x63: case 0x67: /* fmul */
        gen_helper_mul_f64(res, cpu_env, res, src);
    case 0x28: case 0x68: case 0x6c: /* fsub */
        gen_helper_sub_f64(res, cpu_env, res, src);
    case 0x38: /* fcmp */
        gen_helper_sub_cmp_f64(res, cpu_env, res, src);
    case 0x3a: /* ftst */
        tcg_gen_mov_f64(res, src);
    if (ext & (1 << 14)) {
        tcg_temp_free_i64(src);
    if (opmode & 0x40) {
        if ((opmode & 0x4) != 0)
    } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
        TCGv tmp = tcg_temp_new_i32();
        gen_helper_f64_to_f32(tmp, cpu_env, res);
        gen_helper_f32_to_f64(res, cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    tcg_gen_mov_f64(QREG_FP_RESULT, res);
    tcg_gen_mov_f64(dest, res);
    tcg_temp_free_i64(res);
/* FIXME: Is this right for offset addressing modes?  */
    disas_undef_fpu(s, insn);
    offset = ldsw_code(s->pc);
    if (insn & (1 << 6)) {
        offset = (offset << 16) | lduw_code(s->pc);

    l1 = gen_new_label();
    /* TODO: Raise BSUN exception.  */
    flag = tcg_temp_new();
    gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
    /* Jump to l1 if condition is true.  */
    switch (insn & 0xf) {
    case 1: /* eq (=0) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
    case 2: /* ogt (=1) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
    case 3: /* oge (=0 or =1) */
        tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
    case 4: /* olt (=-1) */
        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
    case 5: /* ole (=-1 or =0) */
        tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
    case 6: /* ogl (=-1 or =1) */
        tcg_gen_andi_i32(flag, flag, 1);
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
    case 7: /* or (=2) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
    case 8: /* un (<2) */
        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
    case 9: /* ueq (=0 or =2) */
        tcg_gen_andi_i32(flag, flag, 1);
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
    case 10: /* ugt (>0) */
        tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
    case 11: /* uge (>=0) */
        tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
    case 12: /* ult (=-1 or =2) */
        tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
    case 13: /* ule (!=1) */
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
    case 14: /* ne (!=0) */
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
    gen_jmp_tb(s, 0, s->pc);
    gen_jmp_tb(s, 1, addr + offset);
DISAS_INSN(frestore)

    /* TODO: Implement frestore.  */
    qemu_assert(0, "FRESTORE not implemented");

    /* TODO: Implement fsave.  */
    qemu_assert(0, "FSAVE not implemented");
static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)

    TCGv tmp = tcg_temp_new();
    if (s->env->macsr & MACSR_FI) {
            tcg_gen_andi_i32(tmp, val, 0xffff0000);
            tcg_gen_shli_i32(tmp, val, 16);
    } else if (s->env->macsr & MACSR_SU) {
            tcg_gen_sari_i32(tmp, val, 16);
            tcg_gen_ext16s_i32(tmp, val);
            tcg_gen_shri_i32(tmp, val, 16);
            tcg_gen_ext16u_i32(tmp, val);
static void gen_mac_clear_flags(void)

    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
    s->mactmp = tcg_temp_new_i64();

    ext = lduw_code(s->pc);

    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(s, insn);
        /* MAC with load.  */
        tmp = gen_lea(s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /* Load the value now to ensure correct exception behavior.
           Perform writeback after reading the MAC inputs.  */
        loadval = gen_load(s, OS_LONG, addr, 0);

        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);

    gen_mac_clear_flags();
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);

    if ((ext & 0x0800) == 0) {
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        switch ((ext >> 9) & 3) {
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);

        /* Save the overflow flag from the multiply.  */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
        saved_flags = NULL_QREG;

    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);

        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));

    /* Disabled because conditional branches clobber temporary vars.  */
        /* Dual accumulate variant.  */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier.  */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);

        /* Disabled because conditional branches clobber temporary vars.  */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated.  */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);

            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
        /* Disabled because conditional branches clobber temporary vars.  */

    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /* FIXME: Should address writeback happen with the masked or
           original value?  */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
        case 4: /* Pre-decrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
DISAS_INSN(from_mac)

    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    if (s->env->macsr & MACSR_FI) {
        gen_helper_get_macf(rx, cpu_env, acc);
    } else if ((s->env->macsr & MACSR_OMC) == 0) {
        tcg_gen_trunc_i64_i32(rx, acc);
    } else if (s->env->macsr & MACSR_SU) {
        gen_helper_get_macs(rx, acc);
        gen_helper_get_macu(rx, acc);
        tcg_gen_movi_i64(acc, 0);
        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
DISAS_INSN(move_mac)

    /* FIXME: This can be done without a helper.  */
    dest = tcg_const_i32((insn >> 9) & 3);
    gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, dest);
DISAS_INSN(from_macsr)

    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    tcg_gen_mov_i32(reg, QREG_MACSR);

DISAS_INSN(from_mask)

    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    tcg_gen_mov_i32(reg, QREG_MAC_MASK);

DISAS_INSN(from_mext)

    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
    if (s->env->macsr & MACSR_FI)
        gen_helper_get_mac_extf(reg, cpu_env, acc);
        gen_helper_get_mac_exti(reg, cpu_env, acc);
DISAS_INSN(macsr_to_ccr)

    tcg_gen_movi_i32(QREG_CC_X, 0);
    tcg_gen_andi_i32(QREG_CC_DEST, QREG_MACSR, 0xf);
    s->cc_op = CC_OP_FLAGS;
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    SRC_EA(val, OS_LONG, 0, NULL);
    if (s->env->macsr & MACSR_FI) {
        tcg_gen_ext_i32_i64(acc, val);
        tcg_gen_shli_i64(acc, acc, 8);
    } else if (s->env->macsr & MACSR_SU) {
        tcg_gen_ext_i32_i64(acc, val);
        tcg_gen_extu_i32_i64(acc, val);
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
DISAS_INSN(to_macsr)

    SRC_EA(val, OS_LONG, 0, NULL);
    gen_helper_set_macsr(cpu_env, val);

    SRC_EA(val, OS_LONG, 0, NULL);
    tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
    SRC_EA(val, OS_LONG, 0, NULL);
    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
    if (s->env->macsr & MACSR_FI)
        gen_helper_set_mac_extf(cpu_env, val, acc);
    else if (s->env->macsr & MACSR_SU)
        gen_helper_set_mac_exts(cpu_env, val, acc);
        gen_helper_set_mac_extu(cpu_env, val, acc);
static disas_proc opcode_table[65536];

static void register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)

    /* Sanity check.  All set bits must be included in the mask.  */
    if (opcode & ~mask) {
                "qemu internal error: bogus opcode definition %04x/%04x\n",
    /* This could probably be cleverer.  For now just optimize the case where
       the top bits are known.  */
    /* Find the first zero bit in the mask.  */
    while ((i & mask) != 0)
    /* Iterate over all combinations of this and lower bits.  */
    from = opcode & ~(i - 1);
    for (i = from; i < to; i++) {
        if ((i & mask) == opcode)
            opcode_table[i] = proc;
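/* Worked example (illustrative, assuming the scan starts from the top bit):
   for register_opcode(disas_scc, 0x50c0, 0xf0f8) the first zero mask bit
   scanning down from 0x8000 is bit 11, so the loop visits all 0x1000
   combinations 0x5000-0x5fff and fills in the table entries that match
   0x50c0 under the mask.  */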
/* Register m68k opcode handlers.  Order is important.
   Later insns override earlier ones.  */
void register_m68k_insns (CPUM68KState *env)

#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        register_opcode(disas_##name, 0x##opcode, 0x##mask); \
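    /* Illustrative expansion (not in the original): INSN(nop, 4e71, ffff,
       CF_ISA_A) registers disas_nop for the single opcode 0x4e71, since the
       ffff mask fixes every bit, while INSN(branch, 6000, f000, CF_ISA_A)
       covers all 4096 opcodes 0x6000-0x6fff.  */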
    INSN(undef,     0000, 0000, CF_ISA_A);
    INSN(arith_im,  0080, fff8, CF_ISA_A);
    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
    INSN(bitop_reg, 0100, f1c0, CF_ISA_A);
    INSN(bitop_reg, 0140, f1c0, CF_ISA_A);
    INSN(bitop_reg, 0180, f1c0, CF_ISA_A);
    INSN(bitop_reg, 01c0, f1c0, CF_ISA_A);
    INSN(arith_im,  0280, fff8, CF_ISA_A);
    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0480, fff8, CF_ISA_A);
    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0680, fff8, CF_ISA_A);
    INSN(bitop_im,  0800, ffc0, CF_ISA_A);
    INSN(bitop_im,  0840, ffc0, CF_ISA_A);
    INSN(bitop_im,  0880, ffc0, CF_ISA_A);
    INSN(bitop_im,  08c0, ffc0, CF_ISA_A);
    INSN(arith_im,  0a80, fff8, CF_ISA_A);
    INSN(arith_im,  0c00, ff38, CF_ISA_A);
    INSN(move,      1000, f000, CF_ISA_A);
    INSN(move,      2000, f000, CF_ISA_A);
    INSN(move,      3000, f000, CF_ISA_A);
    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
    INSN(negx,      4080, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(lea,       41c0, f1c0, CF_ISA_A);
    INSN(clr,       4200, ff00, CF_ISA_A);
    INSN(undef,     42c0, ffc0, CF_ISA_A);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(neg,       4480, fff8, CF_ISA_A);
    INSN(move_to_ccr, 44c0, ffc0, CF_ISA_A);
    INSN(not,       4680, fff8, CF_ISA_A);
    INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
    INSN(pea,       4840, ffc0, CF_ISA_A);
    INSN(swap,      4840, fff8, CF_ISA_A);
    INSN(movem,     48c0, fbc0, CF_ISA_A);
    INSN(ext,       4880, fff8, CF_ISA_A);
    INSN(ext,       48c0, fff8, CF_ISA_A);
    INSN(ext,       49c0, fff8, CF_ISA_A);
    INSN(tst,       4a00, ff00, CF_ISA_A);
    INSN(tas,       4ac0, ffc0, CF_ISA_B);
    INSN(halt,      4ac8, ffff, CF_ISA_A);
    INSN(pulse,     4acc, ffff, CF_ISA_A);
    INSN(illegal,   4afc, ffff, CF_ISA_A);
    INSN(mull,      4c00, ffc0, CF_ISA_A);
    INSN(divl,      4c40, ffc0, CF_ISA_A);
    INSN(sats,      4c80, fff8, CF_ISA_B);
    INSN(trap,      4e40, fff0, CF_ISA_A);
    INSN(link,      4e50, fff8, CF_ISA_A);
    INSN(unlk,      4e58, fff8, CF_ISA_A);
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(nop,       4e71, ffff, CF_ISA_A);
    INSN(stop,      4e72, ffff, CF_ISA_A);
    INSN(rte,       4e73, ffff, CF_ISA_A);
    INSN(rts,       4e75, ffff, CF_ISA_A);
    INSN(movec,     4e7b, ffff, CF_ISA_A);
    INSN(jump,      4e80, ffc0, CF_ISA_A);
    INSN(jump,      4ec0, ffc0, CF_ISA_A);
    INSN(addsubq,   5180, f1c0, CF_ISA_A);
    INSN(scc,       50c0, f0f8, CF_ISA_A);
    INSN(addsubq,   5080, f1c0, CF_ISA_A);
    INSN(tpf,       51f8, fff8, CF_ISA_A);

    /* Branch instructions.  */
    INSN(branch,    6000, f000, CF_ISA_A);
    /* Disable long branch instructions, then add back the ones we want.  */
    INSN(undef,     60ff, f0ff, CF_ISA_A); /* All long branches.  */
    INSN(branch,    60ff, f0ff, CF_ISA_B);
    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch,    60ff, ffff, BRAL);

    INSN(moveq,     7000, f100, CF_ISA_A);
    INSN(mvzs,      7100, f100, CF_ISA_B);
    INSN(or,        8000, f000, CF_ISA_A);
    INSN(divw,      80c0, f0c0, CF_ISA_A);
    INSN(addsub,    9000, f000, CF_ISA_A);
    INSN(subx,      9180, f1f8, CF_ISA_A);
    INSN(suba,      91c0, f1c0, CF_ISA_A);

    INSN(undef_mac, a000, f000, CF_ISA_A);
    INSN(mac,       a000, f100, CF_EMAC);
    INSN(from_mac,  a180, f9b0, CF_EMAC);
    INSN(move_mac,  a110, f9fc, CF_EMAC);
    INSN(from_macsr, a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac,    a100, f9c0, CF_EMAC);
    INSN(to_macsr,  a900, ffc0, CF_EMAC);
    INSN(to_mext,   ab00, fbc0, CF_EMAC);
    INSN(to_mask,   ad00, ffc0, CF_EMAC);

    INSN(mov3q,     a140, f1c0, CF_ISA_B);
    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp,       b080, f1c0, CF_ISA_A);
    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
    INSN(eor,       b180, f1c0, CF_ISA_A);
    INSN(and,       c000, f000, CF_ISA_A);
    INSN(mulw,      c0c0, f0c0, CF_ISA_A);
    INSN(addsub,    d000, f000, CF_ISA_A);
    INSN(addx,      d180, f1f8, CF_ISA_A);
    INSN(adda,      d1c0, f1c0, CF_ISA_A);
    INSN(shift_im,  e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(undef_fpu, f000, f000, CF_ISA_A);
    INSN(fpu,       f200, ffc0, CF_FPU);
    INSN(fbcc,      f280, ffc0, CF_FPU);
    INSN(frestore,  f340, ffc0, CF_FPU);
    INSN(fsave,     f340, ffc0, CF_FPU);
    INSN(intouch,   f340, ffc0, CF_ISA_A);
    INSN(cpushl,    f428, ff38, CF_ISA_A);
    INSN(wddata,    fb00, ff00, CF_ISA_A);
    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
/* ??? Some of this implementation is not exception safe.  We should always
   write back the result to memory before setting the condition codes.  */
static void disas_m68k_insn(CPUState * env, DisasContext *s)

    insn = lduw_code(s->pc);
    opcode_table[insn](s, insn);
/* generate intermediate code for basic block 'tb'.  */
static void gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
                                           int search_pc)

    DisasContext dc1, *dc = &dc1;
    uint16_t *gen_opc_end;
    target_ulong pc_start;

    /* generate intermediate code */
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->fpcr = env->fpcr;
    dc->user = (env->sr & SR_S) == 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
        max_insns = CF_COUNT_MASK;

        pc_offset = dc->pc - pc_start;
        gen_throws_exception = NULL;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception(dc, dc->pc, EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
            j = gen_opc_ptr - gen_opc_buf;
                gen_opc_instr_start[lj++] = 0;
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
        dc->insn_pc = dc->pc;
        disas_m68k_insn(env, dc);
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO)
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        gen_flush_cc_op(dc);
        tcg_gen_movi_i32(QREG_PC, dc->pc);
        gen_helper_raise_exception(tcg_const_i32(EXCP_DEBUG));
    switch(dc->is_jmp) {
        gen_flush_cc_op(dc);
        gen_jmp_tb(dc, 0, dc->pc);
        gen_flush_cc_op(dc);
        /* indicate that the hash table must be used to find the next TB */
        /* nothing more to generate */
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, 0);
    j = gen_opc_ptr - gen_opc_buf;
        gen_opc_instr_start[lj++] = 0;
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

    //expand_target_qops();
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)

    gen_intermediate_code_internal(env, tb, 0);

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)

    gen_intermediate_code_internal(env, tb, 1);
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,

    for (i = 0; i < 8; i++)
        u.d = env->fregs[i];
        cpu_fprintf (f, "D%d = %08x   A%d = %08x   F%d = %08x%08x (%12g)\n",
                     i, env->dregs[i], i, env->aregs[i],
                     i, u.l.upper, u.l.lower, *(double *)&u.d);
    cpu_fprintf (f, "PC = %08x   ", env->pc);
    cpu_fprintf (f, "SR = %04x %c%c%c%c%c ", sr, (sr & 0x10) ? 'X' : '-',
                 (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
                 (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
    cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)

    env->pc = gen_opc_pc[pc_pos];