4 * Copyright (c) 2005-2007 CodeSourcery
5 * Written by Paul Brook
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
32 #include "m68k-qreg.h"
34 //#define DEBUG_DISPATCH 1
/* Internal consistency check for the translator: report MSG on stderr
   when COND is false.  NOTE(review): this extract is missing interior
   lines (the body's braces and the failure path after the fprintf are
   not visible) — the visible fprintf is presumably followed by an
   abort; confirm against the full source. */
36 static inline void qemu_assert(int cond
, const char *msg
)
39 fprintf (stderr
, "badness: %s\n", msg
)
44 /* internal defines */
45 typedef struct DisasContext
{
47 target_ulong insn_pc
; /* Start of the current instruction. */
53 struct TranslationBlock
*tb
;
54 int singlestep_enabled
;
58 #define DISAS_JUMP_NEXT 4
60 #if defined(CONFIG_USER_ONLY)
63 #define IS_USER(s) s->user
66 /* XXX: move that elsewhere */
67 /* ??? Fix exceptions. */
68 static void *gen_throws_exception
;
69 #define gen_last_qop NULL
74 #if defined(CONFIG_USER_ONLY)
75 #define gen_st(s, name, addr, val) gen_op_st##name##_raw(addr, val)
76 #define gen_ld(s, name, val, addr) gen_op_ld##name##_raw(val, addr)
78 #define gen_st(s, name, addr, val) do { \
80 gen_op_st##name##_user(addr, val); \
82 gen_op_st##name##_kernel(addr, val); \
84 #define gen_ld(s, name, val, addr) do { \
86 gen_op_ld##name##_user(val, addr); \
88 gen_op_ld##name##_kernel(val, addr); \
/* Map an instruction field to a qreg index for a data (D), address (A),
   or FP (F) register: extract the 3-bit register number at bit POS of
   INSN and offset it from the corresponding register-bank base qreg.
   The macro parameters are fully parenthesized so that arguments
   containing operators (e.g. DREG(x | y, p)) expand correctly. */
#define DREG(insn, pos) ((((insn) >> (pos)) & 7) + QREG_D0)
#define AREG(insn, pos) ((((insn) >> (pos)) & 7) + QREG_A0)
#define FREG(insn, pos) ((((insn) >> (pos)) & 7) + QREG_F0)
104 typedef void (*disas_proc
)(DisasContext
*, uint16_t);
106 #ifdef DEBUG_DISPATCH
107 #define DISAS_INSN(name) \
108 static void real_disas_##name (DisasContext *s, uint16_t insn); \
109 static void disas_##name (DisasContext *s, uint16_t insn) { \
110 if (logfile) fprintf(logfile, "Dispatch " #name "\n"); \
111 real_disas_##name(s, insn); } \
112 static void real_disas_##name (DisasContext *s, uint16_t insn)
114 #define DISAS_INSN(name) \
115 static void disas_##name (DisasContext *s, uint16_t insn)
118 /* Generate a load from the specified address. Narrow values are
119 sign extended to full register width. */
120 static inline int gen_load(DisasContext
* s
, int opsize
, int addr
, int sign
)
126 tmp
= gen_new_qreg(QMODE_I32
);
128 gen_ld(s
, 8s32
, tmp
, addr
);
130 gen_ld(s
, 8u32, tmp
, addr
);
133 tmp
= gen_new_qreg(QMODE_I32
);
135 gen_ld(s
, 16s32
, tmp
, addr
);
137 gen_ld(s
, 16u32, tmp
, addr
);
140 tmp
= gen_new_qreg(QMODE_I32
);
141 gen_ld(s
, 32, tmp
, addr
);
144 tmp
= gen_new_qreg(QMODE_F32
);
145 gen_ld(s
, f32
, tmp
, addr
);
148 tmp
= gen_new_qreg(QMODE_F64
);
149 gen_ld(s
, f64
, tmp
, addr
);
152 qemu_assert(0, "bad load size");
154 gen_throws_exception
= gen_last_qop
;
158 /* Generate a store. */
159 static inline void gen_store(DisasContext
*s
, int opsize
, int addr
, int val
)
164 gen_st(s
, 8, addr
, val
);
167 gen_st(s
, 16, addr
, val
);
170 gen_st(s
, 32, addr
, val
);
173 gen_st(s
, f32
, addr
, val
);
176 gen_st(s
, f64
, addr
, val
);
179 qemu_assert(0, "bad store size");
181 gen_throws_exception
= gen_last_qop
;
184 /* Generate an unsigned load if VAL is 0 a signed load if val is -1,
185 otherwise generate a store. */
186 static int gen_ldst(DisasContext
*s
, int opsize
, int addr
, int val
)
189 gen_store(s
, opsize
, addr
, val
);
192 return gen_load(s
, opsize
, addr
, val
!= 0);
196 /* Read a 32-bit immediate constant. */
197 static inline uint32_t read_im32(DisasContext
*s
)
200 im
= ((uint32_t)lduw_code(s
->pc
)) << 16;
202 im
|= lduw_code(s
->pc
);
207 /* Calculate an address index. */
208 static int gen_addr_index(uint16_t ext
, int tmp
)
213 add
= (ext
& 0x8000) ? AREG(ext
, 12) : DREG(ext
, 12);
214 if ((ext
& 0x800) == 0) {
215 gen_op_ext16s32(tmp
, add
);
218 scale
= (ext
>> 9) & 3;
220 gen_op_shl32(tmp
, add
, gen_im32(scale
));
226 /* Handle a base + index + displacement effective address. A base of
227 -1 means pc-relative. */
228 static int gen_lea_indexed(DisasContext
*s
, int opsize
, int base
)
237 ext
= lduw_code(s
->pc
);
240 if ((ext
& 0x800) == 0 && !m68k_feature(s
->env
, M68K_FEATURE_WORD_INDEX
))
244 /* full extension word format */
245 if (!m68k_feature(s
->env
, M68K_FEATURE_EXT_FULL
))
248 if ((ext
& 0x30) > 0x10) {
249 /* base displacement */
250 if ((ext
& 0x30) == 0x20) {
251 bd
= (int16_t)lduw_code(s
->pc
);
259 tmp
= gen_new_qreg(QMODE_I32
);
260 if ((ext
& 0x44) == 0) {
262 add
= gen_addr_index(ext
, tmp
);
266 if ((ext
& 0x80) == 0) {
267 /* base not suppressed */
269 base
= gen_im32(offset
+ bd
);
273 gen_op_add32(tmp
, add
, base
);
281 gen_op_add32(tmp
, add
, gen_im32(bd
));
287 if ((ext
& 3) != 0) {
288 /* memory indirect */
289 base
= gen_load(s
, OS_LONG
, add
, 0);
290 if ((ext
& 0x44) == 4) {
291 add
= gen_addr_index(ext
, tmp
);
292 gen_op_add32(tmp
, add
, base
);
298 /* outer displacement */
299 if ((ext
& 3) == 2) {
300 od
= (int16_t)lduw_code(s
->pc
);
309 gen_op_add32(tmp
, add
, gen_im32(od
));
314 /* brief extension word format */
315 tmp
= gen_new_qreg(QMODE_I32
);
316 add
= gen_addr_index(ext
, tmp
);
318 gen_op_add32(tmp
, add
, base
);
320 gen_op_add32(tmp
, tmp
, gen_im32((int8_t)ext
));
322 gen_op_add32(tmp
, add
, gen_im32(offset
+ (int8_t)ext
));
329 /* Update the CPU env CC_OP state. */
330 static inline void gen_flush_cc_op(DisasContext
*s
)
332 if (s
->cc_op
!= CC_OP_DYNAMIC
)
333 gen_op_mov32(QREG_CC_OP
, gen_im32(s
->cc_op
));
336 /* Evaluate all the CC flags. */
337 static inline void gen_flush_flags(DisasContext
*s
)
339 if (s
->cc_op
== CC_OP_FLAGS
)
342 gen_op_flush_flags();
343 s
->cc_op
= CC_OP_FLAGS
;
346 static inline int opsize_bytes(int opsize
)
349 case OS_BYTE
: return 1;
350 case OS_WORD
: return 2;
351 case OS_LONG
: return 4;
352 case OS_SINGLE
: return 4;
353 case OS_DOUBLE
: return 8;
355 qemu_assert(0, "bad operand size");
359 /* Assign value to a register. If the width is less than the register width
360 only the low part of the register is set. */
361 static void gen_partset_reg(int opsize
, int reg
, int val
)
366 gen_op_and32(reg
, reg
, gen_im32(0xffffff00));
367 tmp
= gen_new_qreg(QMODE_I32
);
368 gen_op_and32(tmp
, val
, gen_im32(0xff));
369 gen_op_or32(reg
, reg
, tmp
);
372 gen_op_and32(reg
, reg
, gen_im32(0xffff0000));
373 tmp
= gen_new_qreg(QMODE_I32
);
374 gen_op_and32(tmp
, val
, gen_im32(0xffff));
375 gen_op_or32(reg
, reg
, tmp
);
378 gen_op_mov32(reg
, val
);
381 gen_op_pack_32_f32(reg
, val
);
384 qemu_assert(0, "Bad operand size");
389 /* Sign or zero extend a value. */
390 static inline int gen_extend(int val
, int opsize
, int sign
)
396 tmp
= gen_new_qreg(QMODE_I32
);
398 gen_op_ext8s32(tmp
, val
);
400 gen_op_ext8u32(tmp
, val
);
403 tmp
= gen_new_qreg(QMODE_I32
);
405 gen_op_ext16s32(tmp
, val
);
407 gen_op_ext16u32(tmp
, val
);
413 tmp
= gen_new_qreg(QMODE_F32
);
414 gen_op_pack_f32_32(tmp
, val
);
417 qemu_assert(0, "Bad operand size");
422 /* Generate code for an "effective address". Does not adjust the base
423 register for autoincrement addressing modes. */
424 static int gen_lea(DisasContext
*s
, uint16_t insn
, int opsize
)
432 switch ((insn
>> 3) & 7) {
433 case 0: /* Data register direct. */
434 case 1: /* Address register direct. */
436 case 2: /* Indirect register */
437 case 3: /* Indirect postincrement. */
440 case 4: /* Indirect predecrememnt. */
442 tmp
= gen_new_qreg(QMODE_I32
);
443 gen_op_sub32(tmp
, reg
, gen_im32(opsize_bytes(opsize
)));
445 case 5: /* Indirect displacement. */
447 tmp
= gen_new_qreg(QMODE_I32
);
448 ext
= lduw_code(s
->pc
);
450 gen_op_add32(tmp
, reg
, gen_im32((int16_t)ext
));
452 case 6: /* Indirect index + displacement. */
454 return gen_lea_indexed(s
, opsize
, reg
);
457 case 0: /* Absolute short. */
458 offset
= ldsw_code(s
->pc
);
460 return gen_im32(offset
);
461 case 1: /* Absolute long. */
462 offset
= read_im32(s
);
463 return gen_im32(offset
);
464 case 2: /* pc displacement */
465 tmp
= gen_new_qreg(QMODE_I32
);
467 offset
+= ldsw_code(s
->pc
);
469 return gen_im32(offset
);
470 case 3: /* pc index+displacement. */
471 return gen_lea_indexed(s
, opsize
, -1);
472 case 4: /* Immediate. */
477 /* Should never happen. */
481 /* Helper function for gen_ea. Reuse the computed address between the
482 for read/write operands. */
483 static inline int gen_ea_once(DisasContext
*s
, uint16_t insn
, int opsize
,
488 if (addrp
&& val
> 0) {
491 tmp
= gen_lea(s
, insn
, opsize
);
497 return gen_ldst(s
, opsize
, tmp
, val
);
500 /* Generate code to load/store a value into/from an EA. If VAL > 0 this is
501 a write otherwise it is a read (0 == sign extend, -1 == zero extend).
502 ADDRP is non-null for readwrite operands. */
503 static int gen_ea(DisasContext
*s
, uint16_t insn
, int opsize
, int val
,
511 switch ((insn
>> 3) & 7) {
512 case 0: /* Data register direct. */
515 gen_partset_reg(opsize
, reg
, val
);
518 return gen_extend(reg
, opsize
, val
);
520 case 1: /* Address register direct. */
523 gen_op_mov32(reg
, val
);
526 return gen_extend(reg
, opsize
, val
);
528 case 2: /* Indirect register */
530 return gen_ldst(s
, opsize
, reg
, val
);
531 case 3: /* Indirect postincrement. */
533 result
= gen_ldst(s
, opsize
, reg
, val
);
534 /* ??? This is not exception safe. The instruction may still
535 fault after this point. */
536 if (val
> 0 || !addrp
)
537 gen_op_add32(reg
, reg
, gen_im32(opsize_bytes(opsize
)));
539 case 4: /* Indirect predecrememnt. */
542 if (addrp
&& val
> 0) {
545 tmp
= gen_lea(s
, insn
, opsize
);
551 result
= gen_ldst(s
, opsize
, tmp
, val
);
552 /* ??? This is not exception safe. The instruction may still
553 fault after this point. */
554 if (val
> 0 || !addrp
) {
556 gen_op_mov32(reg
, tmp
);
560 case 5: /* Indirect displacement. */
561 case 6: /* Indirect index + displacement. */
562 return gen_ea_once(s
, insn
, opsize
, val
, addrp
);
565 case 0: /* Absolute short. */
566 case 1: /* Absolute long. */
567 case 2: /* pc displacement */
568 case 3: /* pc index+displacement. */
569 return gen_ea_once(s
, insn
, opsize
, val
, addrp
);
570 case 4: /* Immediate. */
571 /* Sign extend values for consistency. */
575 offset
= ldsb_code(s
->pc
+ 1);
577 offset
= ldub_code(s
->pc
+ 1);
582 offset
= ldsw_code(s
->pc
);
584 offset
= lduw_code(s
->pc
);
588 offset
= read_im32(s
);
591 qemu_assert(0, "Bad immediate operand");
593 return gen_im32(offset
);
598 /* Should never happen. */
602 static void gen_logic_cc(DisasContext
*s
, int val
)
604 gen_op_logic_cc(val
);
605 s
->cc_op
= CC_OP_LOGIC
;
608 static void gen_jmpcc(DisasContext
*s
, int cond
, int l1
)
619 case 2: /* HI (!C && !Z) */
620 tmp
= gen_new_qreg(QMODE_I32
);
621 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_C
| CCF_Z
));
622 gen_op_jmp_z32(tmp
, l1
);
624 case 3: /* LS (C || Z) */
625 tmp
= gen_new_qreg(QMODE_I32
);
626 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_C
| CCF_Z
));
627 gen_op_jmp_nz32(tmp
, l1
);
629 case 4: /* CC (!C) */
630 tmp
= gen_new_qreg(QMODE_I32
);
631 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_C
));
632 gen_op_jmp_z32(tmp
, l1
);
635 tmp
= gen_new_qreg(QMODE_I32
);
636 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_C
));
637 gen_op_jmp_nz32(tmp
, l1
);
639 case 6: /* NE (!Z) */
640 tmp
= gen_new_qreg(QMODE_I32
);
641 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_Z
));
642 gen_op_jmp_z32(tmp
, l1
);
645 tmp
= gen_new_qreg(QMODE_I32
);
646 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_Z
));
647 gen_op_jmp_nz32(tmp
, l1
);
649 case 8: /* VC (!V) */
650 tmp
= gen_new_qreg(QMODE_I32
);
651 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_V
));
652 gen_op_jmp_z32(tmp
, l1
);
655 tmp
= gen_new_qreg(QMODE_I32
);
656 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_V
));
657 gen_op_jmp_nz32(tmp
, l1
);
659 case 10: /* PL (!N) */
660 tmp
= gen_new_qreg(QMODE_I32
);
661 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_N
));
662 gen_op_jmp_z32(tmp
, l1
);
664 case 11: /* MI (N) */
665 tmp
= gen_new_qreg(QMODE_I32
);
666 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_N
));
667 gen_op_jmp_nz32(tmp
, l1
);
669 case 12: /* GE (!(N ^ V)) */
670 tmp
= gen_new_qreg(QMODE_I32
);
671 gen_op_shr32(tmp
, QREG_CC_DEST
, gen_im32(2));
672 gen_op_xor32(tmp
, tmp
, QREG_CC_DEST
);
673 gen_op_and32(tmp
, tmp
, gen_im32(CCF_V
));
674 gen_op_jmp_z32(tmp
, l1
);
676 case 13: /* LT (N ^ V) */
677 tmp
= gen_new_qreg(QMODE_I32
);
678 gen_op_shr32(tmp
, QREG_CC_DEST
, gen_im32(2));
679 gen_op_xor32(tmp
, tmp
, QREG_CC_DEST
);
680 gen_op_and32(tmp
, tmp
, gen_im32(CCF_V
));
681 gen_op_jmp_nz32(tmp
, l1
);
683 case 14: /* GT (!(Z || (N ^ V))) */
686 l2
= gen_new_label();
687 tmp
= gen_new_qreg(QMODE_I32
);
688 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_Z
));
689 gen_op_jmp_nz32(tmp
, l2
);
690 tmp
= gen_new_qreg(QMODE_I32
);
691 gen_op_shr32(tmp
, QREG_CC_DEST
, gen_im32(2));
692 gen_op_xor32(tmp
, tmp
, QREG_CC_DEST
);
693 gen_op_and32(tmp
, tmp
, gen_im32(CCF_V
));
694 gen_op_jmp_nz32(tmp
, l2
);
699 case 15: /* LE (Z || (N ^ V)) */
700 tmp
= gen_new_qreg(QMODE_I32
);
701 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_Z
));
702 gen_op_jmp_nz32(tmp
, l1
);
703 tmp
= gen_new_qreg(QMODE_I32
);
704 gen_op_shr32(tmp
, QREG_CC_DEST
, gen_im32(2));
705 gen_op_xor32(tmp
, tmp
, QREG_CC_DEST
);
706 gen_op_and32(tmp
, tmp
, gen_im32(CCF_V
));
707 gen_op_jmp_nz32(tmp
, l1
);
710 /* Should never happen. */
721 l1
= gen_new_label();
722 cond
= (insn
>> 8) & 0xf;
724 gen_op_and32(reg
, reg
, gen_im32(0xffffff00));
725 gen_jmpcc(s
, cond
^ 1, l1
);
726 gen_op_or32(reg
, reg
, gen_im32(0xff));
730 /* Force a TB lookup after an instruction that changes the CPU state. */
731 static void gen_lookup_tb(DisasContext
*s
)
734 gen_op_mov32(QREG_PC
, gen_im32(s
->pc
));
735 s
->is_jmp
= DISAS_UPDATE
;
738 /* Generate a jump to the address in qreg DEST. */
739 static void gen_jmp(DisasContext
*s
, int dest
)
742 gen_op_mov32(QREG_PC
, dest
);
743 s
->is_jmp
= DISAS_JUMP
;
746 static void gen_exception(DisasContext
*s
, uint32_t where
, int nr
)
749 gen_jmp(s
, gen_im32(where
));
750 gen_op_raise_exception(nr
);
753 static inline void gen_addr_fault(DisasContext
*s
)
755 gen_exception(s
, s
->insn_pc
, EXCP_ADDRESS
);
758 #define SRC_EA(result, opsize, val, addrp) do { \
759 result = gen_ea(s, insn, opsize, val, addrp); \
760 if (result == -1) { \
766 #define DEST_EA(insn, opsize, val, addrp) do { \
767 int ea_result = gen_ea(s, insn, opsize, val, addrp); \
768 if (ea_result == -1) { \
774 /* Generate a jump to an immediate address. */
775 static void gen_jmp_tb(DisasContext
*s
, int n
, uint32_t dest
)
777 TranslationBlock
*tb
;
780 if (__builtin_expect (s
->singlestep_enabled
, 0)) {
781 gen_exception(s
, dest
, EXCP_DEBUG
);
782 } else if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) ||
783 (s
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
)) {
785 gen_op_mov32(QREG_PC
, gen_im32(dest
));
786 tcg_gen_exit_tb((long)tb
+ n
);
788 gen_jmp(s
, gen_im32(dest
));
791 s
->is_jmp
= DISAS_TB_JUMP
;
794 DISAS_INSN(undef_mac
)
796 gen_exception(s
, s
->pc
- 2, EXCP_LINEA
);
799 DISAS_INSN(undef_fpu
)
801 gen_exception(s
, s
->pc
- 2, EXCP_LINEF
);
806 gen_exception(s
, s
->pc
- 2, EXCP_UNSUPPORTED
);
807 cpu_abort(cpu_single_env
, "Illegal instruction: %04x @ %08x",
818 sign
= (insn
& 0x100) != 0;
820 tmp
= gen_new_qreg(QMODE_I32
);
822 gen_op_ext16s32(tmp
, reg
);
824 gen_op_ext16u32(tmp
, reg
);
825 SRC_EA(src
, OS_WORD
, sign
? -1 : 0, NULL
);
826 gen_op_mul32(tmp
, tmp
, src
);
827 gen_op_mov32(reg
, tmp
);
828 /* Unlike m68k, coldfire always clears the overflow bit. */
829 gen_logic_cc(s
, tmp
);
839 sign
= (insn
& 0x100) != 0;
842 gen_op_ext16s32(QREG_DIV1
, reg
);
844 gen_op_ext16u32(QREG_DIV1
, reg
);
846 SRC_EA(src
, OS_WORD
, sign
? -1 : 0, NULL
);
847 gen_op_mov32(QREG_DIV2
, src
);
854 tmp
= gen_new_qreg(QMODE_I32
);
855 src
= gen_new_qreg(QMODE_I32
);
856 gen_op_ext16u32(tmp
, QREG_DIV1
);
857 gen_op_shl32(src
, QREG_DIV2
, gen_im32(16));
858 gen_op_or32(reg
, tmp
, src
);
860 s
->cc_op
= CC_OP_FLAGS
;
870 ext
= lduw_code(s
->pc
);
873 gen_exception(s
, s
->pc
- 4, EXCP_UNSUPPORTED
);
878 gen_op_mov32(QREG_DIV1
, num
);
879 SRC_EA(den
, OS_LONG
, 0, NULL
);
880 gen_op_mov32(QREG_DIV2
, den
);
888 gen_op_mov32 (reg
, QREG_DIV1
);
891 gen_op_mov32 (reg
, QREG_DIV2
);
894 s
->cc_op
= CC_OP_FLAGS
;
906 add
= (insn
& 0x4000) != 0;
908 dest
= gen_new_qreg(QMODE_I32
);
910 SRC_EA(tmp
, OS_LONG
, 0, &addr
);
914 SRC_EA(src
, OS_LONG
, 0, NULL
);
917 gen_op_add32(dest
, tmp
, src
);
918 gen_op_update_xflag_lt(dest
, src
);
919 s
->cc_op
= CC_OP_ADD
;
921 gen_op_update_xflag_lt(tmp
, src
);
922 gen_op_sub32(dest
, tmp
, src
);
923 s
->cc_op
= CC_OP_SUB
;
925 gen_op_update_cc_add(dest
, src
);
927 DEST_EA(insn
, OS_LONG
, dest
, &addr
);
929 gen_op_mov32(reg
, dest
);
934 /* Reverse the order of the bits in REG. */
942 val
= gen_new_qreg(QMODE_I32
);
943 tmp1
= gen_new_qreg(QMODE_I32
);
944 tmp2
= gen_new_qreg(QMODE_I32
);
946 gen_op_mov32(val
, reg
);
947 /* Reverse bits within each nibble. */
948 gen_op_shl32(tmp1
, val
, gen_im32(3));
949 gen_op_and32(tmp1
, tmp1
, gen_im32(0x88888888));
950 gen_op_shl32(tmp2
, val
, gen_im32(1));
951 gen_op_and32(tmp2
, tmp2
, gen_im32(0x44444444));
952 gen_op_or32(tmp1
, tmp1
, tmp2
);
953 gen_op_shr32(tmp2
, val
, gen_im32(1));
954 gen_op_and32(tmp2
, tmp2
, gen_im32(0x22222222));
955 gen_op_or32(tmp1
, tmp1
, tmp2
);
956 gen_op_shr32(tmp2
, val
, gen_im32(3));
957 gen_op_and32(tmp2
, tmp2
, gen_im32(0x11111111));
958 gen_op_or32(tmp1
, tmp1
, tmp2
);
959 /* Reverse nibbles within bytes. */
960 gen_op_shl32(val
, tmp1
, gen_im32(4));
961 gen_op_and32(val
, val
, gen_im32(0xf0f0f0f0));
962 gen_op_shr32(tmp2
, tmp1
, gen_im32(4));
963 gen_op_and32(tmp2
, tmp2
, gen_im32(0x0f0f0f0f));
964 gen_op_or32(val
, val
, tmp2
);
966 gen_op_bswap32(reg
, val
);
967 gen_op_mov32(reg
, val
);
970 DISAS_INSN(bitop_reg
)
980 if ((insn
& 0x38) != 0)
984 op
= (insn
>> 6) & 3;
985 SRC_EA(src1
, opsize
, 0, op
? &addr
: NULL
);
986 src2
= DREG(insn
, 9);
987 dest
= gen_new_qreg(QMODE_I32
);
990 tmp
= gen_new_qreg(QMODE_I32
);
991 if (opsize
== OS_BYTE
)
992 gen_op_and32(tmp
, src2
, gen_im32(7));
994 gen_op_and32(tmp
, src2
, gen_im32(31));
996 tmp
= gen_new_qreg(QMODE_I32
);
997 gen_op_shl32(tmp
, gen_im32(1), src2
);
999 gen_op_btest(src1
, tmp
);
1002 gen_op_xor32(dest
, src1
, tmp
);
1005 gen_op_not32(tmp
, tmp
);
1006 gen_op_and32(dest
, src1
, tmp
);
1009 gen_op_or32(dest
, src1
, tmp
);
1015 DEST_EA(insn
, opsize
, dest
, &addr
);
1024 reg
= DREG(insn
, 0);
1025 tmp
= gen_new_qreg(QMODE_I32
);
1027 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_V
));
1028 l1
= gen_new_label();
1029 gen_op_jmp_z32(tmp
, l1
);
1030 tmp
= gen_new_qreg(QMODE_I32
);
1031 gen_op_shr32(tmp
, reg
, gen_im32(31));
1032 gen_op_xor32(tmp
, tmp
, gen_im32(0x80000000));
1033 gen_op_mov32(reg
, tmp
);
1035 gen_logic_cc(s
, tmp
);
1038 static void gen_push(DisasContext
*s
, int val
)
1042 tmp
= gen_new_qreg(QMODE_I32
);
1043 gen_op_sub32(tmp
, QREG_SP
, gen_im32(4));
1044 gen_store(s
, OS_LONG
, tmp
, val
);
1045 gen_op_mov32(QREG_SP
, tmp
);
1057 mask
= lduw_code(s
->pc
);
1059 tmp
= gen_lea(s
, insn
, OS_LONG
);
1064 addr
= gen_new_qreg(QMODE_I32
);
1065 gen_op_mov32(addr
, tmp
);
1066 is_load
= ((insn
& 0x0400) != 0);
1067 for (i
= 0; i
< 16; i
++, mask
>>= 1) {
1074 tmp
= gen_load(s
, OS_LONG
, addr
, 0);
1075 gen_op_mov32(reg
, tmp
);
1077 gen_store(s
, OS_LONG
, addr
, reg
);
1080 gen_op_add32(addr
, addr
, gen_im32(4));
1085 DISAS_INSN(bitop_im
)
1096 if ((insn
& 0x38) != 0)
1100 op
= (insn
>> 6) & 3;
1102 bitnum
= lduw_code(s
->pc
);
1104 if (bitnum
& 0xff00) {
1105 disas_undef(s
, insn
);
1109 SRC_EA(src1
, opsize
, 0, op
? &addr
: NULL
);
1112 tmp
= gen_new_qreg(QMODE_I32
);
1113 if (opsize
== OS_BYTE
)
1119 gen_op_btest(src1
, gen_im32(mask
));
1121 dest
= gen_new_qreg(QMODE_I32
);
1127 gen_op_xor32(dest
, src1
, gen_im32(mask
));
1130 gen_op_and32(dest
, src1
, gen_im32(~mask
));
1133 gen_op_or32(dest
, src1
, gen_im32(mask
));
1139 DEST_EA(insn
, opsize
, dest
, &addr
);
1142 DISAS_INSN(arith_im
)
1150 op
= (insn
>> 9) & 7;
1151 SRC_EA(src1
, OS_LONG
, 0, (op
== 6) ? NULL
: &addr
);
1152 src2
= gen_im32(read_im32(s
));
1153 dest
= gen_new_qreg(QMODE_I32
);
1156 gen_op_or32(dest
, src1
, src2
);
1157 gen_logic_cc(s
, dest
);
1160 gen_op_and32(dest
, src1
, src2
);
1161 gen_logic_cc(s
, dest
);
1164 gen_op_mov32(dest
, src1
);
1165 gen_op_update_xflag_lt(dest
, src2
);
1166 gen_op_sub32(dest
, dest
, src2
);
1167 gen_op_update_cc_add(dest
, src2
);
1168 s
->cc_op
= CC_OP_SUB
;
1171 gen_op_mov32(dest
, src1
);
1172 gen_op_add32(dest
, dest
, src2
);
1173 gen_op_update_cc_add(dest
, src2
);
1174 gen_op_update_xflag_lt(dest
, src2
);
1175 s
->cc_op
= CC_OP_ADD
;
1178 gen_op_xor32(dest
, src1
, src2
);
1179 gen_logic_cc(s
, dest
);
1182 gen_op_mov32(dest
, src1
);
1183 gen_op_sub32(dest
, dest
, src2
);
1184 gen_op_update_cc_add(dest
, src2
);
1185 s
->cc_op
= CC_OP_SUB
;
1191 DEST_EA(insn
, OS_LONG
, dest
, &addr
);
1199 reg
= DREG(insn
, 0);
1200 gen_op_bswap32(reg
, reg
);
1210 switch (insn
>> 12) {
1211 case 1: /* move.b */
1214 case 2: /* move.l */
1217 case 3: /* move.w */
1223 SRC_EA(src
, opsize
, -1, NULL
);
1224 op
= (insn
>> 6) & 7;
1227 /* The value will already have been sign extended. */
1228 dest
= AREG(insn
, 9);
1229 gen_op_mov32(dest
, src
);
1233 dest_ea
= ((insn
>> 9) & 7) | (op
<< 3);
1234 DEST_EA(dest_ea
, opsize
, src
, NULL
);
1235 /* This will be correct because loads sign extend. */
1236 gen_logic_cc(s
, src
);
1247 reg
= DREG(insn
, 0);
1248 dest
= gen_new_qreg(QMODE_I32
);
1249 gen_op_mov32 (dest
, gen_im32(0));
1250 gen_op_subx_cc(dest
, reg
);
1252 tmp
= gen_new_qreg(QMODE_I32
);
1253 gen_op_mov32 (tmp
, QREG_CC_DEST
);
1254 gen_op_update_cc_add(dest
, reg
);
1255 gen_op_mov32(reg
, dest
);
1256 s
->cc_op
= CC_OP_DYNAMIC
;
1258 gen_op_or32(tmp
, tmp
, gen_im32(~CCF_Z
));
1259 gen_op_and32(QREG_CC_DEST
, QREG_CC_DEST
, tmp
);
1260 s
->cc_op
= CC_OP_FLAGS
;
1268 reg
= AREG(insn
, 9);
1269 tmp
= gen_lea(s
, insn
, OS_LONG
);
1274 gen_op_mov32(reg
, tmp
);
1281 switch ((insn
>> 6) & 3) {
1294 DEST_EA(insn
, opsize
, gen_im32(0), NULL
);
1295 gen_logic_cc(s
, gen_im32(0));
1298 static int gen_get_ccr(DisasContext
*s
)
1303 dest
= gen_new_qreg(QMODE_I32
);
1304 gen_op_get_xflag(dest
);
1305 gen_op_shl32(dest
, dest
, gen_im32(4));
1306 gen_op_or32(dest
, dest
, QREG_CC_DEST
);
1310 DISAS_INSN(move_from_ccr
)
1315 ccr
= gen_get_ccr(s
);
1316 reg
= DREG(insn
, 0);
1317 gen_partset_reg(OS_WORD
, reg
, ccr
);
1325 reg
= DREG(insn
, 0);
1326 src1
= gen_new_qreg(QMODE_I32
);
1327 gen_op_mov32(src1
, reg
);
1328 gen_op_neg32(reg
, src1
);
1329 s
->cc_op
= CC_OP_SUB
;
1330 gen_op_update_cc_add(reg
, src1
);
1331 gen_op_update_xflag_lt(gen_im32(0), src1
);
1332 s
->cc_op
= CC_OP_SUB
;
1335 static void gen_set_sr_im(DisasContext
*s
, uint16_t val
, int ccr_only
)
1337 gen_op_logic_cc(gen_im32(val
& 0xf));
1338 gen_op_update_xflag_tst(gen_im32((val
& 0x10) >> 4));
1340 gen_op_set_sr(gen_im32(val
& 0xff00));
1344 static void gen_set_sr(DisasContext
*s
, uint16_t insn
, int ccr_only
)
1349 s
->cc_op
= CC_OP_FLAGS
;
1350 if ((insn
& 0x38) == 0)
1352 src1
= gen_new_qreg(QMODE_I32
);
1353 reg
= DREG(insn
, 0);
1354 gen_op_and32(src1
, reg
, gen_im32(0xf));
1355 gen_op_logic_cc(src1
);
1356 gen_op_shr32(src1
, reg
, gen_im32(4));
1357 gen_op_and32(src1
, src1
, gen_im32(1));
1358 gen_op_update_xflag_tst(src1
);
1363 else if ((insn
& 0x3f) == 0x3c)
1366 val
= lduw_code(s
->pc
);
1368 gen_set_sr_im(s
, val
, ccr_only
);
1371 disas_undef(s
, insn
);
1374 DISAS_INSN(move_to_ccr
)
1376 gen_set_sr(s
, insn
, 1);
1383 reg
= DREG(insn
, 0);
1384 gen_op_not32(reg
, reg
);
1385 gen_logic_cc(s
, reg
);
1395 dest
= gen_new_qreg(QMODE_I32
);
1396 src1
= gen_new_qreg(QMODE_I32
);
1397 src2
= gen_new_qreg(QMODE_I32
);
1398 reg
= DREG(insn
, 0);
1399 gen_op_shl32(src1
, reg
, gen_im32(16));
1400 gen_op_shr32(src2
, reg
, gen_im32(16));
1401 gen_op_or32(dest
, src1
, src2
);
1402 gen_op_mov32(reg
, dest
);
1403 gen_logic_cc(s
, dest
);
1410 tmp
= gen_lea(s
, insn
, OS_LONG
);
1424 reg
= DREG(insn
, 0);
1425 op
= (insn
>> 6) & 7;
1426 tmp
= gen_new_qreg(QMODE_I32
);
1428 gen_op_ext16s32(tmp
, reg
);
1430 gen_op_ext8s32(tmp
, reg
);
1432 gen_partset_reg(OS_WORD
, reg
, tmp
);
1434 gen_op_mov32(reg
, tmp
);
1435 gen_logic_cc(s
, tmp
);
1443 switch ((insn
>> 6) & 3) {
1456 SRC_EA(tmp
, opsize
, -1, NULL
);
1457 gen_logic_cc(s
, tmp
);
1462 /* Implemented as a NOP. */
1467 gen_exception(s
, s
->pc
- 2, EXCP_ILLEGAL
);
1470 /* ??? This should be atomic. */
1477 dest
= gen_new_qreg(QMODE_I32
);
1478 SRC_EA(src1
, OS_BYTE
, -1, &addr
);
1479 gen_logic_cc(s
, src1
);
1480 gen_op_or32(dest
, src1
, gen_im32(0x80));
1481 DEST_EA(insn
, OS_BYTE
, dest
, &addr
);
1491 /* The upper 32 bits of the product are discarded, so
1492 muls.l and mulu.l are functionally equivalent. */
1493 ext
= lduw_code(s
->pc
);
1496 gen_exception(s
, s
->pc
- 4, EXCP_UNSUPPORTED
);
1499 reg
= DREG(ext
, 12);
1500 SRC_EA(src1
, OS_LONG
, 0, NULL
);
1501 dest
= gen_new_qreg(QMODE_I32
);
1502 gen_op_mul32(dest
, src1
, reg
);
1503 gen_op_mov32(reg
, dest
);
1504 /* Unlike m68k, coldfire always clears the overflow bit. */
1505 gen_logic_cc(s
, dest
);
1514 offset
= ldsw_code(s
->pc
);
1516 reg
= AREG(insn
, 0);
1517 tmp
= gen_new_qreg(QMODE_I32
);
1518 gen_op_sub32(tmp
, QREG_SP
, gen_im32(4));
1519 gen_store(s
, OS_LONG
, tmp
, reg
);
1521 gen_op_mov32(reg
, tmp
);
1522 gen_op_add32(QREG_SP
, tmp
, gen_im32(offset
));
1531 src
= gen_new_qreg(QMODE_I32
);
1532 reg
= AREG(insn
, 0);
1533 gen_op_mov32(src
, reg
);
1534 tmp
= gen_load(s
, OS_LONG
, src
, 0);
1535 gen_op_mov32(reg
, tmp
);
1536 gen_op_add32(QREG_SP
, src
, gen_im32(4));
1547 tmp
= gen_load(s
, OS_LONG
, QREG_SP
, 0);
1548 gen_op_add32(QREG_SP
, QREG_SP
, gen_im32(4));
1556 /* Load the target address first to ensure correct exception
1558 tmp
= gen_lea(s
, insn
, OS_LONG
);
1563 if ((insn
& 0x40) == 0) {
1565 gen_push(s
, gen_im32(s
->pc
));
1578 SRC_EA(src1
, OS_LONG
, 0, &addr
);
1579 val
= (insn
>> 9) & 7;
1582 src2
= gen_im32(val
);
1583 dest
= gen_new_qreg(QMODE_I32
);
1584 gen_op_mov32(dest
, src1
);
1585 if ((insn
& 0x38) == 0x08) {
1586 /* Don't update condition codes if the destination is an
1587 address register. */
1588 if (insn
& 0x0100) {
1589 gen_op_sub32(dest
, dest
, src2
);
1591 gen_op_add32(dest
, dest
, src2
);
1594 if (insn
& 0x0100) {
1595 gen_op_update_xflag_lt(dest
, src2
);
1596 gen_op_sub32(dest
, dest
, src2
);
1597 s
->cc_op
= CC_OP_SUB
;
1599 gen_op_add32(dest
, dest
, src2
);
1600 gen_op_update_xflag_lt(dest
, src2
);
1601 s
->cc_op
= CC_OP_ADD
;
1603 gen_op_update_cc_add(dest
, src2
);
1605 DEST_EA(insn
, OS_LONG
, dest
, &addr
);
1611 case 2: /* One extension word. */
1614 case 3: /* Two extension words. */
1617 case 4: /* No extension words. */
1620 disas_undef(s
, insn
);
1632 op
= (insn
>> 8) & 0xf;
1633 offset
= (int8_t)insn
;
1635 offset
= ldsw_code(s
->pc
);
1637 } else if (offset
== -1) {
1638 offset
= read_im32(s
);
1642 gen_push(s
, gen_im32(s
->pc
));
1647 l1
= gen_new_label();
1648 gen_jmpcc(s
, ((insn
>> 8) & 0xf) ^ 1, l1
);
1649 gen_jmp_tb(s
, 1, base
+ offset
);
1651 gen_jmp_tb(s
, 0, s
->pc
);
1653 /* Unconditional branch. */
1654 gen_jmp_tb(s
, 0, base
+ offset
);
1662 tmp
= gen_im32((int8_t)insn
);
1663 gen_op_mov32(DREG(insn
, 9), tmp
);
1664 gen_logic_cc(s
, tmp
);
1677 SRC_EA(src
, opsize
, (insn
& 0x80) ? 0 : -1, NULL
);
1678 reg
= DREG(insn
, 9);
1679 gen_op_mov32(reg
, src
);
1680 gen_logic_cc(s
, src
);
1690 reg
= DREG(insn
, 9);
1691 dest
= gen_new_qreg(QMODE_I32
);
1693 SRC_EA(src
, OS_LONG
, 0, &addr
);
1694 gen_op_or32(dest
, src
, reg
);
1695 DEST_EA(insn
, OS_LONG
, dest
, &addr
);
1697 SRC_EA(src
, OS_LONG
, 0, NULL
);
1698 gen_op_or32(dest
, src
, reg
);
1699 gen_op_mov32(reg
, dest
);
1701 gen_logic_cc(s
, dest
);
1709 SRC_EA(src
, OS_LONG
, 0, NULL
);
1710 reg
= AREG(insn
, 9);
1711 gen_op_sub32(reg
, reg
, src
);
1722 reg
= DREG(insn
, 9);
1723 src
= DREG(insn
, 0);
1724 dest
= gen_new_qreg(QMODE_I32
);
1725 gen_op_mov32 (dest
, reg
);
1726 gen_op_subx_cc(dest
, src
);
1728 tmp
= gen_new_qreg(QMODE_I32
);
1729 gen_op_mov32 (tmp
, QREG_CC_DEST
);
1730 gen_op_update_cc_add(dest
, src
);
1731 gen_op_mov32(reg
, dest
);
1732 s
->cc_op
= CC_OP_DYNAMIC
;
1734 gen_op_or32(tmp
, tmp
, gen_im32(~CCF_Z
));
1735 gen_op_and32(QREG_CC_DEST
, QREG_CC_DEST
, tmp
);
1736 s
->cc_op
= CC_OP_FLAGS
;
1744 val
= (insn
>> 9) & 7;
1747 src
= gen_im32(val
);
1748 gen_logic_cc(s
, src
);
1749 DEST_EA(insn
, OS_LONG
, src
, NULL
);
1760 op
= (insn
>> 6) & 3;
1764 s
->cc_op
= CC_OP_CMPB
;
1768 s
->cc_op
= CC_OP_CMPW
;
1772 s
->cc_op
= CC_OP_SUB
;
1777 SRC_EA(src
, opsize
, -1, NULL
);
1778 reg
= DREG(insn
, 9);
1779 dest
= gen_new_qreg(QMODE_I32
);
1780 gen_op_sub32(dest
, reg
, src
);
1781 gen_op_update_cc_add(dest
, src
);
1796 SRC_EA(src
, opsize
, -1, NULL
);
1797 reg
= AREG(insn
, 9);
1798 dest
= gen_new_qreg(QMODE_I32
);
1799 gen_op_sub32(dest
, reg
, src
);
1800 gen_op_update_cc_add(dest
, src
);
1801 s
->cc_op
= CC_OP_SUB
;
1811 SRC_EA(src
, OS_LONG
, 0, &addr
);
1812 reg
= DREG(insn
, 9);
1813 dest
= gen_new_qreg(QMODE_I32
);
1814 gen_op_xor32(dest
, src
, reg
);
1815 gen_logic_cc(s
, dest
);
1816 DEST_EA(insn
, OS_LONG
, dest
, &addr
);
1826 reg
= DREG(insn
, 9);
1827 dest
= gen_new_qreg(QMODE_I32
);
1829 SRC_EA(src
, OS_LONG
, 0, &addr
);
1830 gen_op_and32(dest
, src
, reg
);
1831 DEST_EA(insn
, OS_LONG
, dest
, &addr
);
1833 SRC_EA(src
, OS_LONG
, 0, NULL
);
1834 gen_op_and32(dest
, src
, reg
);
1835 gen_op_mov32(reg
, dest
);
1837 gen_logic_cc(s
, dest
);
1845 SRC_EA(src
, OS_LONG
, 0, NULL
);
1846 reg
= AREG(insn
, 9);
1847 gen_op_add32(reg
, reg
, src
);
1858 reg
= DREG(insn
, 9);
1859 src
= DREG(insn
, 0);
1860 dest
= gen_new_qreg(QMODE_I32
);
1861 gen_op_mov32 (dest
, reg
);
1862 gen_op_addx_cc(dest
, src
);
1864 tmp
= gen_new_qreg(QMODE_I32
);
1865 gen_op_mov32 (tmp
, QREG_CC_DEST
);
1866 gen_op_update_cc_add(dest
, src
);
1867 gen_op_mov32(reg
, dest
);
1868 s
->cc_op
= CC_OP_DYNAMIC
;
1870 gen_op_or32(tmp
, tmp
, gen_im32(~CCF_Z
));
1871 gen_op_and32(QREG_CC_DEST
, QREG_CC_DEST
, tmp
);
1872 s
->cc_op
= CC_OP_FLAGS
;
1875 DISAS_INSN(shift_im
)
1880 reg
= DREG(insn
, 0);
1881 tmp
= (insn
>> 9) & 7;
1885 gen_op_shl_im_cc(reg
, tmp
);
1886 s
->cc_op
= CC_OP_SHL
;
1889 gen_op_shr_im_cc(reg
, tmp
);
1890 s
->cc_op
= CC_OP_SHR
;
1892 gen_op_sar_im_cc(reg
, tmp
);
1893 s
->cc_op
= CC_OP_SAR
;
1898 DISAS_INSN(shift_reg
)
1904 reg
= DREG(insn
, 0);
1905 src
= DREG(insn
, 9);
1906 tmp
= gen_new_qreg(QMODE_I32
);
1907 gen_op_and32(tmp
, src
, gen_im32(63));
1909 gen_op_shl_cc(reg
, tmp
);
1910 s
->cc_op
= CC_OP_SHL
;
1913 gen_op_shr_cc(reg
, tmp
);
1914 s
->cc_op
= CC_OP_SHR
;
1916 gen_op_sar_cc(reg
, tmp
);
1917 s
->cc_op
= CC_OP_SAR
;
1925 reg
= DREG(insn
, 0);
1926 gen_logic_cc(s
, reg
);
1927 gen_op_ff1(reg
, reg
);
1930 static int gen_get_sr(DisasContext
*s
)
1935 ccr
= gen_get_ccr(s
);
1936 sr
= gen_new_qreg(QMODE_I32
);
1937 gen_op_and32(sr
, QREG_SR
, gen_im32(0xffe0));
1938 gen_op_or32(sr
, sr
, ccr
);
1948 ext
= lduw_code(s
->pc
);
1950 if (ext
!= 0x46FC) {
1951 gen_exception(s
, addr
, EXCP_UNSUPPORTED
);
1954 ext
= lduw_code(s
->pc
);
1956 if (IS_USER(s
) || (ext
& SR_S
) == 0) {
1957 gen_exception(s
, addr
, EXCP_PRIVILEGE
);
1960 gen_push(s
, gen_get_sr(s
));
1961 gen_set_sr_im(s
, ext
, 0);
1964 DISAS_INSN(move_from_sr
)
1970 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1974 reg
= DREG(insn
, 0);
1975 gen_partset_reg(OS_WORD
, reg
, sr
);
1978 DISAS_INSN(move_to_sr
)
1981 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1984 gen_set_sr(s
, insn
, 0);
1988 DISAS_INSN(move_from_usp
)
1991 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1994 /* TODO: Implement USP. */
1995 gen_exception(s
, s
->pc
- 2, EXCP_ILLEGAL
);
1998 DISAS_INSN(move_to_usp
)
2001 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2004 /* TODO: Implement USP. */
2005 gen_exception(s
, s
->pc
- 2, EXCP_ILLEGAL
);
2010 gen_jmp(s
, gen_im32(s
->pc
));
2019 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2023 ext
= lduw_code(s
->pc
);
2026 gen_set_sr_im(s
, ext
, 0);
2027 gen_jmp(s
, gen_im32(s
->pc
));
2034 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2037 gen_exception(s
, s
->pc
- 2, EXCP_RTE
);
2046 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2050 ext
= lduw_code(s
->pc
);
2054 reg
= AREG(ext
, 12);
2056 reg
= DREG(ext
, 12);
2058 gen_op_movec(gen_im32(ext
& 0xfff), reg
);
2065 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2068 /* ICache fetch. Implement as no-op. */
2074 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2077 /* Cache push/invalidate. Implement as no-op. */
2082 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2088 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2091 /* TODO: Implement wdebug. */
2092 qemu_assert(0, "WDEBUG not implemented");
2097 gen_exception(s
, s
->pc
- 2, EXCP_TRAP0
+ (insn
& 0xf));
2100 /* ??? FP exceptions are not implemented. Most exceptions are deferred until
2101 immediately before the next FP instruction is executed. */
2112 ext
= lduw_code(s
->pc
);
2114 opmode
= ext
& 0x7f;
2115 switch ((ext
>> 13) & 7) {
2120 case 3: /* fmove out */
2123 /* ??? TODO: Proper behavior on overflow. */
2124 switch ((ext
>> 10) & 7) {
2127 res
= gen_new_qreg(QMODE_I32
);
2128 gen_op_f64_to_i32(res
, src
);
2132 res
= gen_new_qreg(QMODE_F32
);
2133 gen_op_f64_to_f32(res
, src
);
2137 res
= gen_new_qreg(QMODE_I32
);
2138 gen_op_f64_to_i32(res
, src
);
2146 res
= gen_new_qreg(QMODE_I32
);
2147 gen_op_f64_to_i32(res
, src
);
2152 DEST_EA(insn
, opsize
, res
, NULL
);
2154 case 4: /* fmove to control register. */
2155 switch ((ext
>> 10) & 7) {
2157 /* Not implemented. Ignore writes. */
2162 cpu_abort(NULL
, "Unimplemented: fmove to control %d",
2166 case 5: /* fmove from control register. */
2167 switch ((ext
>> 10) & 7) {
2169 /* Not implemented. Always return zero. */
2175 cpu_abort(NULL
, "Unimplemented: fmove from control %d",
2179 DEST_EA(insn
, OS_LONG
, res
, NULL
);
2181 case 6: /* fmovem */
2186 if ((ext
& 0x1f00) != 0x1000 || (ext
& 0xff) == 0)
2188 src
= gen_lea(s
, insn
, OS_LONG
);
2193 addr
= gen_new_qreg(QMODE_I32
);
2194 gen_op_mov32(addr
, src
);
2200 if (ext
& (1 << 13)) {
2202 gen_st(s
, f64
, addr
, dest
);
2205 gen_ld(s
, f64
, dest
, addr
);
2207 if (ext
& (mask
- 1))
2208 gen_op_add32(addr
, addr
, gen_im32(8));
2216 if (ext
& (1 << 14)) {
2219 /* Source effective address. */
2220 switch ((ext
>> 10) & 7) {
2221 case 0: opsize
= OS_LONG
; break;
2222 case 1: opsize
= OS_SINGLE
; break;
2223 case 4: opsize
= OS_WORD
; break;
2224 case 5: opsize
= OS_DOUBLE
; break;
2225 case 6: opsize
= OS_BYTE
; break;
2229 SRC_EA(tmp
, opsize
, -1, NULL
);
2230 if (opsize
== OS_DOUBLE
) {
2233 src
= gen_new_qreg(QMODE_F64
);
2238 gen_op_i32_to_f64(src
, tmp
);
2241 gen_op_f32_to_f64(src
, tmp
);
2246 /* Source register. */
2247 src
= FREG(ext
, 10);
2249 dest
= FREG(ext
, 7);
2250 res
= gen_new_qreg(QMODE_F64
);
2252 gen_op_movf64(res
, dest
);
2255 case 0: case 0x40: case 0x44: /* fmove */
2256 gen_op_movf64(res
, src
);
2259 gen_op_iround_f64(res
, src
);
2262 case 3: /* fintrz */
2263 gen_op_itrunc_f64(res
, src
);
2266 case 4: case 0x41: case 0x45: /* fsqrt */
2267 gen_op_sqrtf64(res
, src
);
2269 case 0x18: case 0x58: case 0x5c: /* fabs */
2270 gen_op_absf64(res
, src
);
2272 case 0x1a: case 0x5a: case 0x5e: /* fneg */
2273 gen_op_chsf64(res
, src
);
2275 case 0x20: case 0x60: case 0x64: /* fdiv */
2276 gen_op_divf64(res
, res
, src
);
2278 case 0x22: case 0x62: case 0x66: /* fadd */
2279 gen_op_addf64(res
, res
, src
);
2281 case 0x23: case 0x63: case 0x67: /* fmul */
2282 gen_op_mulf64(res
, res
, src
);
2284 case 0x28: case 0x68: case 0x6c: /* fsub */
2285 gen_op_subf64(res
, res
, src
);
2287 case 0x38: /* fcmp */
2288 gen_op_sub_cmpf64(res
, res
, src
);
2292 case 0x3a: /* ftst */
2293 gen_op_movf64(res
, src
);
2301 if (opmode
& 0x40) {
2302 if ((opmode
& 0x4) != 0)
2304 } else if ((s
->fpcr
& M68K_FPCR_PREC
) == 0) {
2311 tmp
= gen_new_qreg(QMODE_F32
);
2312 gen_op_f64_to_f32(tmp
, res
);
2313 gen_op_f32_to_f64(res
, tmp
);
2315 gen_op_fp_result(res
);
2317 gen_op_movf64(dest
, res
);
2322 disas_undef_fpu(s
, insn
);
2334 offset
= ldsw_code(s
->pc
);
2336 if (insn
& (1 << 6)) {
2337 offset
= (offset
<< 16) | lduw_code(s
->pc
);
2341 l1
= gen_new_label();
2342 /* TODO: Raise BSUN exception. */
2343 flag
= gen_new_qreg(QMODE_I32
);
2344 zero
= gen_new_qreg(QMODE_F64
);
2345 gen_op_zerof64(zero
);
2346 gen_op_compare_quietf64(flag
, QREG_FP_RESULT
, zero
);
2347 /* Jump to l1 if condition is true. */
2348 switch (insn
& 0xf) {
2351 case 1: /* eq (=0) */
2352 gen_op_jmp_z32(flag
, l1
);
2354 case 2: /* ogt (=1) */
2355 gen_op_sub32(flag
, flag
, gen_im32(1));
2356 gen_op_jmp_z32(flag
, l1
);
2358 case 3: /* oge (=0 or =1) */
2359 gen_op_jmp_z32(flag
, l1
);
2360 gen_op_sub32(flag
, flag
, gen_im32(1));
2361 gen_op_jmp_z32(flag
, l1
);
2363 case 4: /* olt (=-1) */
2364 gen_op_jmp_s32(flag
, l1
);
2366 case 5: /* ole (=-1 or =0) */
2367 gen_op_jmp_s32(flag
, l1
);
2368 gen_op_jmp_z32(flag
, l1
);
2370 case 6: /* ogl (=-1 or =1) */
2371 gen_op_jmp_s32(flag
, l1
);
2372 gen_op_sub32(flag
, flag
, gen_im32(1));
2373 gen_op_jmp_z32(flag
, l1
);
2375 case 7: /* or (=2) */
2376 gen_op_sub32(flag
, flag
, gen_im32(2));
2377 gen_op_jmp_z32(flag
, l1
);
2379 case 8: /* un (<2) */
2380 gen_op_sub32(flag
, flag
, gen_im32(2));
2381 gen_op_jmp_s32(flag
, l1
);
2383 case 9: /* ueq (=0 or =2) */
2384 gen_op_jmp_z32(flag
, l1
);
2385 gen_op_sub32(flag
, flag
, gen_im32(2));
2386 gen_op_jmp_z32(flag
, l1
);
2388 case 10: /* ugt (>0) */
2389 /* ??? Add jmp_gtu. */
2390 gen_op_sub32(flag
, flag
, gen_im32(1));
2391 gen_op_jmp_ns32(flag
, l1
);
2393 case 11: /* uge (>=0) */
2394 gen_op_jmp_ns32(flag
, l1
);
2396 case 12: /* ult (=-1 or =2) */
2397 gen_op_jmp_s32(flag
, l1
);
2398 gen_op_sub32(flag
, flag
, gen_im32(2));
2399 gen_op_jmp_z32(flag
, l1
);
2401 case 13: /* ule (!=1) */
2402 gen_op_sub32(flag
, flag
, gen_im32(1));
2403 gen_op_jmp_nz32(flag
, l1
);
2405 case 14: /* ne (!=0) */
2406 gen_op_jmp_nz32(flag
, l1
);
2409 gen_op_mov32(flag
, gen_im32(1));
2412 gen_jmp_tb(s
, 0, s
->pc
);
2414 gen_jmp_tb(s
, 1, addr
+ offset
);
2417 DISAS_INSN(frestore
)
2419 /* TODO: Implement frestore. */
2420 qemu_assert(0, "FRESTORE not implemented");
2425 /* TODO: Implement fsave. */
2426 qemu_assert(0, "FSAVE not implemented");
2429 static inline int gen_mac_extract_word(DisasContext
*s
, int val
, int upper
)
2431 int tmp
= gen_new_qreg(QMODE_I32
);
2432 if (s
->env
->macsr
& MACSR_FI
) {
2434 gen_op_and32(tmp
, val
, gen_im32(0xffff0000));
2436 gen_op_shl32(tmp
, val
, gen_im32(16));
2437 } else if (s
->env
->macsr
& MACSR_SU
) {
2439 gen_op_sar32(tmp
, val
, gen_im32(16));
2441 gen_op_ext16s32(tmp
, val
);
2444 gen_op_shr32(tmp
, val
, gen_im32(16));
2446 gen_op_ext16u32(tmp
, val
);
2462 int saved_flags
= -1;
2464 ext
= lduw_code(s
->pc
);
2467 acc
= ((insn
>> 7) & 1) | ((ext
>> 3) & 2);
2468 dual
= ((insn
& 0x30) != 0 && (ext
& 3) != 0);
2469 if (dual
&& !m68k_feature(s
->env
, M68K_FEATURE_CF_EMAC_B
)) {
2470 disas_undef(s
, insn
);
2474 /* MAC with load. */
2475 tmp
= gen_lea(s
, insn
, OS_LONG
);
2476 addr
= gen_new_qreg(QMODE_I32
);
2477 gen_op_and32(addr
, tmp
, QREG_MAC_MASK
);
2478 /* Load the value now to ensure correct exception behavior.
2479 Perform writeback after reading the MAC inputs. */
2480 loadval
= gen_load(s
, OS_LONG
, addr
, 0);
2483 rx
= (ext
& 0x8000) ? AREG(ext
, 12) : DREG(insn
, 12);
2484 ry
= (ext
& 8) ? AREG(ext
, 0) : DREG(ext
, 0);
2486 loadval
= addr
= -1;
2487 rx
= (insn
& 0x40) ? AREG(insn
, 9) : DREG(insn
, 9);
2488 ry
= (insn
& 8) ? AREG(insn
, 0) : DREG(insn
, 0);
2491 gen_op_mac_clear_flags();
2493 if ((s
->env
->macsr
& MACSR_OMC
) != 0 && !dual
) {
2494 /* Skip the multiply if we know we will ignore it. */
2495 l1
= gen_new_label();
2496 tmp
= gen_new_qreg(QMODE_I32
);
2497 gen_op_and32(tmp
, QREG_MACSR
, gen_im32(1 << (acc
+ 8)));
2498 gen_op_jmp_nz32(tmp
, l1
);
2501 if ((ext
& 0x0800) == 0) {
2503 rx
= gen_mac_extract_word(s
, rx
, (ext
& 0x80) != 0);
2504 ry
= gen_mac_extract_word(s
, ry
, (ext
& 0x40) != 0);
2506 if (s
->env
->macsr
& MACSR_FI
) {
2507 gen_op_macmulf(rx
, ry
);
2509 if (s
->env
->macsr
& MACSR_SU
)
2510 gen_op_macmuls(rx
, ry
);
2512 gen_op_macmulu(rx
, ry
);
2513 switch ((ext
>> 9) & 3) {
2524 /* Save the overflow flag from the multiply. */
2525 saved_flags
= gen_new_qreg(QMODE_I32
);
2526 gen_op_mov32(saved_flags
, QREG_MACSR
);
2529 if ((s
->env
->macsr
& MACSR_OMC
) != 0 && dual
) {
2530 /* Skip the accumulate if the value is already saturated. */
2531 l1
= gen_new_label();
2532 tmp
= gen_new_qreg(QMODE_I32
);
2533 gen_op_and32(tmp
, QREG_MACSR
, gen_im32(MACSR_PAV0
<< acc
));
2534 gen_op_jmp_nz32(tmp
, l1
);
2542 if (s
->env
->macsr
& MACSR_FI
)
2543 gen_op_macsatf(acc
);
2544 else if (s
->env
->macsr
& MACSR_SU
)
2545 gen_op_macsats(acc
);
2547 gen_op_macsatu(acc
);
2553 /* Dual accumulate variant. */
2554 acc
= (ext
>> 2) & 3;
2555 /* Restore the overflow flag from the multiplier. */
2556 gen_op_mov32(QREG_MACSR
, saved_flags
);
2557 if ((s
->env
->macsr
& MACSR_OMC
) != 0) {
2558 /* Skip the accumulate if the value is already saturated. */
2559 l1
= gen_new_label();
2560 tmp
= gen_new_qreg(QMODE_I32
);
2561 gen_op_and32(tmp
, QREG_MACSR
, gen_im32(MACSR_PAV0
<< acc
));
2562 gen_op_jmp_nz32(tmp
, l1
);
2568 if (s
->env
->macsr
& MACSR_FI
)
2569 gen_op_macsatf(acc
);
2570 else if (s
->env
->macsr
& MACSR_SU
)
2571 gen_op_macsats(acc
);
2573 gen_op_macsatu(acc
);
2577 gen_op_mac_set_flags(acc
);
2581 rw
= (insn
& 0x40) ? AREG(insn
, 9) : DREG(insn
, 9);
2582 gen_op_mov32(rw
, loadval
);
2583 /* FIXME: Should address writeback happen with the masked or
2585 switch ((insn
>> 3) & 7) {
2586 case 3: /* Post-increment. */
2587 gen_op_add32(AREG(insn
, 0), addr
, gen_im32(4));
2589 case 4: /* Pre-decrement. */
2590 gen_op_mov32(AREG(insn
, 0), addr
);
2595 DISAS_INSN(from_mac
)
2600 rx
= (insn
& 8) ? AREG(insn
, 0) : DREG(insn
, 0);
2601 acc
= (insn
>> 9) & 3;
2602 if (s
->env
->macsr
& MACSR_FI
) {
2603 gen_op_get_macf(rx
, acc
);
2604 } else if ((s
->env
->macsr
& MACSR_OMC
) == 0) {
2605 gen_op_get_maci(rx
, acc
);
2606 } else if (s
->env
->macsr
& MACSR_SU
) {
2607 gen_op_get_macs(rx
, acc
);
2609 gen_op_get_macu(rx
, acc
);
2612 gen_op_clear_mac(acc
);
2615 DISAS_INSN(move_mac
)
2620 dest
= (insn
>> 9) & 3;
2621 gen_op_move_mac(dest
, src
);
2622 gen_op_mac_clear_flags();
2623 gen_op_mac_set_flags(dest
);
2626 DISAS_INSN(from_macsr
)
2630 reg
= (insn
& 8) ? AREG(insn
, 0) : DREG(insn
, 0);
2631 gen_op_mov32(reg
, QREG_MACSR
);
2634 DISAS_INSN(from_mask
)
2637 reg
= (insn
& 8) ? AREG(insn
, 0) : DREG(insn
, 0);
2638 gen_op_mov32(reg
, QREG_MAC_MASK
);
2641 DISAS_INSN(from_mext
)
2645 reg
= (insn
& 8) ? AREG(insn
, 0) : DREG(insn
, 0);
2646 acc
= (insn
& 0x400) ? 2 : 0;
2647 if (s
->env
->macsr
& MACSR_FI
)
2648 gen_op_get_mac_extf(reg
, acc
);
2650 gen_op_get_mac_exti(reg
, acc
);
2653 DISAS_INSN(macsr_to_ccr
)
2655 gen_op_mov32(QREG_CC_X
, gen_im32(0));
2656 gen_op_and32(QREG_CC_DEST
, QREG_MACSR
, gen_im32(0xf));
2657 s
->cc_op
= CC_OP_FLAGS
;
2664 acc
= (insn
>>9) & 3;
2665 SRC_EA(val
, OS_LONG
, 0, NULL
);
2666 if (s
->env
->macsr
& MACSR_FI
) {
2667 gen_op_set_macf(val
, acc
);
2668 } else if (s
->env
->macsr
& MACSR_SU
) {
2669 gen_op_set_macs(val
, acc
);
2671 gen_op_set_macu(val
, acc
);
2673 gen_op_mac_clear_flags();
2674 gen_op_mac_set_flags(acc
);
2677 DISAS_INSN(to_macsr
)
2680 SRC_EA(val
, OS_LONG
, 0, NULL
);
2681 gen_op_set_macsr(val
);
2688 SRC_EA(val
, OS_LONG
, 0, NULL
);
2689 gen_op_or32(QREG_MAC_MASK
, val
, gen_im32(0xffff0000));
2696 SRC_EA(val
, OS_LONG
, 0, NULL
);
2697 acc
= (insn
& 0x400) ? 2 : 0;
2698 if (s
->env
->macsr
& MACSR_FI
)
2699 gen_op_set_mac_extf(val
, acc
);
2700 else if (s
->env
->macsr
& MACSR_SU
)
2701 gen_op_set_mac_exts(val
, acc
);
2703 gen_op_set_mac_extu(val
, acc
);
2706 static disas_proc opcode_table
[65536];
2709 register_opcode (disas_proc proc
, uint16_t opcode
, uint16_t mask
)
2715 /* Sanity check. All set bits must be included in the mask. */
2716 if (opcode
& ~mask
) {
2718 "qemu internal error: bogus opcode definition %04x/%04x\n",
2722 /* This could probably be cleverer. For now just optimize the case where
2723 the top bits are known. */
2724 /* Find the first zero bit in the mask. */
2726 while ((i
& mask
) != 0)
2728 /* Iterate over all combinations of this and lower bits. */
2733 from
= opcode
& ~(i
- 1);
2735 for (i
= from
; i
< to
; i
++) {
2736 if ((i
& mask
) == opcode
)
2737 opcode_table
[i
] = proc
;
2741 /* Register m68k opcode handlers. Order is important.
2742 Later insn override earlier ones. */
2743 void register_m68k_insns (CPUM68KState
*env
)
2745 #define INSN(name, opcode, mask, feature) do { \
2746 if (m68k_feature(env, M68K_FEATURE_##feature)) \
2747 register_opcode(disas_##name, 0x##opcode, 0x##mask); \
2749 INSN(undef
, 0000, 0000, CF_ISA_A
);
2750 INSN(arith_im
, 0080, fff8
, CF_ISA_A
);
2751 INSN(bitrev
, 00c0
, fff8
, CF_ISA_APLUSC
);
2752 INSN(bitop_reg
, 0100, f1c0
, CF_ISA_A
);
2753 INSN(bitop_reg
, 0140, f1c0
, CF_ISA_A
);
2754 INSN(bitop_reg
, 0180, f1c0
, CF_ISA_A
);
2755 INSN(bitop_reg
, 01c0
, f1c0
, CF_ISA_A
);
2756 INSN(arith_im
, 0280, fff8
, CF_ISA_A
);
2757 INSN(byterev
, 02c0
, fff8
, CF_ISA_APLUSC
);
2758 INSN(arith_im
, 0480, fff8
, CF_ISA_A
);
2759 INSN(ff1
, 04c0
, fff8
, CF_ISA_APLUSC
);
2760 INSN(arith_im
, 0680, fff8
, CF_ISA_A
);
2761 INSN(bitop_im
, 0800, ffc0
, CF_ISA_A
);
2762 INSN(bitop_im
, 0840, ffc0
, CF_ISA_A
);
2763 INSN(bitop_im
, 0880, ffc0
, CF_ISA_A
);
2764 INSN(bitop_im
, 08c0
, ffc0
, CF_ISA_A
);
2765 INSN(arith_im
, 0a80
, fff8
, CF_ISA_A
);
2766 INSN(arith_im
, 0c00
, ff38
, CF_ISA_A
);
2767 INSN(move
, 1000, f000
, CF_ISA_A
);
2768 INSN(move
, 2000, f000
, CF_ISA_A
);
2769 INSN(move
, 3000, f000
, CF_ISA_A
);
2770 INSN(strldsr
, 40e7
, ffff
, CF_ISA_APLUSC
);
2771 INSN(negx
, 4080, fff8
, CF_ISA_A
);
2772 INSN(move_from_sr
, 40c0
, fff8
, CF_ISA_A
);
2773 INSN(lea
, 41c0
, f1c0
, CF_ISA_A
);
2774 INSN(clr
, 4200, ff00
, CF_ISA_A
);
2775 INSN(undef
, 42c0
, ffc0
, CF_ISA_A
);
2776 INSN(move_from_ccr
, 42c0
, fff8
, CF_ISA_A
);
2777 INSN(neg
, 4480, fff8
, CF_ISA_A
);
2778 INSN(move_to_ccr
, 44c0
, ffc0
, CF_ISA_A
);
2779 INSN(not, 4680, fff8
, CF_ISA_A
);
2780 INSN(move_to_sr
, 46c0
, ffc0
, CF_ISA_A
);
2781 INSN(pea
, 4840, ffc0
, CF_ISA_A
);
2782 INSN(swap
, 4840, fff8
, CF_ISA_A
);
2783 INSN(movem
, 48c0
, fbc0
, CF_ISA_A
);
2784 INSN(ext
, 4880, fff8
, CF_ISA_A
);
2785 INSN(ext
, 48c0
, fff8
, CF_ISA_A
);
2786 INSN(ext
, 49c0
, fff8
, CF_ISA_A
);
2787 INSN(tst
, 4a00
, ff00
, CF_ISA_A
);
2788 INSN(tas
, 4ac0
, ffc0
, CF_ISA_B
);
2789 INSN(halt
, 4ac8
, ffff
, CF_ISA_A
);
2790 INSN(pulse
, 4acc
, ffff
, CF_ISA_A
);
2791 INSN(illegal
, 4afc
, ffff
, CF_ISA_A
);
2792 INSN(mull
, 4c00
, ffc0
, CF_ISA_A
);
2793 INSN(divl
, 4c40
, ffc0
, CF_ISA_A
);
2794 INSN(sats
, 4c80
, fff8
, CF_ISA_B
);
2795 INSN(trap
, 4e40
, fff0
, CF_ISA_A
);
2796 INSN(link
, 4e50
, fff8
, CF_ISA_A
);
2797 INSN(unlk
, 4e58
, fff8
, CF_ISA_A
);
2798 INSN(move_to_usp
, 4e60
, fff8
, USP
);
2799 INSN(move_from_usp
, 4e68
, fff8
, USP
);
2800 INSN(nop
, 4e71
, ffff
, CF_ISA_A
);
2801 INSN(stop
, 4e72
, ffff
, CF_ISA_A
);
2802 INSN(rte
, 4e73
, ffff
, CF_ISA_A
);
2803 INSN(rts
, 4e75
, ffff
, CF_ISA_A
);
2804 INSN(movec
, 4e7b
, ffff
, CF_ISA_A
);
2805 INSN(jump
, 4e80
, ffc0
, CF_ISA_A
);
2806 INSN(jump
, 4ec0
, ffc0
, CF_ISA_A
);
2807 INSN(addsubq
, 5180, f1c0
, CF_ISA_A
);
2808 INSN(scc
, 50c0
, f0f8
, CF_ISA_A
);
2809 INSN(addsubq
, 5080, f1c0
, CF_ISA_A
);
2810 INSN(tpf
, 51f8
, fff8
, CF_ISA_A
);
2812 /* Branch instructions. */
2813 INSN(branch
, 6000, f000
, CF_ISA_A
);
2814 /* Disable long branch instructions, then add back the ones we want. */
2815 INSN(undef
, 60ff
, f0ff
, CF_ISA_A
); /* All long branches. */
2816 INSN(branch
, 60ff
, f0ff
, CF_ISA_B
);
2817 INSN(undef
, 60ff
, ffff
, CF_ISA_B
); /* bra.l */
2818 INSN(branch
, 60ff
, ffff
, BRAL
);
2820 INSN(moveq
, 7000, f100
, CF_ISA_A
);
2821 INSN(mvzs
, 7100, f100
, CF_ISA_B
);
2822 INSN(or, 8000, f000
, CF_ISA_A
);
2823 INSN(divw
, 80c0
, f0c0
, CF_ISA_A
);
2824 INSN(addsub
, 9000, f000
, CF_ISA_A
);
2825 INSN(subx
, 9180, f1f8
, CF_ISA_A
);
2826 INSN(suba
, 91c0
, f1c0
, CF_ISA_A
);
2828 INSN(undef_mac
, a000
, f000
, CF_ISA_A
);
2829 INSN(mac
, a000
, f100
, CF_EMAC
);
2830 INSN(from_mac
, a180
, f9b0
, CF_EMAC
);
2831 INSN(move_mac
, a110
, f9fc
, CF_EMAC
);
2832 INSN(from_macsr
,a980
, f9f0
, CF_EMAC
);
2833 INSN(from_mask
, ad80
, fff0
, CF_EMAC
);
2834 INSN(from_mext
, ab80
, fbf0
, CF_EMAC
);
2835 INSN(macsr_to_ccr
, a9c0
, ffff
, CF_EMAC
);
2836 INSN(to_mac
, a100
, f9c0
, CF_EMAC
);
2837 INSN(to_macsr
, a900
, ffc0
, CF_EMAC
);
2838 INSN(to_mext
, ab00
, fbc0
, CF_EMAC
);
2839 INSN(to_mask
, ad00
, ffc0
, CF_EMAC
);
2841 INSN(mov3q
, a140
, f1c0
, CF_ISA_B
);
2842 INSN(cmp
, b000
, f1c0
, CF_ISA_B
); /* cmp.b */
2843 INSN(cmp
, b040
, f1c0
, CF_ISA_B
); /* cmp.w */
2844 INSN(cmpa
, b0c0
, f1c0
, CF_ISA_B
); /* cmpa.w */
2845 INSN(cmp
, b080
, f1c0
, CF_ISA_A
);
2846 INSN(cmpa
, b1c0
, f1c0
, CF_ISA_A
);
2847 INSN(eor
, b180
, f1c0
, CF_ISA_A
);
2848 INSN(and, c000
, f000
, CF_ISA_A
);
2849 INSN(mulw
, c0c0
, f0c0
, CF_ISA_A
);
2850 INSN(addsub
, d000
, f000
, CF_ISA_A
);
2851 INSN(addx
, d180
, f1f8
, CF_ISA_A
);
2852 INSN(adda
, d1c0
, f1c0
, CF_ISA_A
);
2853 INSN(shift_im
, e080
, f0f0
, CF_ISA_A
);
2854 INSN(shift_reg
, e0a0
, f0f0
, CF_ISA_A
);
2855 INSN(undef_fpu
, f000
, f000
, CF_ISA_A
);
2856 INSN(fpu
, f200
, ffc0
, CF_FPU
);
2857 INSN(fbcc
, f280
, ffc0
, CF_FPU
);
2858 INSN(frestore
, f340
, ffc0
, CF_FPU
);
2859 INSN(fsave
, f340
, ffc0
, CF_FPU
);
2860 INSN(intouch
, f340
, ffc0
, CF_ISA_A
);
2861 INSN(cpushl
, f428
, ff38
, CF_ISA_A
);
2862 INSN(wddata
, fb00
, ff00
, CF_ISA_A
);
2863 INSN(wdebug
, fbc0
, ffc0
, CF_ISA_A
);
2867 /* ??? Some of this implementation is not exception safe. We should always
2868 write back the result to memory before setting the condition codes. */
2869 static void disas_m68k_insn(CPUState
* env
, DisasContext
*s
)
2873 insn
= lduw_code(s
->pc
);
2876 opcode_table
[insn
](s
, insn
);
2880 /* Save the result of a floating point operation. */
2881 static void expand_op_fp_result(qOP
*qop
)
2883 gen_op_movf64(QREG_FP_RESULT
, qop
->args
[0]);
2886 /* Dummy op to indicate that the flags have been set. */
2887 static void expand_op_flags_set(qOP
*qop
)
2891 /* Convert the confition codes into CC_OP_FLAGS format. */
2892 static void expand_op_flush_flags(qOP
*qop
)
2896 if (qop
->args
[0] == CC_OP_DYNAMIC
)
2897 cc_opreg
= QREG_CC_OP
;
2899 cc_opreg
= gen_im32(qop
->args
[0]);
2900 gen_op_helper32(QREG_NULL
, cc_opreg
, HELPER_flush_flags
);
2903 /* Set CC_DEST after a logical or direct flag setting operation. */
2904 static void expand_op_logic_cc(qOP
*qop
)
2906 gen_op_mov32(QREG_CC_DEST
, qop
->args
[0]);
2909 /* Set CC_SRC and CC_DEST after an arithmetic operation. */
2910 static void expand_op_update_cc_add(qOP
*qop
)
2912 gen_op_mov32(QREG_CC_DEST
, qop
->args
[0]);
2913 gen_op_mov32(QREG_CC_SRC
, qop
->args
[1]);
2916 /* Update the X flag. */
2917 static void expand_op_update_xflag(qOP
*qop
)
2922 arg0
= qop
->args
[0];
2923 arg1
= qop
->args
[1];
2924 if (arg1
== QREG_NULL
) {
2926 gen_op_mov32(QREG_CC_X
, arg0
);
2928 /* CC_X = arg0 < (unsigned)arg1. */
2929 gen_op_set_ltu32(QREG_CC_X
, arg0
, arg1
);
2933 /* Set arg0 to the contents of the X flag. */
2934 static void expand_op_get_xflag(qOP
*qop
)
2936 gen_op_mov32(qop
->args
[0], QREG_CC_X
);
2939 /* Expand a shift by immediate. The ISA only allows shifts by 1-8, so we
2940 already know the shift is within range. */
2941 static inline void expand_shift_im(qOP
*qop
, int right
, int arith
)
2951 val
= gen_new_qreg(QMODE_I32
);
2952 gen_op_mov32(val
, reg
);
2953 gen_op_mov32(QREG_CC_DEST
, val
);
2954 gen_op_mov32(QREG_CC_SRC
, tmp
);
2957 gen_op_sar32(reg
, val
, tmp
);
2959 gen_op_shr32(reg
, val
, tmp
);
2964 tmp
= gen_im32(im
- 1);
2966 gen_op_shl32(reg
, val
, tmp
);
2967 tmp
= gen_im32(32 - im
);
2969 if (tmp
!= QREG_NULL
)
2970 gen_op_shr32(val
, val
, tmp
);
2971 gen_op_and32(QREG_CC_X
, val
, gen_im32(1));
2974 static void expand_op_shl_im_cc(qOP
*qop
)
2976 expand_shift_im(qop
, 0, 0);
2979 static void expand_op_shr_im_cc(qOP
*qop
)
2981 expand_shift_im(qop
, 1, 0);
2984 static void expand_op_sar_im_cc(qOP
*qop
)
2986 expand_shift_im(qop
, 1, 1);
2989 /* Expand a shift by register. */
2990 /* ??? This gives incorrect answers for shifts by 0 or >= 32 */
2991 static inline void expand_shift_reg(qOP
*qop
, int right
, int arith
)
2999 shift
= qop
->args
[1];
3000 val
= gen_new_qreg(QMODE_I32
);
3001 gen_op_mov32(val
, reg
);
3002 gen_op_mov32(QREG_CC_DEST
, val
);
3003 gen_op_mov32(QREG_CC_SRC
, shift
);
3004 tmp
= gen_new_qreg(QMODE_I32
);
3007 gen_op_sar32(reg
, val
, shift
);
3009 gen_op_shr32(reg
, val
, shift
);
3011 gen_op_sub32(tmp
, shift
, gen_im32(1));
3013 gen_op_shl32(reg
, val
, shift
);
3014 gen_op_sub32(tmp
, gen_im32(31), shift
);
3016 gen_op_shl32(val
, val
, tmp
);
3017 gen_op_and32(QREG_CC_X
, val
, gen_im32(1));
3020 static void expand_op_shl_cc(qOP
*qop
)
3022 expand_shift_reg(qop
, 0, 0);
3025 static void expand_op_shr_cc(qOP
*qop
)
3027 expand_shift_reg(qop
, 1, 0);
3030 static void expand_op_sar_cc(qOP
*qop
)
3032 expand_shift_reg(qop
, 1, 1);
3035 /* Set the Z flag to (arg0 & arg1) == 0. */
3036 static void expand_op_btest(qOP
*qop
)
3041 l1
= gen_new_label();
3042 tmp
= gen_new_qreg(QMODE_I32
);
3043 gen_op_and32(tmp
, qop
->args
[0], qop
->args
[1]);
3044 gen_op_and32(QREG_CC_DEST
, QREG_CC_DEST
, gen_im32(~(uint32_t)CCF_Z
));
3045 gen_op_jmp_nz32(tmp
, l1
);
3046 gen_op_or32(QREG_CC_DEST
, QREG_CC_DEST
, gen_im32(CCF_Z
));
3050 /* arg0 += arg1 + CC_X */
3051 static void expand_op_addx_cc(qOP
*qop
)
3053 int arg0
= qop
->args
[0];
3054 int arg1
= qop
->args
[1];
3057 gen_op_add32 (arg0
, arg0
, arg1
);
3058 l1
= gen_new_label();
3059 l2
= gen_new_label();
3060 gen_op_jmp_z32(QREG_CC_X
, l1
);
3061 gen_op_add32(arg0
, arg0
, gen_im32(1));
3062 gen_op_mov32(QREG_CC_OP
, gen_im32(CC_OP_ADDX
));
3063 gen_op_set_leu32(QREG_CC_X
, arg0
, arg1
);
3066 gen_op_mov32(QREG_CC_OP
, gen_im32(CC_OP_ADD
));
3067 gen_op_set_ltu32(QREG_CC_X
, arg0
, arg1
);
3071 /* arg0 -= arg1 + CC_X */
3072 static void expand_op_subx_cc(qOP
*qop
)
3074 int arg0
= qop
->args
[0];
3075 int arg1
= qop
->args
[1];
3078 l1
= gen_new_label();
3079 l2
= gen_new_label();
3080 gen_op_jmp_z32(QREG_CC_X
, l1
);
3081 gen_op_set_leu32(QREG_CC_X
, arg0
, arg1
);
3082 gen_op_sub32(arg0
, arg0
, gen_im32(1));
3083 gen_op_mov32(QREG_CC_OP
, gen_im32(CC_OP_SUBX
));
3086 gen_op_set_ltu32(QREG_CC_X
, arg0
, arg1
);
3087 gen_op_mov32(QREG_CC_OP
, gen_im32(CC_OP_SUB
));
3089 gen_op_sub32 (arg0
, arg0
, arg1
);
3092 /* Expand target specific ops to generic qops. */
3093 static void expand_target_qops(void)
3099 /* Copy the list of qops, expanding target specific ops as we go. */
3100 qop
= gen_first_qop
;
3101 gen_first_qop
= NULL
;
3102 gen_last_qop
= NULL
;
3103 for (; qop
; qop
= next
) {
3106 if (c
< FIRST_TARGET_OP
) {
3107 qop
->prev
= gen_last_qop
;
3110 gen_last_qop
->next
= qop
;
3112 gen_first_qop
= qop
;
3117 #define DEF(name, nargs, barrier) \
3118 case INDEX_op_##name: \
3119 expand_op_##name(qop); \
3121 #include "qop-target.def"
3124 cpu_abort(NULL
, "Unexpanded target qop");
3129 /* ??? Implement this. */
3131 optimize_flags(void)
3136 /* generate intermediate code for basic block 'tb'. */
3138 gen_intermediate_code_internal(CPUState
*env
, TranslationBlock
*tb
,
3141 DisasContext dc1
, *dc
= &dc1
;
3142 uint16_t *gen_opc_end
;
3144 target_ulong pc_start
;
3148 /* generate intermediate code */
3153 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
3156 dc
->is_jmp
= DISAS_NEXT
;
3158 dc
->cc_op
= CC_OP_DYNAMIC
;
3159 dc
->singlestep_enabled
= env
->singlestep_enabled
;
3160 dc
->fpcr
= env
->fpcr
;
3161 dc
->user
= (env
->sr
& SR_S
) == 0;
3166 pc_offset
= dc
->pc
- pc_start
;
3167 gen_throws_exception
= NULL
;
3168 if (env
->nb_breakpoints
> 0) {
3169 for(j
= 0; j
< env
->nb_breakpoints
; j
++) {
3170 if (env
->breakpoints
[j
] == dc
->pc
) {
3171 gen_exception(dc
, dc
->pc
, EXCP_DEBUG
);
3172 dc
->is_jmp
= DISAS_JUMP
;
3180 j
= gen_opc_ptr
- gen_opc_buf
;
3184 gen_opc_instr_start
[lj
++] = 0;
3186 gen_opc_pc
[lj
] = dc
->pc
;
3187 gen_opc_instr_start
[lj
] = 1;
3189 last_cc_op
= dc
->cc_op
;
3190 dc
->insn_pc
= dc
->pc
;
3191 disas_m68k_insn(env
, dc
);
3193 /* Terminate the TB on memory ops if watchpoints are present. */
3194 /* FIXME: This should be replacd by the deterministic execution
3195 * IRQ raising bits. */
3196 if (dc
->is_mem
&& env
->nb_watchpoints
)
3198 } while (!dc
->is_jmp
&& gen_opc_ptr
< gen_opc_end
&&
3199 !env
->singlestep_enabled
&&
3200 (pc_offset
) < (TARGET_PAGE_SIZE
- 32));
3202 if (__builtin_expect(env
->singlestep_enabled
, 0)) {
3203 /* Make sure the pc is updated, and raise a debug exception. */
3205 gen_flush_cc_op(dc
);
3206 gen_op_mov32(QREG_PC
, gen_im32((long)dc
->pc
));
3208 gen_op_raise_exception(EXCP_DEBUG
);
3210 switch(dc
->is_jmp
) {
3212 gen_flush_cc_op(dc
);
3213 gen_jmp_tb(dc
, 0, dc
->pc
);
3218 gen_flush_cc_op(dc
);
3219 /* indicate that the hash table must be used to find the next TB */
3223 /* nothing more to generate */
3227 *gen_opc_ptr
= INDEX_op_end
;
3230 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
3231 fprintf(logfile
, "----------------\n");
3232 fprintf(logfile
, "IN: %s\n", lookup_symbol(pc_start
));
3233 target_disas(logfile
, pc_start
, dc
->pc
- pc_start
, 0);
3234 fprintf(logfile
, "\n");
3238 j
= gen_opc_ptr
- gen_opc_buf
;
3241 gen_opc_instr_start
[lj
++] = 0;
3243 tb
->size
= dc
->pc
- pc_start
;
3247 //expand_target_qops();
3251 int gen_intermediate_code(CPUState
*env
, TranslationBlock
*tb
)
3253 return gen_intermediate_code_internal(env
, tb
, 0);
3256 int gen_intermediate_code_pc(CPUState
*env
, TranslationBlock
*tb
)
3258 return gen_intermediate_code_internal(env
, tb
, 1);
3261 void cpu_dump_state(CPUState
*env
, FILE *f
,
3262 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...),
3268 for (i
= 0; i
< 8; i
++)
3270 u
.d
= env
->fregs
[i
];
3271 cpu_fprintf (f
, "D%d = %08x A%d = %08x F%d = %08x%08x (%12g)\n",
3272 i
, env
->dregs
[i
], i
, env
->aregs
[i
],
3273 i
, u
.l
.upper
, u
.l
.lower
, *(double *)&u
.d
);
3275 cpu_fprintf (f
, "PC = %08x ", env
->pc
);
3277 cpu_fprintf (f
, "SR = %04x %c%c%c%c%c ", sr
, (sr
& 0x10) ? 'X' : '-',
3278 (sr
& CCF_N
) ? 'N' : '-', (sr
& CCF_Z
) ? 'Z' : '-',
3279 (sr
& CCF_V
) ? 'V' : '-', (sr
& CCF_C
) ? 'C' : '-');
3280 cpu_fprintf (f
, "FPRESULT = %12g\n", *(double *)&env
->fp_result
);