 * Copyright (c) 2005-2007 CodeSourcery
 * Written by Paul Brook
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
//#define DEBUG_DISPATCH 1

/* Fake floating point.  */
#define tcg_gen_mov_f64 tcg_gen_mov_i64
#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64

#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#define DEFF64(name, offset) static TCGv_i64 QREG_##name;
static TCGv_ptr cpu_env;

static char cpu_reg_names[3*8*3 + 5*4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
static TCGv_i64 cpu_fregs[8];
static TCGv_i64 cpu_macc[4];

#define DREG(insn, pos) cpu_dregs[((insn) >> (pos)) & 7]
#define AREG(insn, pos) cpu_aregs[((insn) >> (pos)) & 7]
#define FREG(insn, pos) cpu_fregs[((insn) >> (pos)) & 7]
#define MACREG(acc) cpu_macc[acc]
#define QREG_SP cpu_aregs[7]

static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
/* Used to distinguish stores from bad addressing modes.  */
static TCGv store_dummy;
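/* Usage note (added for clarity, not part of the original source): ColdFire
   instructions encode register operands in 3-bit fields, and handlers pull
   the backing TCG globals out with the macros above, e.g.
       TCGv reg = DREG(insn, 9);   // data register selected by bits 11:9
       TCGv ea  = AREG(insn, 0);   // address register selected by bits 2:0
   QREG_SP is simply A7, the ColdFire stack pointer.  */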
#include "gen-icount.h"
void m68k_tcg_init(void)

#define DEFO32(name, offset) QREG_##name = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, offset), #name);
#define DEFO64(name, offset) QREG_##name = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, offset), #name);
#define DEFF64(name, offset) DEFO64(name, offset)

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 8; i++) {
        cpu_dregs[i] = tcg_global_mem_new(TCG_AREG0, offsetof(CPUM68KState, dregs[i]), p);
        cpu_aregs[i] = tcg_global_mem_new(TCG_AREG0, offsetof(CPUM68KState, aregs[i]), p);
        cpu_fregs[i] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUM68KState, fregs[i]), p);

    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUM68KState, macc[i]), p);

    NULL_QREG = tcg_global_mem_new(TCG_AREG0, -4, "NULL");
    store_dummy = tcg_global_mem_new(TCG_AREG0, -8, "NULL");

static inline void qemu_assert(int cond, const char *msg)

    fprintf (stderr, "badness: %s\n", msg);
/* internal defines */
typedef struct DisasContext {
    target_ulong insn_pc; /* Start of the current instruction.  */
    struct TranslationBlock *tb;
    int singlestep_enabled;

#define DISAS_JUMP_NEXT 4
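/* Note (added for clarity, not in the original source): the is_jmp field of
   DisasContext drives the main translation loop -- it stays DISAS_NEXT while
   decoding continues, and values such as DISAS_JUMP, DISAS_UPDATE and
   DISAS_TB_JUMP (used later in this file) make the loop stop and decide how
   the generated block should end.  */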
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) s->user

/* XXX: move that elsewhere */
/* ??? Fix exceptions.  */
static void *gen_throws_exception;
#define gen_last_qop NULL

typedef void (*disas_proc)(DisasContext *, uint16_t);
#ifdef DEBUG_DISPATCH
#define DISAS_INSN(name) \
  static void real_disas_##name (DisasContext *s, uint16_t insn); \
  static void disas_##name (DisasContext *s, uint16_t insn) { \
    qemu_log("Dispatch " #name "\n"); \
    real_disas_##name(s, insn); } \
  static void real_disas_##name (DisasContext *s, uint16_t insn)
#else
#define DISAS_INSN(name) \
  static void disas_##name (DisasContext *s, uint16_t insn)
#endif
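/* How these pieces fit together (explanatory sketch added here, not part of
   the original file): every instruction handler is declared with
   DISAS_INSN(foo), yielding disas_foo(DisasContext *s, uint16_t insn), which
   decodes fields of the raw opcode and emits TCG ops.  A hypothetical handler
   would look like:
       DISAS_INSN(my_nop)
       {
           // decode fields from insn, emit tcg_gen_* operations
       }
   register_m68k_insns() later fills opcode_table[] with these handlers and
   disas_m68k_insn() dispatches through that table; with DEBUG_DISPATCH
   defined, the wrapper above logs each handler name before calling it.  */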
/* Generate a load from the specified address.  Narrow values are
   sign extended to full register width.  */
static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)

    int index = IS_USER(s);

    tmp = tcg_temp_new_i32();
        tcg_gen_qemu_ld8s(tmp, addr, index);
        tcg_gen_qemu_ld8u(tmp, addr, index);
        tcg_gen_qemu_ld16s(tmp, addr, index);
        tcg_gen_qemu_ld16u(tmp, addr, index);
        tcg_gen_qemu_ld32u(tmp, addr, index);
        qemu_assert(0, "bad load size");
    gen_throws_exception = gen_last_qop;
static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)

    int index = IS_USER(s);

    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ldf64(tmp, addr, index);
    gen_throws_exception = gen_last_qop;
/* Generate a store.  */
static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)

    int index = IS_USER(s);

        tcg_gen_qemu_st8(val, addr, index);
        tcg_gen_qemu_st16(val, addr, index);
        tcg_gen_qemu_st32(val, addr, index);
        qemu_assert(0, "bad store size");
    gen_throws_exception = gen_last_qop;
static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)

    int index = IS_USER(s);

    tcg_gen_qemu_stf64(val, addr, index);
    gen_throws_exception = gen_last_qop;
/* Generate an unsigned load if VAL is 0, a signed load if VAL is -1,
   otherwise generate a store.  */
static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,

    if (what == EA_STORE) {
        gen_store(s, opsize, addr, val);
        return gen_load(s, opsize, addr, what == EA_LOADS);
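/* Illustrative use of gen_ldst (a sketch added here, not original code): a
   read-modify-write handler typically does something like
       TCGv val = gen_ldst(s, OS_LONG, addr, NULL_QREG, EA_LOADS);   // load
       ...
       gen_ldst(s, OS_LONG, addr, result, EA_STORE);                 // store back
   so one helper covers both directions, with EA_LOADS/EA_LOADU selecting
   sign or zero extension on the load path.  */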
/* Read a 32-bit immediate constant.  */
static inline uint32_t read_im32(DisasContext *s)

    im = ((uint32_t)lduw_code(s->pc)) << 16;
    im |= lduw_code(s->pc);
/* Calculate an address index.  */
static TCGv gen_addr_index(uint16_t ext, TCGv tmp)

    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        tcg_gen_ext16s_i32(tmp, add);
    scale = (ext >> 9) & 3;
        tcg_gen_shli_i32(tmp, add, scale);
/* Handle a base + index + displacement effective address.
   A NULL_QREG base means pc-relative.  */
297 static TCGv
gen_lea_indexed(DisasContext
*s
, int opsize
, TCGv base
)
306 ext
= lduw_code(s
->pc
);
309 if ((ext
& 0x800) == 0 && !m68k_feature(s
->env
, M68K_FEATURE_WORD_INDEX
))
313 /* full extension word format */
314 if (!m68k_feature(s
->env
, M68K_FEATURE_EXT_FULL
))
317 if ((ext
& 0x30) > 0x10) {
318 /* base displacement */
319 if ((ext
& 0x30) == 0x20) {
320 bd
= (int16_t)lduw_code(s
->pc
);
328 tmp
= tcg_temp_new();
329 if ((ext
& 0x44) == 0) {
331 add
= gen_addr_index(ext
, tmp
);
335 if ((ext
& 0x80) == 0) {
336 /* base not suppressed */
337 if (IS_NULL_QREG(base
)) {
338 base
= tcg_const_i32(offset
+ bd
);
341 if (!IS_NULL_QREG(add
)) {
342 tcg_gen_add_i32(tmp
, add
, base
);
348 if (!IS_NULL_QREG(add
)) {
350 tcg_gen_addi_i32(tmp
, add
, bd
);
354 add
= tcg_const_i32(bd
);
356 if ((ext
& 3) != 0) {
357 /* memory indirect */
358 base
= gen_load(s
, OS_LONG
, add
, 0);
359 if ((ext
& 0x44) == 4) {
360 add
= gen_addr_index(ext
, tmp
);
361 tcg_gen_add_i32(tmp
, add
, base
);
367 /* outer displacement */
368 if ((ext
& 3) == 2) {
369 od
= (int16_t)lduw_code(s
->pc
);
378 tcg_gen_addi_i32(tmp
, add
, od
);
383 /* brief extension word format */
384 tmp
= tcg_temp_new();
385 add
= gen_addr_index(ext
, tmp
);
386 if (!IS_NULL_QREG(base
)) {
387 tcg_gen_add_i32(tmp
, add
, base
);
389 tcg_gen_addi_i32(tmp
, tmp
, (int8_t)ext
);
391 tcg_gen_addi_i32(tmp
, add
, offset
+ (int8_t)ext
);
398 /* Update the CPU env CC_OP state. */
399 static inline void gen_flush_cc_op(DisasContext
*s
)
401 if (s
->cc_op
!= CC_OP_DYNAMIC
)
402 tcg_gen_movi_i32(QREG_CC_OP
, s
->cc_op
);
405 /* Evaluate all the CC flags. */
406 static inline void gen_flush_flags(DisasContext
*s
)
408 if (s
->cc_op
== CC_OP_FLAGS
)
411 gen_helper_flush_flags(cpu_env
, QREG_CC_OP
);
412 s
->cc_op
= CC_OP_FLAGS
;
415 static void gen_logic_cc(DisasContext
*s
, TCGv val
)
417 tcg_gen_mov_i32(QREG_CC_DEST
, val
);
418 s
->cc_op
= CC_OP_LOGIC
;
421 static void gen_update_cc_add(TCGv dest
, TCGv src
)
423 tcg_gen_mov_i32(QREG_CC_DEST
, dest
);
424 tcg_gen_mov_i32(QREG_CC_SRC
, src
);
427 static inline int opsize_bytes(int opsize
)
430 case OS_BYTE
: return 1;
431 case OS_WORD
: return 2;
432 case OS_LONG
: return 4;
433 case OS_SINGLE
: return 4;
434 case OS_DOUBLE
: return 8;
436 qemu_assert(0, "bad operand size");
441 /* Assign value to a register. If the width is less than the register width
442 only the low part of the register is set. */
443 static void gen_partset_reg(int opsize
, TCGv reg
, TCGv val
)
448 tcg_gen_andi_i32(reg
, reg
, 0xffffff00);
449 tmp
= tcg_temp_new();
450 tcg_gen_ext8u_i32(tmp
, val
);
451 tcg_gen_or_i32(reg
, reg
, tmp
);
454 tcg_gen_andi_i32(reg
, reg
, 0xffff0000);
455 tmp
= tcg_temp_new();
456 tcg_gen_ext16u_i32(tmp
, val
);
457 tcg_gen_or_i32(reg
, reg
, tmp
);
461 tcg_gen_mov_i32(reg
, val
);
464 qemu_assert(0, "Bad operand size");
469 /* Sign or zero extend a value. */
470 static inline TCGv
gen_extend(TCGv val
, int opsize
, int sign
)
476 tmp
= tcg_temp_new();
478 tcg_gen_ext8s_i32(tmp
, val
);
480 tcg_gen_ext8u_i32(tmp
, val
);
483 tmp
= tcg_temp_new();
485 tcg_gen_ext16s_i32(tmp
, val
);
487 tcg_gen_ext16u_i32(tmp
, val
);
494 qemu_assert(0, "Bad operand size");
499 /* Generate code for an "effective address". Does not adjust the base
500 register for autoincrement addressing modes. */
501 static TCGv
gen_lea(DisasContext
*s
, uint16_t insn
, int opsize
)
508 switch ((insn
>> 3) & 7) {
509 case 0: /* Data register direct. */
510 case 1: /* Address register direct. */
512 case 2: /* Indirect register */
513 case 3: /* Indirect postincrement. */
514 return AREG(insn
, 0);
    case 4: /* Indirect predecrement.  */
517 tmp
= tcg_temp_new();
518 tcg_gen_subi_i32(tmp
, reg
, opsize_bytes(opsize
));
520 case 5: /* Indirect displacement. */
522 tmp
= tcg_temp_new();
523 ext
= lduw_code(s
->pc
);
525 tcg_gen_addi_i32(tmp
, reg
, (int16_t)ext
);
527 case 6: /* Indirect index + displacement. */
529 return gen_lea_indexed(s
, opsize
, reg
);
532 case 0: /* Absolute short. */
533 offset
= ldsw_code(s
->pc
);
535 return tcg_const_i32(offset
);
536 case 1: /* Absolute long. */
537 offset
= read_im32(s
);
538 return tcg_const_i32(offset
);
539 case 2: /* pc displacement */
541 offset
+= ldsw_code(s
->pc
);
543 return tcg_const_i32(offset
);
544 case 3: /* pc index+displacement. */
545 return gen_lea_indexed(s
, opsize
, NULL_QREG
);
546 case 4: /* Immediate. */
551 /* Should never happen. */
/* Helper function for gen_ea.  Reuse the computed address for
   read/write operands.  */
557 static inline TCGv
gen_ea_once(DisasContext
*s
, uint16_t insn
, int opsize
,
558 TCGv val
, TCGv
*addrp
, ea_what what
)
562 if (addrp
&& what
== EA_STORE
) {
565 tmp
= gen_lea(s
, insn
, opsize
);
566 if (IS_NULL_QREG(tmp
))
571 return gen_ldst(s
, opsize
, tmp
, val
, what
);
/* Generate code to load/store a value to/from an EA.  If VAL > 0 this is
   a write otherwise it is a read (0 == sign extend, -1 == zero extend).
   ADDRP is non-null for readwrite operands.  */
577 static TCGv
gen_ea(DisasContext
*s
, uint16_t insn
, int opsize
, TCGv val
,
578 TCGv
*addrp
, ea_what what
)
584 switch ((insn
>> 3) & 7) {
585 case 0: /* Data register direct. */
587 if (what
== EA_STORE
) {
588 gen_partset_reg(opsize
, reg
, val
);
591 return gen_extend(reg
, opsize
, what
== EA_LOADS
);
593 case 1: /* Address register direct. */
595 if (what
== EA_STORE
) {
596 tcg_gen_mov_i32(reg
, val
);
599 return gen_extend(reg
, opsize
, what
== EA_LOADS
);
601 case 2: /* Indirect register */
603 return gen_ldst(s
, opsize
, reg
, val
, what
);
604 case 3: /* Indirect postincrement. */
606 result
= gen_ldst(s
, opsize
, reg
, val
, what
);
607 /* ??? This is not exception safe. The instruction may still
608 fault after this point. */
609 if (what
== EA_STORE
|| !addrp
)
610 tcg_gen_addi_i32(reg
, reg
, opsize_bytes(opsize
));
    case 4: /* Indirect predecrement.  */
615 if (addrp
&& what
== EA_STORE
) {
618 tmp
= gen_lea(s
, insn
, opsize
);
619 if (IS_NULL_QREG(tmp
))
624 result
= gen_ldst(s
, opsize
, tmp
, val
, what
);
625 /* ??? This is not exception safe. The instruction may still
626 fault after this point. */
627 if (what
== EA_STORE
|| !addrp
) {
629 tcg_gen_mov_i32(reg
, tmp
);
633 case 5: /* Indirect displacement. */
634 case 6: /* Indirect index + displacement. */
635 return gen_ea_once(s
, insn
, opsize
, val
, addrp
, what
);
638 case 0: /* Absolute short. */
639 case 1: /* Absolute long. */
640 case 2: /* pc displacement */
641 case 3: /* pc index+displacement. */
642 return gen_ea_once(s
, insn
, opsize
, val
, addrp
, what
);
643 case 4: /* Immediate. */
644 /* Sign extend values for consistency. */
647 if (what
== EA_LOADS
)
648 offset
= ldsb_code(s
->pc
+ 1);
650 offset
= ldub_code(s
->pc
+ 1);
654 if (what
== EA_LOADS
)
655 offset
= ldsw_code(s
->pc
);
657 offset
= lduw_code(s
->pc
);
661 offset
= read_im32(s
);
664 qemu_assert(0, "Bad immediate operand");
666 return tcg_const_i32(offset
);
671 /* Should never happen. */
675 /* This generates a conditional branch, clobbering all temporaries. */
676 static void gen_jmpcc(DisasContext
*s
, int cond
, int l1
)
680 /* TODO: Optimize compare/branch pairs rather than always flushing
681 flag state to CC_OP_FLAGS. */
689 case 2: /* HI (!C && !Z) */
690 tmp
= tcg_temp_new();
691 tcg_gen_andi_i32(tmp
, QREG_CC_DEST
, CCF_C
| CCF_Z
);
692 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, l1
);
694 case 3: /* LS (C || Z) */
695 tmp
= tcg_temp_new();
696 tcg_gen_andi_i32(tmp
, QREG_CC_DEST
, CCF_C
| CCF_Z
);
697 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, l1
);
699 case 4: /* CC (!C) */
700 tmp
= tcg_temp_new();
701 tcg_gen_andi_i32(tmp
, QREG_CC_DEST
, CCF_C
);
702 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, l1
);
705 tmp
= tcg_temp_new();
706 tcg_gen_andi_i32(tmp
, QREG_CC_DEST
, CCF_C
);
707 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, l1
);
709 case 6: /* NE (!Z) */
710 tmp
= tcg_temp_new();
711 tcg_gen_andi_i32(tmp
, QREG_CC_DEST
, CCF_Z
);
712 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, l1
);
715 tmp
= tcg_temp_new();
716 tcg_gen_andi_i32(tmp
, QREG_CC_DEST
, CCF_Z
);
717 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, l1
);
719 case 8: /* VC (!V) */
720 tmp
= tcg_temp_new();
721 tcg_gen_andi_i32(tmp
, QREG_CC_DEST
, CCF_V
);
722 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, l1
);
725 tmp
= tcg_temp_new();
726 tcg_gen_andi_i32(tmp
, QREG_CC_DEST
, CCF_V
);
727 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, l1
);
729 case 10: /* PL (!N) */
730 tmp
= tcg_temp_new();
731 tcg_gen_andi_i32(tmp
, QREG_CC_DEST
, CCF_N
);
732 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, l1
);
734 case 11: /* MI (N) */
735 tmp
= tcg_temp_new();
736 tcg_gen_andi_i32(tmp
, QREG_CC_DEST
, CCF_N
);
737 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, l1
);
739 case 12: /* GE (!(N ^ V)) */
740 tmp
= tcg_temp_new();
741 assert(CCF_V
== (CCF_N
>> 2));
742 tcg_gen_shri_i32(tmp
, QREG_CC_DEST
, 2);
743 tcg_gen_xor_i32(tmp
, tmp
, QREG_CC_DEST
);
744 tcg_gen_andi_i32(tmp
, tmp
, CCF_V
);
745 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, l1
);
747 case 13: /* LT (N ^ V) */
748 tmp
= tcg_temp_new();
749 assert(CCF_V
== (CCF_N
>> 2));
750 tcg_gen_shri_i32(tmp
, QREG_CC_DEST
, 2);
751 tcg_gen_xor_i32(tmp
, tmp
, QREG_CC_DEST
);
752 tcg_gen_andi_i32(tmp
, tmp
, CCF_V
);
753 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, l1
);
755 case 14: /* GT (!(Z || (N ^ V))) */
756 tmp
= tcg_temp_new();
757 assert(CCF_V
== (CCF_N
>> 2));
758 tcg_gen_andi_i32(tmp
, QREG_CC_DEST
, CCF_N
);
759 tcg_gen_shri_i32(tmp
, tmp
, 2);
760 tcg_gen_xor_i32(tmp
, tmp
, QREG_CC_DEST
);
761 tcg_gen_andi_i32(tmp
, tmp
, CCF_V
| CCF_Z
);
762 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, l1
);
764 case 15: /* LE (Z || (N ^ V)) */
765 tmp
= tcg_temp_new();
766 assert(CCF_V
== (CCF_N
>> 2));
767 tcg_gen_andi_i32(tmp
, QREG_CC_DEST
, CCF_N
);
768 tcg_gen_shri_i32(tmp
, tmp
, 2);
769 tcg_gen_xor_i32(tmp
, tmp
, QREG_CC_DEST
);
770 tcg_gen_andi_i32(tmp
, tmp
, CCF_V
| CCF_Z
);
771 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, l1
);
    /* Should never happen.  */
785 l1
= gen_new_label();
786 cond
= (insn
>> 8) & 0xf;
788 tcg_gen_andi_i32(reg
, reg
, 0xffffff00);
    /* This is safe because we modify the reg directly, with no other values
       live.  */
791 gen_jmpcc(s
, cond
^ 1, l1
);
792 tcg_gen_ori_i32(reg
, reg
, 0xff);
796 /* Force a TB lookup after an instruction that changes the CPU state. */
797 static void gen_lookup_tb(DisasContext
*s
)
800 tcg_gen_movi_i32(QREG_PC
, s
->pc
);
801 s
->is_jmp
= DISAS_UPDATE
;
804 /* Generate a jump to an immediate address. */
805 static void gen_jmp_im(DisasContext
*s
, uint32_t dest
)
808 tcg_gen_movi_i32(QREG_PC
, dest
);
809 s
->is_jmp
= DISAS_JUMP
;
812 /* Generate a jump to the address in qreg DEST. */
813 static void gen_jmp(DisasContext
*s
, TCGv dest
)
816 tcg_gen_mov_i32(QREG_PC
, dest
);
817 s
->is_jmp
= DISAS_JUMP
;
820 static void gen_exception(DisasContext
*s
, uint32_t where
, int nr
)
823 gen_jmp_im(s
, where
);
824 gen_helper_raise_exception(tcg_const_i32(nr
));
827 static inline void gen_addr_fault(DisasContext
*s
)
829 gen_exception(s
, s
->insn_pc
, EXCP_ADDRESS
);
832 #define SRC_EA(result, opsize, op_sign, addrp) do { \
833 result = gen_ea(s, insn, opsize, NULL_QREG, addrp, op_sign ? EA_LOADS : EA_LOADU); \
834 if (IS_NULL_QREG(result)) { \
840 #define DEST_EA(insn, opsize, val, addrp) do { \
841 TCGv ea_result = gen_ea(s, insn, opsize, val, addrp, EA_STORE); \
842 if (IS_NULL_QREG(ea_result)) { \
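/* Example of how SRC_EA/DEST_EA are used (an illustrative sketch, not part of
   the original file): a typical two-operand handler fetches its source
   operand, computes, then writes the result back through the same address:
       SRC_EA(src, OS_LONG, 0, &addr);
       tcg_gen_add_i32(dest, src, reg);
       DEST_EA(insn, OS_LONG, dest, &addr);
   Passing the shared ADDR pointer keeps read-modify-write operands from
   evaluating the effective address twice; both macros bail out (via
   gen_addr_fault()) when the addressing mode is invalid.  */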
848 /* Generate a jump to an immediate address. */
849 static void gen_jmp_tb(DisasContext
*s
, int n
, uint32_t dest
)
851 TranslationBlock
*tb
;
854 if (unlikely(s
->singlestep_enabled
)) {
855 gen_exception(s
, dest
, EXCP_DEBUG
);
856 } else if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) ||
857 (s
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
)) {
859 tcg_gen_movi_i32(QREG_PC
, dest
);
860 tcg_gen_exit_tb((tcg_target_long
)tb
+ n
);
865 s
->is_jmp
= DISAS_TB_JUMP
;
868 DISAS_INSN(undef_mac
)
870 gen_exception(s
, s
->pc
- 2, EXCP_LINEA
);
873 DISAS_INSN(undef_fpu
)
875 gen_exception(s
, s
->pc
- 2, EXCP_LINEF
);
880 gen_exception(s
, s
->pc
- 2, EXCP_UNSUPPORTED
);
881 cpu_abort(cpu_single_env
, "Illegal instruction: %04x @ %08x",
892 sign
= (insn
& 0x100) != 0;
894 tmp
= tcg_temp_new();
896 tcg_gen_ext16s_i32(tmp
, reg
);
898 tcg_gen_ext16u_i32(tmp
, reg
);
899 SRC_EA(src
, OS_WORD
, sign
, NULL
);
900 tcg_gen_mul_i32(tmp
, tmp
, src
);
901 tcg_gen_mov_i32(reg
, tmp
);
902 /* Unlike m68k, coldfire always clears the overflow bit. */
903 gen_logic_cc(s
, tmp
);
913 sign
= (insn
& 0x100) != 0;
916 tcg_gen_ext16s_i32(QREG_DIV1
, reg
);
918 tcg_gen_ext16u_i32(QREG_DIV1
, reg
);
920 SRC_EA(src
, OS_WORD
, sign
, NULL
);
921 tcg_gen_mov_i32(QREG_DIV2
, src
);
923 gen_helper_divs(cpu_env
, tcg_const_i32(1));
925 gen_helper_divu(cpu_env
, tcg_const_i32(1));
928 tmp
= tcg_temp_new();
929 src
= tcg_temp_new();
930 tcg_gen_ext16u_i32(tmp
, QREG_DIV1
);
931 tcg_gen_shli_i32(src
, QREG_DIV2
, 16);
932 tcg_gen_or_i32(reg
, tmp
, src
);
933 s
->cc_op
= CC_OP_FLAGS
;
943 ext
= lduw_code(s
->pc
);
946 gen_exception(s
, s
->pc
- 4, EXCP_UNSUPPORTED
);
951 tcg_gen_mov_i32(QREG_DIV1
, num
);
952 SRC_EA(den
, OS_LONG
, 0, NULL
);
953 tcg_gen_mov_i32(QREG_DIV2
, den
);
955 gen_helper_divs(cpu_env
, tcg_const_i32(0));
957 gen_helper_divu(cpu_env
, tcg_const_i32(0));
959 if ((ext
& 7) == ((ext
>> 12) & 7)) {
961 tcg_gen_mov_i32 (reg
, QREG_DIV1
);
964 tcg_gen_mov_i32 (reg
, QREG_DIV2
);
966 s
->cc_op
= CC_OP_FLAGS
;
978 add
= (insn
& 0x4000) != 0;
980 dest
= tcg_temp_new();
982 SRC_EA(tmp
, OS_LONG
, 0, &addr
);
986 SRC_EA(src
, OS_LONG
, 0, NULL
);
989 tcg_gen_add_i32(dest
, tmp
, src
);
990 gen_helper_xflag_lt(QREG_CC_X
, dest
, src
);
991 s
->cc_op
= CC_OP_ADD
;
993 gen_helper_xflag_lt(QREG_CC_X
, tmp
, src
);
994 tcg_gen_sub_i32(dest
, tmp
, src
);
995 s
->cc_op
= CC_OP_SUB
;
997 gen_update_cc_add(dest
, src
);
999 DEST_EA(insn
, OS_LONG
, dest
, &addr
);
1001 tcg_gen_mov_i32(reg
, dest
);
1006 /* Reverse the order of the bits in REG. */
1010 reg
= DREG(insn
, 0);
1011 gen_helper_bitrev(reg
, reg
);
1014 DISAS_INSN(bitop_reg
)
1024 if ((insn
& 0x38) != 0)
1028 op
= (insn
>> 6) & 3;
1029 SRC_EA(src1
, opsize
, 0, op
? &addr
: NULL
);
1030 src2
= DREG(insn
, 9);
1031 dest
= tcg_temp_new();
1034 tmp
= tcg_temp_new();
1035 if (opsize
== OS_BYTE
)
1036 tcg_gen_andi_i32(tmp
, src2
, 7);
1038 tcg_gen_andi_i32(tmp
, src2
, 31);
1040 tmp
= tcg_temp_new();
1041 tcg_gen_shr_i32(tmp
, src1
, src2
);
1042 tcg_gen_andi_i32(tmp
, tmp
, 1);
1043 tcg_gen_shli_i32(tmp
, tmp
, 2);
1044 /* Clear CCF_Z if bit set. */
1045 tcg_gen_ori_i32(QREG_CC_DEST
, QREG_CC_DEST
, CCF_Z
);
1046 tcg_gen_xor_i32(QREG_CC_DEST
, QREG_CC_DEST
, tmp
);
1048 tcg_gen_shl_i32(tmp
, tcg_const_i32(1), src2
);
1051 tcg_gen_xor_i32(dest
, src1
, tmp
);
1054 tcg_gen_not_i32(tmp
, tmp
);
1055 tcg_gen_and_i32(dest
, src1
, tmp
);
1058 tcg_gen_or_i32(dest
, src1
, tmp
);
1064 DEST_EA(insn
, opsize
, dest
, &addr
);
1070 reg
= DREG(insn
, 0);
1072 gen_helper_sats(reg
, reg
, QREG_CC_DEST
);
1073 gen_logic_cc(s
, reg
);
1076 static void gen_push(DisasContext
*s
, TCGv val
)
1080 tmp
= tcg_temp_new();
1081 tcg_gen_subi_i32(tmp
, QREG_SP
, 4);
1082 gen_store(s
, OS_LONG
, tmp
, val
);
1083 tcg_gen_mov_i32(QREG_SP
, tmp
);
1095 mask
= lduw_code(s
->pc
);
1097 tmp
= gen_lea(s
, insn
, OS_LONG
);
1098 if (IS_NULL_QREG(tmp
)) {
1102 addr
= tcg_temp_new();
1103 tcg_gen_mov_i32(addr
, tmp
);
1104 is_load
= ((insn
& 0x0400) != 0);
1105 for (i
= 0; i
< 16; i
++, mask
>>= 1) {
1112 tmp
= gen_load(s
, OS_LONG
, addr
, 0);
1113 tcg_gen_mov_i32(reg
, tmp
);
1115 gen_store(s
, OS_LONG
, addr
, reg
);
1118 tcg_gen_addi_i32(addr
, addr
, 4);
1123 DISAS_INSN(bitop_im
)
1133 if ((insn
& 0x38) != 0)
1137 op
= (insn
>> 6) & 3;
1139 bitnum
= lduw_code(s
->pc
);
1141 if (bitnum
& 0xff00) {
1142 disas_undef(s
, insn
);
1146 SRC_EA(src1
, opsize
, 0, op
? &addr
: NULL
);
1149 if (opsize
== OS_BYTE
)
1155 tmp
= tcg_temp_new();
1156 assert (CCF_Z
== (1 << 2));
1158 tcg_gen_shri_i32(tmp
, src1
, bitnum
- 2);
1159 else if (bitnum
< 2)
1160 tcg_gen_shli_i32(tmp
, src1
, 2 - bitnum
);
1162 tcg_gen_mov_i32(tmp
, src1
);
1163 tcg_gen_andi_i32(tmp
, tmp
, CCF_Z
);
1164 /* Clear CCF_Z if bit set. */
1165 tcg_gen_ori_i32(QREG_CC_DEST
, QREG_CC_DEST
, CCF_Z
);
1166 tcg_gen_xor_i32(QREG_CC_DEST
, QREG_CC_DEST
, tmp
);
1170 tcg_gen_xori_i32(tmp
, src1
, mask
);
1173 tcg_gen_andi_i32(tmp
, src1
, ~mask
);
1176 tcg_gen_ori_i32(tmp
, src1
, mask
);
1181 DEST_EA(insn
, opsize
, tmp
, &addr
);
1185 DISAS_INSN(arith_im
)
1193 op
= (insn
>> 9) & 7;
1194 SRC_EA(src1
, OS_LONG
, 0, (op
== 6) ? NULL
: &addr
);
1196 dest
= tcg_temp_new();
1199 tcg_gen_ori_i32(dest
, src1
, im
);
1200 gen_logic_cc(s
, dest
);
1203 tcg_gen_andi_i32(dest
, src1
, im
);
1204 gen_logic_cc(s
, dest
);
1207 tcg_gen_mov_i32(dest
, src1
);
1208 gen_helper_xflag_lt(QREG_CC_X
, dest
, tcg_const_i32(im
));
1209 tcg_gen_subi_i32(dest
, dest
, im
);
1210 gen_update_cc_add(dest
, tcg_const_i32(im
));
1211 s
->cc_op
= CC_OP_SUB
;
1214 tcg_gen_mov_i32(dest
, src1
);
1215 tcg_gen_addi_i32(dest
, dest
, im
);
1216 gen_update_cc_add(dest
, tcg_const_i32(im
));
1217 gen_helper_xflag_lt(QREG_CC_X
, dest
, tcg_const_i32(im
));
1218 s
->cc_op
= CC_OP_ADD
;
1221 tcg_gen_xori_i32(dest
, src1
, im
);
1222 gen_logic_cc(s
, dest
);
1225 tcg_gen_mov_i32(dest
, src1
);
1226 tcg_gen_subi_i32(dest
, dest
, im
);
1227 gen_update_cc_add(dest
, tcg_const_i32(im
));
1228 s
->cc_op
= CC_OP_SUB
;
1234 DEST_EA(insn
, OS_LONG
, dest
, &addr
);
1242 reg
= DREG(insn
, 0);
1243 tcg_gen_bswap32_i32(reg
, reg
);
1253 switch (insn
>> 12) {
1254 case 1: /* move.b */
1257 case 2: /* move.l */
1260 case 3: /* move.w */
1266 SRC_EA(src
, opsize
, 1, NULL
);
1267 op
= (insn
>> 6) & 7;
1270 /* The value will already have been sign extended. */
1271 dest
= AREG(insn
, 9);
1272 tcg_gen_mov_i32(dest
, src
);
1276 dest_ea
= ((insn
>> 9) & 7) | (op
<< 3);
1277 DEST_EA(dest_ea
, opsize
, src
, NULL
);
1278 /* This will be correct because loads sign extend. */
1279 gen_logic_cc(s
, src
);
1288 reg
= DREG(insn
, 0);
1289 gen_helper_subx_cc(reg
, cpu_env
, tcg_const_i32(0), reg
);
1297 reg
= AREG(insn
, 9);
1298 tmp
= gen_lea(s
, insn
, OS_LONG
);
1299 if (IS_NULL_QREG(tmp
)) {
1303 tcg_gen_mov_i32(reg
, tmp
);
1310 switch ((insn
>> 6) & 3) {
1323 DEST_EA(insn
, opsize
, tcg_const_i32(0), NULL
);
1324 gen_logic_cc(s
, tcg_const_i32(0));
1327 static TCGv
gen_get_ccr(DisasContext
*s
)
1332 dest
= tcg_temp_new();
1333 tcg_gen_shli_i32(dest
, QREG_CC_X
, 4);
1334 tcg_gen_or_i32(dest
, dest
, QREG_CC_DEST
);
1338 DISAS_INSN(move_from_ccr
)
1343 ccr
= gen_get_ccr(s
);
1344 reg
= DREG(insn
, 0);
1345 gen_partset_reg(OS_WORD
, reg
, ccr
);
1353 reg
= DREG(insn
, 0);
1354 src1
= tcg_temp_new();
1355 tcg_gen_mov_i32(src1
, reg
);
1356 tcg_gen_neg_i32(reg
, src1
);
1357 s
->cc_op
= CC_OP_SUB
;
1358 gen_update_cc_add(reg
, src1
);
1359 gen_helper_xflag_lt(QREG_CC_X
, tcg_const_i32(0), src1
);
1360 s
->cc_op
= CC_OP_SUB
;
1363 static void gen_set_sr_im(DisasContext
*s
, uint16_t val
, int ccr_only
)
1365 tcg_gen_movi_i32(QREG_CC_DEST
, val
& 0xf);
1366 tcg_gen_movi_i32(QREG_CC_X
, (val
& 0x10) >> 4);
1368 gen_helper_set_sr(cpu_env
, tcg_const_i32(val
& 0xff00));
1372 static void gen_set_sr(DisasContext
*s
, uint16_t insn
, int ccr_only
)
1377 s
->cc_op
= CC_OP_FLAGS
;
1378 if ((insn
& 0x38) == 0)
1380 tmp
= tcg_temp_new();
1381 reg
= DREG(insn
, 0);
1382 tcg_gen_andi_i32(QREG_CC_DEST
, reg
, 0xf);
1383 tcg_gen_shri_i32(tmp
, reg
, 4);
1384 tcg_gen_andi_i32(QREG_CC_X
, tmp
, 1);
1386 gen_helper_set_sr(cpu_env
, reg
);
1389 else if ((insn
& 0x3f) == 0x3c)
1392 val
= lduw_code(s
->pc
);
1394 gen_set_sr_im(s
, val
, ccr_only
);
1397 disas_undef(s
, insn
);
1400 DISAS_INSN(move_to_ccr
)
1402 gen_set_sr(s
, insn
, 1);
1409 reg
= DREG(insn
, 0);
1410 tcg_gen_not_i32(reg
, reg
);
1411 gen_logic_cc(s
, reg
);
1420 src1
= tcg_temp_new();
1421 src2
= tcg_temp_new();
1422 reg
= DREG(insn
, 0);
1423 tcg_gen_shli_i32(src1
, reg
, 16);
1424 tcg_gen_shri_i32(src2
, reg
, 16);
1425 tcg_gen_or_i32(reg
, src1
, src2
);
1426 gen_logic_cc(s
, reg
);
1433 tmp
= gen_lea(s
, insn
, OS_LONG
);
1434 if (IS_NULL_QREG(tmp
)) {
1447 reg
= DREG(insn
, 0);
1448 op
= (insn
>> 6) & 7;
1449 tmp
= tcg_temp_new();
1451 tcg_gen_ext16s_i32(tmp
, reg
);
1453 tcg_gen_ext8s_i32(tmp
, reg
);
1455 gen_partset_reg(OS_WORD
, reg
, tmp
);
1457 tcg_gen_mov_i32(reg
, tmp
);
1458 gen_logic_cc(s
, tmp
);
1466 switch ((insn
>> 6) & 3) {
1479 SRC_EA(tmp
, opsize
, 1, NULL
);
1480 gen_logic_cc(s
, tmp
);
1485 /* Implemented as a NOP. */
1490 gen_exception(s
, s
->pc
- 2, EXCP_ILLEGAL
);
1493 /* ??? This should be atomic. */
1500 dest
= tcg_temp_new();
1501 SRC_EA(src1
, OS_BYTE
, 1, &addr
);
1502 gen_logic_cc(s
, src1
);
1503 tcg_gen_ori_i32(dest
, src1
, 0x80);
1504 DEST_EA(insn
, OS_BYTE
, dest
, &addr
);
1514 /* The upper 32 bits of the product are discarded, so
1515 muls.l and mulu.l are functionally equivalent. */
1516 ext
= lduw_code(s
->pc
);
1519 gen_exception(s
, s
->pc
- 4, EXCP_UNSUPPORTED
);
1522 reg
= DREG(ext
, 12);
1523 SRC_EA(src1
, OS_LONG
, 0, NULL
);
1524 dest
= tcg_temp_new();
1525 tcg_gen_mul_i32(dest
, src1
, reg
);
1526 tcg_gen_mov_i32(reg
, dest
);
1527 /* Unlike m68k, coldfire always clears the overflow bit. */
1528 gen_logic_cc(s
, dest
);
1537 offset
= ldsw_code(s
->pc
);
1539 reg
= AREG(insn
, 0);
1540 tmp
= tcg_temp_new();
1541 tcg_gen_subi_i32(tmp
, QREG_SP
, 4);
1542 gen_store(s
, OS_LONG
, tmp
, reg
);
1543 if ((insn
& 7) != 7)
1544 tcg_gen_mov_i32(reg
, tmp
);
1545 tcg_gen_addi_i32(QREG_SP
, tmp
, offset
);
1554 src
= tcg_temp_new();
1555 reg
= AREG(insn
, 0);
1556 tcg_gen_mov_i32(src
, reg
);
1557 tmp
= gen_load(s
, OS_LONG
, src
, 0);
1558 tcg_gen_mov_i32(reg
, tmp
);
1559 tcg_gen_addi_i32(QREG_SP
, src
, 4);
1570 tmp
= gen_load(s
, OS_LONG
, QREG_SP
, 0);
1571 tcg_gen_addi_i32(QREG_SP
, QREG_SP
, 4);
    /* Load the target address first to ensure correct exception behavior.  */
1581 tmp
= gen_lea(s
, insn
, OS_LONG
);
1582 if (IS_NULL_QREG(tmp
)) {
1586 if ((insn
& 0x40) == 0) {
1588 gen_push(s
, tcg_const_i32(s
->pc
));
1601 SRC_EA(src1
, OS_LONG
, 0, &addr
);
1602 val
= (insn
>> 9) & 7;
1605 dest
= tcg_temp_new();
1606 tcg_gen_mov_i32(dest
, src1
);
1607 if ((insn
& 0x38) == 0x08) {
1608 /* Don't update condition codes if the destination is an
1609 address register. */
1610 if (insn
& 0x0100) {
1611 tcg_gen_subi_i32(dest
, dest
, val
);
1613 tcg_gen_addi_i32(dest
, dest
, val
);
1616 src2
= tcg_const_i32(val
);
1617 if (insn
& 0x0100) {
1618 gen_helper_xflag_lt(QREG_CC_X
, dest
, src2
);
1619 tcg_gen_subi_i32(dest
, dest
, val
);
1620 s
->cc_op
= CC_OP_SUB
;
1622 tcg_gen_addi_i32(dest
, dest
, val
);
1623 gen_helper_xflag_lt(QREG_CC_X
, dest
, src2
);
1624 s
->cc_op
= CC_OP_ADD
;
1626 gen_update_cc_add(dest
, src2
);
1628 DEST_EA(insn
, OS_LONG
, dest
, &addr
);
1634 case 2: /* One extension word. */
1637 case 3: /* Two extension words. */
1640 case 4: /* No extension words. */
1643 disas_undef(s
, insn
);
1655 op
= (insn
>> 8) & 0xf;
1656 offset
= (int8_t)insn
;
1658 offset
= ldsw_code(s
->pc
);
1660 } else if (offset
== -1) {
1661 offset
= read_im32(s
);
1665 gen_push(s
, tcg_const_i32(s
->pc
));
1670 l1
= gen_new_label();
1671 gen_jmpcc(s
, ((insn
>> 8) & 0xf) ^ 1, l1
);
1672 gen_jmp_tb(s
, 1, base
+ offset
);
1674 gen_jmp_tb(s
, 0, s
->pc
);
1676 /* Unconditional branch. */
1677 gen_jmp_tb(s
, 0, base
+ offset
);
1686 tcg_gen_movi_i32(DREG(insn
, 9), val
);
1687 gen_logic_cc(s
, tcg_const_i32(val
));
1700 SRC_EA(src
, opsize
, (insn
& 0x80) == 0, NULL
);
1701 reg
= DREG(insn
, 9);
1702 tcg_gen_mov_i32(reg
, src
);
1703 gen_logic_cc(s
, src
);
1713 reg
= DREG(insn
, 9);
1714 dest
= tcg_temp_new();
1716 SRC_EA(src
, OS_LONG
, 0, &addr
);
1717 tcg_gen_or_i32(dest
, src
, reg
);
1718 DEST_EA(insn
, OS_LONG
, dest
, &addr
);
1720 SRC_EA(src
, OS_LONG
, 0, NULL
);
1721 tcg_gen_or_i32(dest
, src
, reg
);
1722 tcg_gen_mov_i32(reg
, dest
);
1724 gen_logic_cc(s
, dest
);
1732 SRC_EA(src
, OS_LONG
, 0, NULL
);
1733 reg
= AREG(insn
, 9);
1734 tcg_gen_sub_i32(reg
, reg
, src
);
1743 reg
= DREG(insn
, 9);
1744 src
= DREG(insn
, 0);
1745 gen_helper_subx_cc(reg
, cpu_env
, reg
, src
);
1753 val
= (insn
>> 9) & 7;
1756 src
= tcg_const_i32(val
);
1757 gen_logic_cc(s
, src
);
1758 DEST_EA(insn
, OS_LONG
, src
, NULL
);
1769 op
= (insn
>> 6) & 3;
1773 s
->cc_op
= CC_OP_CMPB
;
1777 s
->cc_op
= CC_OP_CMPW
;
1781 s
->cc_op
= CC_OP_SUB
;
1786 SRC_EA(src
, opsize
, 1, NULL
);
1787 reg
= DREG(insn
, 9);
1788 dest
= tcg_temp_new();
1789 tcg_gen_sub_i32(dest
, reg
, src
);
1790 gen_update_cc_add(dest
, src
);
1805 SRC_EA(src
, opsize
, 1, NULL
);
1806 reg
= AREG(insn
, 9);
1807 dest
= tcg_temp_new();
1808 tcg_gen_sub_i32(dest
, reg
, src
);
1809 gen_update_cc_add(dest
, src
);
1810 s
->cc_op
= CC_OP_SUB
;
1820 SRC_EA(src
, OS_LONG
, 0, &addr
);
1821 reg
= DREG(insn
, 9);
1822 dest
= tcg_temp_new();
1823 tcg_gen_xor_i32(dest
, src
, reg
);
1824 gen_logic_cc(s
, dest
);
1825 DEST_EA(insn
, OS_LONG
, dest
, &addr
);
1835 reg
= DREG(insn
, 9);
1836 dest
= tcg_temp_new();
1838 SRC_EA(src
, OS_LONG
, 0, &addr
);
1839 tcg_gen_and_i32(dest
, src
, reg
);
1840 DEST_EA(insn
, OS_LONG
, dest
, &addr
);
1842 SRC_EA(src
, OS_LONG
, 0, NULL
);
1843 tcg_gen_and_i32(dest
, src
, reg
);
1844 tcg_gen_mov_i32(reg
, dest
);
1846 gen_logic_cc(s
, dest
);
1854 SRC_EA(src
, OS_LONG
, 0, NULL
);
1855 reg
= AREG(insn
, 9);
1856 tcg_gen_add_i32(reg
, reg
, src
);
1865 reg
= DREG(insn
, 9);
1866 src
= DREG(insn
, 0);
1867 gen_helper_addx_cc(reg
, cpu_env
, reg
, src
);
1868 s
->cc_op
= CC_OP_FLAGS
;
1871 /* TODO: This could be implemented without helper functions. */
1872 DISAS_INSN(shift_im
)
1878 reg
= DREG(insn
, 0);
1879 tmp
= (insn
>> 9) & 7;
1882 shift
= tcg_const_i32(tmp
);
    /* No need to flush flags because we know we will set C flag.  */
1885 gen_helper_shl_cc(reg
, cpu_env
, reg
, shift
);
1888 gen_helper_shr_cc(reg
, cpu_env
, reg
, shift
);
1890 gen_helper_sar_cc(reg
, cpu_env
, reg
, shift
);
1893 s
->cc_op
= CC_OP_SHIFT
;
1896 DISAS_INSN(shift_reg
)
1901 reg
= DREG(insn
, 0);
1902 shift
= DREG(insn
, 9);
1903 /* Shift by zero leaves C flag unmodified. */
1906 gen_helper_shl_cc(reg
, cpu_env
, reg
, shift
);
1909 gen_helper_shr_cc(reg
, cpu_env
, reg
, shift
);
1911 gen_helper_sar_cc(reg
, cpu_env
, reg
, shift
);
1914 s
->cc_op
= CC_OP_SHIFT
;
1920 reg
= DREG(insn
, 0);
1921 gen_logic_cc(s
, reg
);
1922 gen_helper_ff1(reg
, reg
);
1925 static TCGv
gen_get_sr(DisasContext
*s
)
1930 ccr
= gen_get_ccr(s
);
1931 sr
= tcg_temp_new();
1932 tcg_gen_andi_i32(sr
, QREG_SR
, 0xffe0);
1933 tcg_gen_or_i32(sr
, sr
, ccr
);
1943 ext
= lduw_code(s
->pc
);
1945 if (ext
!= 0x46FC) {
1946 gen_exception(s
, addr
, EXCP_UNSUPPORTED
);
1949 ext
= lduw_code(s
->pc
);
1951 if (IS_USER(s
) || (ext
& SR_S
) == 0) {
1952 gen_exception(s
, addr
, EXCP_PRIVILEGE
);
1955 gen_push(s
, gen_get_sr(s
));
1956 gen_set_sr_im(s
, ext
, 0);
1959 DISAS_INSN(move_from_sr
)
1965 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1969 reg
= DREG(insn
, 0);
1970 gen_partset_reg(OS_WORD
, reg
, sr
);
1973 DISAS_INSN(move_to_sr
)
1976 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1979 gen_set_sr(s
, insn
, 0);
1983 DISAS_INSN(move_from_usp
)
1986 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1989 /* TODO: Implement USP. */
1990 gen_exception(s
, s
->pc
- 2, EXCP_ILLEGAL
);
1993 DISAS_INSN(move_to_usp
)
1996 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1999 /* TODO: Implement USP. */
2000 gen_exception(s
, s
->pc
- 2, EXCP_ILLEGAL
);
2005 gen_exception(s
, s
->pc
, EXCP_HALT_INSN
);
2013 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2017 ext
= lduw_code(s
->pc
);
2020 gen_set_sr_im(s
, ext
, 0);
2021 tcg_gen_movi_i32(QREG_HALTED
, 1);
2022 gen_exception(s
, s
->pc
, EXCP_HLT
);
2028 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2031 gen_exception(s
, s
->pc
- 2, EXCP_RTE
);
2040 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2044 ext
= lduw_code(s
->pc
);
2048 reg
= AREG(ext
, 12);
2050 reg
= DREG(ext
, 12);
2052 gen_helper_movec(cpu_env
, tcg_const_i32(ext
& 0xfff), reg
);
2059 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2062 /* ICache fetch. Implement as no-op. */
2068 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2071 /* Cache push/invalidate. Implement as no-op. */
2076 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2082 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
2085 /* TODO: Implement wdebug. */
2086 qemu_assert(0, "WDEBUG not implemented");
2091 gen_exception(s
, s
->pc
- 2, EXCP_TRAP0
+ (insn
& 0xf));
2094 /* ??? FP exceptions are not implemented. Most exceptions are deferred until
2095 immediately before the next FP instruction is executed. */
2109 ext
= lduw_code(s
->pc
);
2111 opmode
= ext
& 0x7f;
2112 switch ((ext
>> 13) & 7) {
2117 case 3: /* fmove out */
2119 tmp32
= tcg_temp_new_i32();
2121 /* ??? TODO: Proper behavior on overflow. */
2122 switch ((ext
>> 10) & 7) {
2125 gen_helper_f64_to_i32(tmp32
, cpu_env
, src
);
2129 gen_helper_f64_to_f32(tmp32
, cpu_env
, src
);
2133 gen_helper_f64_to_i32(tmp32
, cpu_env
, src
);
2135 case 5: /* OS_DOUBLE */
2136 tcg_gen_mov_i32(tmp32
, AREG(insn
, 0));
2137 switch ((insn
>> 3) & 7) {
2142 tcg_gen_addi_i32(tmp32
, tmp32
, -8);
2145 offset
= ldsw_code(s
->pc
);
2147 tcg_gen_addi_i32(tmp32
, tmp32
, offset
);
2152 gen_store64(s
, tmp32
, src
);
2153 switch ((insn
>> 3) & 7) {
2155 tcg_gen_addi_i32(tmp32
, tmp32
, 8);
2156 tcg_gen_mov_i32(AREG(insn
, 0), tmp32
);
2159 tcg_gen_mov_i32(AREG(insn
, 0), tmp32
);
2162 tcg_temp_free_i32(tmp32
);
2166 gen_helper_f64_to_i32(tmp32
, cpu_env
, src
);
2171 DEST_EA(insn
, opsize
, tmp32
, NULL
);
2172 tcg_temp_free_i32(tmp32
);
2174 case 4: /* fmove to control register. */
2175 switch ((ext
>> 10) & 7) {
2177 /* Not implemented. Ignore writes. */
2182 cpu_abort(NULL
, "Unimplemented: fmove to control %d",
2186 case 5: /* fmove from control register. */
2187 switch ((ext
>> 10) & 7) {
2189 /* Not implemented. Always return zero. */
2190 tmp32
= tcg_const_i32(0);
2195 cpu_abort(NULL
, "Unimplemented: fmove from control %d",
2199 DEST_EA(insn
, OS_LONG
, tmp32
, NULL
);
2201 case 6: /* fmovem */
2207 if ((ext
& 0x1f00) != 0x1000 || (ext
& 0xff) == 0)
2209 tmp32
= gen_lea(s
, insn
, OS_LONG
);
2210 if (IS_NULL_QREG(tmp32
)) {
2214 addr
= tcg_temp_new_i32();
2215 tcg_gen_mov_i32(addr
, tmp32
);
2217 for (i
= 0; i
< 8; i
++) {
2221 if (ext
& (1 << 13)) {
2223 tcg_gen_qemu_stf64(dest
, addr
, IS_USER(s
));
2226 tcg_gen_qemu_ldf64(dest
, addr
, IS_USER(s
));
2228 if (ext
& (mask
- 1))
2229 tcg_gen_addi_i32(addr
, addr
, 8);
2233 tcg_temp_free_i32(addr
);
2237 if (ext
& (1 << 14)) {
2238 /* Source effective address. */
2239 switch ((ext
>> 10) & 7) {
2240 case 0: opsize
= OS_LONG
; break;
2241 case 1: opsize
= OS_SINGLE
; break;
2242 case 4: opsize
= OS_WORD
; break;
2243 case 5: opsize
= OS_DOUBLE
; break;
2244 case 6: opsize
= OS_BYTE
; break;
2248 if (opsize
== OS_DOUBLE
) {
2249 tmp32
= tcg_temp_new_i32();
2250 tcg_gen_mov_i32(tmp32
, AREG(insn
, 0));
2251 switch ((insn
>> 3) & 7) {
2256 tcg_gen_addi_i32(tmp32
, tmp32
, -8);
2259 offset
= ldsw_code(s
->pc
);
2261 tcg_gen_addi_i32(tmp32
, tmp32
, offset
);
2264 offset
= ldsw_code(s
->pc
);
2265 offset
+= s
->pc
- 2;
2267 tcg_gen_addi_i32(tmp32
, tmp32
, offset
);
2272 src
= gen_load64(s
, tmp32
);
2273 switch ((insn
>> 3) & 7) {
2275 tcg_gen_addi_i32(tmp32
, tmp32
, 8);
2276 tcg_gen_mov_i32(AREG(insn
, 0), tmp32
);
2279 tcg_gen_mov_i32(AREG(insn
, 0), tmp32
);
2282 tcg_temp_free_i32(tmp32
);
2284 SRC_EA(tmp32
, opsize
, 1, NULL
);
2285 src
= tcg_temp_new_i64();
2290 gen_helper_i32_to_f64(src
, cpu_env
, tmp32
);
2293 gen_helper_f32_to_f64(src
, cpu_env
, tmp32
);
2298 /* Source register. */
2299 src
= FREG(ext
, 10);
2301 dest
= FREG(ext
, 7);
2302 res
= tcg_temp_new_i64();
2304 tcg_gen_mov_f64(res
, dest
);
2308 case 0: case 0x40: case 0x44: /* fmove */
2309 tcg_gen_mov_f64(res
, src
);
2312 gen_helper_iround_f64(res
, cpu_env
, src
);
2315 case 3: /* fintrz */
2316 gen_helper_itrunc_f64(res
, cpu_env
, src
);
2319 case 4: case 0x41: case 0x45: /* fsqrt */
2320 gen_helper_sqrt_f64(res
, cpu_env
, src
);
2322 case 0x18: case 0x58: case 0x5c: /* fabs */
2323 gen_helper_abs_f64(res
, src
);
2325 case 0x1a: case 0x5a: case 0x5e: /* fneg */
2326 gen_helper_chs_f64(res
, src
);
2328 case 0x20: case 0x60: case 0x64: /* fdiv */
2329 gen_helper_div_f64(res
, cpu_env
, res
, src
);
2331 case 0x22: case 0x62: case 0x66: /* fadd */
2332 gen_helper_add_f64(res
, cpu_env
, res
, src
);
2334 case 0x23: case 0x63: case 0x67: /* fmul */
2335 gen_helper_mul_f64(res
, cpu_env
, res
, src
);
2337 case 0x28: case 0x68: case 0x6c: /* fsub */
2338 gen_helper_sub_f64(res
, cpu_env
, res
, src
);
2340 case 0x38: /* fcmp */
2341 gen_helper_sub_cmp_f64(res
, cpu_env
, res
, src
);
2345 case 0x3a: /* ftst */
2346 tcg_gen_mov_f64(res
, src
);
2353 if (ext
& (1 << 14)) {
2354 tcg_temp_free_i64(src
);
2357 if (opmode
& 0x40) {
2358 if ((opmode
& 0x4) != 0)
2360 } else if ((s
->fpcr
& M68K_FPCR_PREC
) == 0) {
2365 TCGv tmp
= tcg_temp_new_i32();
2366 gen_helper_f64_to_f32(tmp
, cpu_env
, res
);
2367 gen_helper_f32_to_f64(res
, cpu_env
, tmp
);
2368 tcg_temp_free_i32(tmp
);
2370 tcg_gen_mov_f64(QREG_FP_RESULT
, res
);
2372 tcg_gen_mov_f64(dest
, res
);
2374 tcg_temp_free_i64(res
);
2377 /* FIXME: Is this right for offset addressing modes? */
2379 disas_undef_fpu(s
, insn
);
2390 offset
= ldsw_code(s
->pc
);
2392 if (insn
& (1 << 6)) {
2393 offset
= (offset
<< 16) | lduw_code(s
->pc
);
2397 l1
= gen_new_label();
2398 /* TODO: Raise BSUN exception. */
2399 flag
= tcg_temp_new();
2400 gen_helper_compare_f64(flag
, cpu_env
, QREG_FP_RESULT
);
2401 /* Jump to l1 if condition is true. */
2402 switch (insn
& 0xf) {
2405 case 1: /* eq (=0) */
2406 tcg_gen_brcond_i32(TCG_COND_EQ
, flag
, tcg_const_i32(0), l1
);
2408 case 2: /* ogt (=1) */
2409 tcg_gen_brcond_i32(TCG_COND_EQ
, flag
, tcg_const_i32(1), l1
);
2411 case 3: /* oge (=0 or =1) */
2412 tcg_gen_brcond_i32(TCG_COND_LEU
, flag
, tcg_const_i32(1), l1
);
2414 case 4: /* olt (=-1) */
2415 tcg_gen_brcond_i32(TCG_COND_LT
, flag
, tcg_const_i32(0), l1
);
2417 case 5: /* ole (=-1 or =0) */
2418 tcg_gen_brcond_i32(TCG_COND_LE
, flag
, tcg_const_i32(0), l1
);
2420 case 6: /* ogl (=-1 or =1) */
2421 tcg_gen_andi_i32(flag
, flag
, 1);
2422 tcg_gen_brcond_i32(TCG_COND_NE
, flag
, tcg_const_i32(0), l1
);
2424 case 7: /* or (=2) */
2425 tcg_gen_brcond_i32(TCG_COND_EQ
, flag
, tcg_const_i32(2), l1
);
2427 case 8: /* un (<2) */
2428 tcg_gen_brcond_i32(TCG_COND_LT
, flag
, tcg_const_i32(2), l1
);
2430 case 9: /* ueq (=0 or =2) */
2431 tcg_gen_andi_i32(flag
, flag
, 1);
2432 tcg_gen_brcond_i32(TCG_COND_EQ
, flag
, tcg_const_i32(0), l1
);
2434 case 10: /* ugt (>0) */
2435 tcg_gen_brcond_i32(TCG_COND_GT
, flag
, tcg_const_i32(0), l1
);
2437 case 11: /* uge (>=0) */
2438 tcg_gen_brcond_i32(TCG_COND_GE
, flag
, tcg_const_i32(0), l1
);
2440 case 12: /* ult (=-1 or =2) */
2441 tcg_gen_brcond_i32(TCG_COND_GEU
, flag
, tcg_const_i32(2), l1
);
2443 case 13: /* ule (!=1) */
2444 tcg_gen_brcond_i32(TCG_COND_NE
, flag
, tcg_const_i32(1), l1
);
2446 case 14: /* ne (!=0) */
2447 tcg_gen_brcond_i32(TCG_COND_NE
, flag
, tcg_const_i32(0), l1
);
2453 gen_jmp_tb(s
, 0, s
->pc
);
2455 gen_jmp_tb(s
, 1, addr
+ offset
);
2458 DISAS_INSN(frestore
)
2460 /* TODO: Implement frestore. */
2461 qemu_assert(0, "FRESTORE not implemented");
2466 /* TODO: Implement fsave. */
2467 qemu_assert(0, "FSAVE not implemented");
2470 static inline TCGv
gen_mac_extract_word(DisasContext
*s
, TCGv val
, int upper
)
2472 TCGv tmp
= tcg_temp_new();
2473 if (s
->env
->macsr
& MACSR_FI
) {
2475 tcg_gen_andi_i32(tmp
, val
, 0xffff0000);
2477 tcg_gen_shli_i32(tmp
, val
, 16);
2478 } else if (s
->env
->macsr
& MACSR_SU
) {
2480 tcg_gen_sari_i32(tmp
, val
, 16);
2482 tcg_gen_ext16s_i32(tmp
, val
);
2485 tcg_gen_shri_i32(tmp
, val
, 16);
2487 tcg_gen_ext16u_i32(tmp
, val
);
2492 static void gen_mac_clear_flags(void)
2494 tcg_gen_andi_i32(QREG_MACSR
, QREG_MACSR
,
2495 ~(MACSR_V
| MACSR_Z
| MACSR_N
| MACSR_EV
));
2511 s
->mactmp
= tcg_temp_new_i64();
2515 ext
= lduw_code(s
->pc
);
2518 acc
= ((insn
>> 7) & 1) | ((ext
>> 3) & 2);
2519 dual
= ((insn
& 0x30) != 0 && (ext
& 3) != 0);
2520 if (dual
&& !m68k_feature(s
->env
, M68K_FEATURE_CF_EMAC_B
)) {
2521 disas_undef(s
, insn
);
2525 /* MAC with load. */
2526 tmp
= gen_lea(s
, insn
, OS_LONG
);
2527 addr
= tcg_temp_new();
2528 tcg_gen_and_i32(addr
, tmp
, QREG_MAC_MASK
);
2529 /* Load the value now to ensure correct exception behavior.
2530 Perform writeback after reading the MAC inputs. */
2531 loadval
= gen_load(s
, OS_LONG
, addr
, 0);
2534 rx
= (ext
& 0x8000) ? AREG(ext
, 12) : DREG(insn
, 12);
2535 ry
= (ext
& 8) ? AREG(ext
, 0) : DREG(ext
, 0);
2537 loadval
= addr
= NULL_QREG
;
2538 rx
= (insn
& 0x40) ? AREG(insn
, 9) : DREG(insn
, 9);
2539 ry
= (insn
& 8) ? AREG(insn
, 0) : DREG(insn
, 0);
2542 gen_mac_clear_flags();
2545 /* Disabled because conditional branches clobber temporary vars. */
2546 if ((s
->env
->macsr
& MACSR_OMC
) != 0 && !dual
) {
2547 /* Skip the multiply if we know we will ignore it. */
2548 l1
= gen_new_label();
2549 tmp
= tcg_temp_new();
2550 tcg_gen_andi_i32(tmp
, QREG_MACSR
, 1 << (acc
+ 8));
2551 gen_op_jmp_nz32(tmp
, l1
);
2555 if ((ext
& 0x0800) == 0) {
2557 rx
= gen_mac_extract_word(s
, rx
, (ext
& 0x80) != 0);
2558 ry
= gen_mac_extract_word(s
, ry
, (ext
& 0x40) != 0);
2560 if (s
->env
->macsr
& MACSR_FI
) {
2561 gen_helper_macmulf(s
->mactmp
, cpu_env
, rx
, ry
);
2563 if (s
->env
->macsr
& MACSR_SU
)
2564 gen_helper_macmuls(s
->mactmp
, cpu_env
, rx
, ry
);
2566 gen_helper_macmulu(s
->mactmp
, cpu_env
, rx
, ry
);
2567 switch ((ext
>> 9) & 3) {
2569 tcg_gen_shli_i64(s
->mactmp
, s
->mactmp
, 1);
2572 tcg_gen_shri_i64(s
->mactmp
, s
->mactmp
, 1);
2578 /* Save the overflow flag from the multiply. */
2579 saved_flags
= tcg_temp_new();
2580 tcg_gen_mov_i32(saved_flags
, QREG_MACSR
);
2582 saved_flags
= NULL_QREG
;
2586 /* Disabled because conditional branches clobber temporary vars. */
2587 if ((s
->env
->macsr
& MACSR_OMC
) != 0 && dual
) {
2588 /* Skip the accumulate if the value is already saturated. */
2589 l1
= gen_new_label();
2590 tmp
= tcg_temp_new();
2591 gen_op_and32(tmp
, QREG_MACSR
, tcg_const_i32(MACSR_PAV0
<< acc
));
2592 gen_op_jmp_nz32(tmp
, l1
);
2597 tcg_gen_sub_i64(MACREG(acc
), MACREG(acc
), s
->mactmp
);
2599 tcg_gen_add_i64(MACREG(acc
), MACREG(acc
), s
->mactmp
);
2601 if (s
->env
->macsr
& MACSR_FI
)
2602 gen_helper_macsatf(cpu_env
, tcg_const_i32(acc
));
2603 else if (s
->env
->macsr
& MACSR_SU
)
2604 gen_helper_macsats(cpu_env
, tcg_const_i32(acc
));
2606 gen_helper_macsatu(cpu_env
, tcg_const_i32(acc
));
2609 /* Disabled because conditional branches clobber temporary vars. */
2615 /* Dual accumulate variant. */
2616 acc
= (ext
>> 2) & 3;
2617 /* Restore the overflow flag from the multiplier. */
2618 tcg_gen_mov_i32(QREG_MACSR
, saved_flags
);
2620 /* Disabled because conditional branches clobber temporary vars. */
2621 if ((s
->env
->macsr
& MACSR_OMC
) != 0) {
2622 /* Skip the accumulate if the value is already saturated. */
2623 l1
= gen_new_label();
2624 tmp
= tcg_temp_new();
2625 gen_op_and32(tmp
, QREG_MACSR
, tcg_const_i32(MACSR_PAV0
<< acc
));
2626 gen_op_jmp_nz32(tmp
, l1
);
2630 tcg_gen_sub_i64(MACREG(acc
), MACREG(acc
), s
->mactmp
);
2632 tcg_gen_add_i64(MACREG(acc
), MACREG(acc
), s
->mactmp
);
2633 if (s
->env
->macsr
& MACSR_FI
)
2634 gen_helper_macsatf(cpu_env
, tcg_const_i32(acc
));
2635 else if (s
->env
->macsr
& MACSR_SU
)
2636 gen_helper_macsats(cpu_env
, tcg_const_i32(acc
));
2638 gen_helper_macsatu(cpu_env
, tcg_const_i32(acc
));
2640 /* Disabled because conditional branches clobber temporary vars. */
2645 gen_helper_mac_set_flags(cpu_env
, tcg_const_i32(acc
));
2649 rw
= (insn
& 0x40) ? AREG(insn
, 9) : DREG(insn
, 9);
2650 tcg_gen_mov_i32(rw
, loadval
);
    /* FIXME: Should address writeback happen with the masked or
       unmasked value?  */
2653 switch ((insn
>> 3) & 7) {
2654 case 3: /* Post-increment. */
2655 tcg_gen_addi_i32(AREG(insn
, 0), addr
, 4);
2657 case 4: /* Pre-decrement. */
2658 tcg_gen_mov_i32(AREG(insn
, 0), addr
);
2663 DISAS_INSN(from_mac
)
2669 rx
= (insn
& 8) ? AREG(insn
, 0) : DREG(insn
, 0);
2670 accnum
= (insn
>> 9) & 3;
2671 acc
= MACREG(accnum
);
2672 if (s
->env
->macsr
& MACSR_FI
) {
2673 gen_helper_get_macf(rx
, cpu_env
, acc
);
2674 } else if ((s
->env
->macsr
& MACSR_OMC
) == 0) {
2675 tcg_gen_trunc_i64_i32(rx
, acc
);
2676 } else if (s
->env
->macsr
& MACSR_SU
) {
2677 gen_helper_get_macs(rx
, acc
);
2679 gen_helper_get_macu(rx
, acc
);
2682 tcg_gen_movi_i64(acc
, 0);
2683 tcg_gen_andi_i32(QREG_MACSR
, QREG_MACSR
, ~(MACSR_PAV0
<< accnum
));
2687 DISAS_INSN(move_mac
)
2689 /* FIXME: This can be done without a helper. */
2693 dest
= tcg_const_i32((insn
>> 9) & 3);
2694 gen_helper_mac_move(cpu_env
, dest
, tcg_const_i32(src
));
2695 gen_mac_clear_flags();
2696 gen_helper_mac_set_flags(cpu_env
, dest
);
2699 DISAS_INSN(from_macsr
)
2703 reg
= (insn
& 8) ? AREG(insn
, 0) : DREG(insn
, 0);
2704 tcg_gen_mov_i32(reg
, QREG_MACSR
);
2707 DISAS_INSN(from_mask
)
2710 reg
= (insn
& 8) ? AREG(insn
, 0) : DREG(insn
, 0);
2711 tcg_gen_mov_i32(reg
, QREG_MAC_MASK
);
2714 DISAS_INSN(from_mext
)
2718 reg
= (insn
& 8) ? AREG(insn
, 0) : DREG(insn
, 0);
2719 acc
= tcg_const_i32((insn
& 0x400) ? 2 : 0);
2720 if (s
->env
->macsr
& MACSR_FI
)
2721 gen_helper_get_mac_extf(reg
, cpu_env
, acc
);
2723 gen_helper_get_mac_exti(reg
, cpu_env
, acc
);
2726 DISAS_INSN(macsr_to_ccr
)
2728 tcg_gen_movi_i32(QREG_CC_X
, 0);
2729 tcg_gen_andi_i32(QREG_CC_DEST
, QREG_MACSR
, 0xf);
2730 s
->cc_op
= CC_OP_FLAGS
;
2738 accnum
= (insn
>> 9) & 3;
2739 acc
= MACREG(accnum
);
2740 SRC_EA(val
, OS_LONG
, 0, NULL
);
2741 if (s
->env
->macsr
& MACSR_FI
) {
2742 tcg_gen_ext_i32_i64(acc
, val
);
2743 tcg_gen_shli_i64(acc
, acc
, 8);
2744 } else if (s
->env
->macsr
& MACSR_SU
) {
2745 tcg_gen_ext_i32_i64(acc
, val
);
2747 tcg_gen_extu_i32_i64(acc
, val
);
2749 tcg_gen_andi_i32(QREG_MACSR
, QREG_MACSR
, ~(MACSR_PAV0
<< accnum
));
2750 gen_mac_clear_flags();
2751 gen_helper_mac_set_flags(cpu_env
, tcg_const_i32(accnum
));
2754 DISAS_INSN(to_macsr
)
2757 SRC_EA(val
, OS_LONG
, 0, NULL
);
2758 gen_helper_set_macsr(cpu_env
, val
);
2765 SRC_EA(val
, OS_LONG
, 0, NULL
);
2766 tcg_gen_ori_i32(QREG_MAC_MASK
, val
, 0xffff0000);
2773 SRC_EA(val
, OS_LONG
, 0, NULL
);
2774 acc
= tcg_const_i32((insn
& 0x400) ? 2 : 0);
2775 if (s
->env
->macsr
& MACSR_FI
)
2776 gen_helper_set_mac_extf(cpu_env
, val
, acc
);
2777 else if (s
->env
->macsr
& MACSR_SU
)
2778 gen_helper_set_mac_exts(cpu_env
, val
, acc
);
2780 gen_helper_set_mac_extu(cpu_env
, val
, acc
);
2783 static disas_proc opcode_table
[65536];
2786 register_opcode (disas_proc proc
, uint16_t opcode
, uint16_t mask
)
2792 /* Sanity check. All set bits must be included in the mask. */
2793 if (opcode
& ~mask
) {
2795 "qemu internal error: bogus opcode definition %04x/%04x\n",
2799 /* This could probably be cleverer. For now just optimize the case where
2800 the top bits are known. */
2801 /* Find the first zero bit in the mask. */
2803 while ((i
& mask
) != 0)
2805 /* Iterate over all combinations of this and lower bits. */
2810 from
= opcode
& ~(i
- 1);
2812 for (i
= from
; i
< to
; i
++) {
2813 if ((i
& mask
) == opcode
)
2814 opcode_table
[i
] = proc
;
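/* Worked example (added for clarity, not in the original source): registering
   lea as 0x41c0 with mask 0xf1c0 finds bit 11 as the highest zero bit of the
   mask, so the loop scans the 0x1000 opcodes that share the known top bits
   and fills the 512 table slots whose register and EA fields make
   (i & mask) equal the opcode.  */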
/* Register m68k opcode handlers.  Order is important.
   Later insns override earlier ones.  */
2820 void register_m68k_insns (CPUM68KState
*env
)
2822 #define INSN(name, opcode, mask, feature) do { \
2823 if (m68k_feature(env, M68K_FEATURE_##feature)) \
2824 register_opcode(disas_##name, 0x##opcode, 0x##mask); \
2826 INSN(undef
, 0000, 0000, CF_ISA_A
);
2827 INSN(arith_im
, 0080, fff8
, CF_ISA_A
);
2828 INSN(bitrev
, 00c0
, fff8
, CF_ISA_APLUSC
);
2829 INSN(bitop_reg
, 0100, f1c0
, CF_ISA_A
);
2830 INSN(bitop_reg
, 0140, f1c0
, CF_ISA_A
);
2831 INSN(bitop_reg
, 0180, f1c0
, CF_ISA_A
);
2832 INSN(bitop_reg
, 01c0
, f1c0
, CF_ISA_A
);
2833 INSN(arith_im
, 0280, fff8
, CF_ISA_A
);
2834 INSN(byterev
, 02c0
, fff8
, CF_ISA_APLUSC
);
2835 INSN(arith_im
, 0480, fff8
, CF_ISA_A
);
2836 INSN(ff1
, 04c0
, fff8
, CF_ISA_APLUSC
);
2837 INSN(arith_im
, 0680, fff8
, CF_ISA_A
);
2838 INSN(bitop_im
, 0800, ffc0
, CF_ISA_A
);
2839 INSN(bitop_im
, 0840, ffc0
, CF_ISA_A
);
2840 INSN(bitop_im
, 0880, ffc0
, CF_ISA_A
);
2841 INSN(bitop_im
, 08c0
, ffc0
, CF_ISA_A
);
2842 INSN(arith_im
, 0a80
, fff8
, CF_ISA_A
);
2843 INSN(arith_im
, 0c00
, ff38
, CF_ISA_A
);
2844 INSN(move
, 1000, f000
, CF_ISA_A
);
2845 INSN(move
, 2000, f000
, CF_ISA_A
);
2846 INSN(move
, 3000, f000
, CF_ISA_A
);
2847 INSN(strldsr
, 40e7
, ffff
, CF_ISA_APLUSC
);
2848 INSN(negx
, 4080, fff8
, CF_ISA_A
);
2849 INSN(move_from_sr
, 40c0
, fff8
, CF_ISA_A
);
2850 INSN(lea
, 41c0
, f1c0
, CF_ISA_A
);
2851 INSN(clr
, 4200, ff00
, CF_ISA_A
);
2852 INSN(undef
, 42c0
, ffc0
, CF_ISA_A
);
2853 INSN(move_from_ccr
, 42c0
, fff8
, CF_ISA_A
);
2854 INSN(neg
, 4480, fff8
, CF_ISA_A
);
2855 INSN(move_to_ccr
, 44c0
, ffc0
, CF_ISA_A
);
2856 INSN(not, 4680, fff8
, CF_ISA_A
);
2857 INSN(move_to_sr
, 46c0
, ffc0
, CF_ISA_A
);
2858 INSN(pea
, 4840, ffc0
, CF_ISA_A
);
2859 INSN(swap
, 4840, fff8
, CF_ISA_A
);
2860 INSN(movem
, 48c0
, fbc0
, CF_ISA_A
);
2861 INSN(ext
, 4880, fff8
, CF_ISA_A
);
2862 INSN(ext
, 48c0
, fff8
, CF_ISA_A
);
2863 INSN(ext
, 49c0
, fff8
, CF_ISA_A
);
2864 INSN(tst
, 4a00
, ff00
, CF_ISA_A
);
2865 INSN(tas
, 4ac0
, ffc0
, CF_ISA_B
);
2866 INSN(halt
, 4ac8
, ffff
, CF_ISA_A
);
2867 INSN(pulse
, 4acc
, ffff
, CF_ISA_A
);
2868 INSN(illegal
, 4afc
, ffff
, CF_ISA_A
);
2869 INSN(mull
, 4c00
, ffc0
, CF_ISA_A
);
2870 INSN(divl
, 4c40
, ffc0
, CF_ISA_A
);
2871 INSN(sats
, 4c80
, fff8
, CF_ISA_B
);
2872 INSN(trap
, 4e40
, fff0
, CF_ISA_A
);
2873 INSN(link
, 4e50
, fff8
, CF_ISA_A
);
2874 INSN(unlk
, 4e58
, fff8
, CF_ISA_A
);
2875 INSN(move_to_usp
, 4e60
, fff8
, USP
);
2876 INSN(move_from_usp
, 4e68
, fff8
, USP
);
2877 INSN(nop
, 4e71
, ffff
, CF_ISA_A
);
2878 INSN(stop
, 4e72
, ffff
, CF_ISA_A
);
2879 INSN(rte
, 4e73
, ffff
, CF_ISA_A
);
2880 INSN(rts
, 4e75
, ffff
, CF_ISA_A
);
2881 INSN(movec
, 4e7b
, ffff
, CF_ISA_A
);
2882 INSN(jump
, 4e80
, ffc0
, CF_ISA_A
);
2883 INSN(jump
, 4ec0
, ffc0
, CF_ISA_A
);
2884 INSN(addsubq
, 5180, f1c0
, CF_ISA_A
);
2885 INSN(scc
, 50c0
, f0f8
, CF_ISA_A
);
2886 INSN(addsubq
, 5080, f1c0
, CF_ISA_A
);
2887 INSN(tpf
, 51f8
, fff8
, CF_ISA_A
);
2889 /* Branch instructions. */
2890 INSN(branch
, 6000, f000
, CF_ISA_A
);
2891 /* Disable long branch instructions, then add back the ones we want. */
2892 INSN(undef
, 60ff
, f0ff
, CF_ISA_A
); /* All long branches. */
2893 INSN(branch
, 60ff
, f0ff
, CF_ISA_B
);
2894 INSN(undef
, 60ff
, ffff
, CF_ISA_B
); /* bra.l */
2895 INSN(branch
, 60ff
, ffff
, BRAL
);
2897 INSN(moveq
, 7000, f100
, CF_ISA_A
);
2898 INSN(mvzs
, 7100, f100
, CF_ISA_B
);
2899 INSN(or, 8000, f000
, CF_ISA_A
);
2900 INSN(divw
, 80c0
, f0c0
, CF_ISA_A
);
2901 INSN(addsub
, 9000, f000
, CF_ISA_A
);
2902 INSN(subx
, 9180, f1f8
, CF_ISA_A
);
2903 INSN(suba
, 91c0
, f1c0
, CF_ISA_A
);
2905 INSN(undef_mac
, a000
, f000
, CF_ISA_A
);
2906 INSN(mac
, a000
, f100
, CF_EMAC
);
2907 INSN(from_mac
, a180
, f9b0
, CF_EMAC
);
2908 INSN(move_mac
, a110
, f9fc
, CF_EMAC
);
2909 INSN(from_macsr
,a980
, f9f0
, CF_EMAC
);
2910 INSN(from_mask
, ad80
, fff0
, CF_EMAC
);
2911 INSN(from_mext
, ab80
, fbf0
, CF_EMAC
);
2912 INSN(macsr_to_ccr
, a9c0
, ffff
, CF_EMAC
);
2913 INSN(to_mac
, a100
, f9c0
, CF_EMAC
);
2914 INSN(to_macsr
, a900
, ffc0
, CF_EMAC
);
2915 INSN(to_mext
, ab00
, fbc0
, CF_EMAC
);
2916 INSN(to_mask
, ad00
, ffc0
, CF_EMAC
);
2918 INSN(mov3q
, a140
, f1c0
, CF_ISA_B
);
2919 INSN(cmp
, b000
, f1c0
, CF_ISA_B
); /* cmp.b */
2920 INSN(cmp
, b040
, f1c0
, CF_ISA_B
); /* cmp.w */
2921 INSN(cmpa
, b0c0
, f1c0
, CF_ISA_B
); /* cmpa.w */
2922 INSN(cmp
, b080
, f1c0
, CF_ISA_A
);
2923 INSN(cmpa
, b1c0
, f1c0
, CF_ISA_A
);
2924 INSN(eor
, b180
, f1c0
, CF_ISA_A
);
2925 INSN(and, c000
, f000
, CF_ISA_A
);
2926 INSN(mulw
, c0c0
, f0c0
, CF_ISA_A
);
2927 INSN(addsub
, d000
, f000
, CF_ISA_A
);
2928 INSN(addx
, d180
, f1f8
, CF_ISA_A
);
2929 INSN(adda
, d1c0
, f1c0
, CF_ISA_A
);
2930 INSN(shift_im
, e080
, f0f0
, CF_ISA_A
);
2931 INSN(shift_reg
, e0a0
, f0f0
, CF_ISA_A
);
2932 INSN(undef_fpu
, f000
, f000
, CF_ISA_A
);
2933 INSN(fpu
, f200
, ffc0
, CF_FPU
);
2934 INSN(fbcc
, f280
, ffc0
, CF_FPU
);
2935 INSN(frestore
, f340
, ffc0
, CF_FPU
);
2936 INSN(fsave
, f340
, ffc0
, CF_FPU
);
2937 INSN(intouch
, f340
, ffc0
, CF_ISA_A
);
2938 INSN(cpushl
, f428
, ff38
, CF_ISA_A
);
2939 INSN(wddata
, fb00
, ff00
, CF_ISA_A
);
2940 INSN(wdebug
, fbc0
, ffc0
, CF_ISA_A
);
2944 /* ??? Some of this implementation is not exception safe. We should always
2945 write back the result to memory before setting the condition codes. */
2946 static void disas_m68k_insn(CPUState
* env
, DisasContext
*s
)
2950 insn
= lduw_code(s
->pc
);
2953 opcode_table
[insn
](s
, insn
);
2956 /* generate intermediate code for basic block 'tb'. */
2958 gen_intermediate_code_internal(CPUState
*env
, TranslationBlock
*tb
,
2961 DisasContext dc1
, *dc
= &dc1
;
2962 uint16_t *gen_opc_end
;
2965 target_ulong pc_start
;
2970 /* generate intermediate code */
2975 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
2978 dc
->is_jmp
= DISAS_NEXT
;
2980 dc
->cc_op
= CC_OP_DYNAMIC
;
2981 dc
->singlestep_enabled
= env
->singlestep_enabled
;
2982 dc
->fpcr
= env
->fpcr
;
2983 dc
->user
= (env
->sr
& SR_S
) == 0;
2988 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
2990 max_insns
= CF_COUNT_MASK
;
2994 pc_offset
= dc
->pc
- pc_start
;
2995 gen_throws_exception
= NULL
;
2996 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
2997 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
2998 if (bp
->pc
== dc
->pc
) {
2999 gen_exception(dc
, dc
->pc
, EXCP_DEBUG
);
3000 dc
->is_jmp
= DISAS_JUMP
;
3008 j
= gen_opc_ptr
- gen_opc_buf
;
3012 gen_opc_instr_start
[lj
++] = 0;
3014 gen_opc_pc
[lj
] = dc
->pc
;
3015 gen_opc_instr_start
[lj
] = 1;
3016 gen_opc_icount
[lj
] = num_insns
;
3018 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
3020 dc
->insn_pc
= dc
->pc
;
3021 disas_m68k_insn(env
, dc
);
3023 } while (!dc
->is_jmp
&& gen_opc_ptr
< gen_opc_end
&&
3024 !env
->singlestep_enabled
&&
3026 (pc_offset
) < (TARGET_PAGE_SIZE
- 32) &&
3027 num_insns
< max_insns
);
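/* Note on the loop above (added for clarity, not in the original source):
   translation of a block continues until the guest code jumps (is_jmp leaves
   DISAS_NEXT), the TCG opcode buffer is nearly full, single-stepping is
   enabled, the block would run within 32 bytes of a page boundary, or the
   icount budget (max_insns) is exhausted.  */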
3029 if (tb
->cflags
& CF_LAST_IO
)
3031 if (unlikely(env
->singlestep_enabled
)) {
3032 /* Make sure the pc is updated, and raise a debug exception. */
3034 gen_flush_cc_op(dc
);
3035 tcg_gen_movi_i32(QREG_PC
, dc
->pc
);
3037 gen_helper_raise_exception(tcg_const_i32(EXCP_DEBUG
));
3039 switch(dc
->is_jmp
) {
3041 gen_flush_cc_op(dc
);
3042 gen_jmp_tb(dc
, 0, dc
->pc
);
3047 gen_flush_cc_op(dc
);
3048 /* indicate that the hash table must be used to find the next TB */
3052 /* nothing more to generate */
3056 gen_icount_end(tb
, num_insns
);
3057 *gen_opc_ptr
= INDEX_op_end
;
3060 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
3061 qemu_log("----------------\n");
3062 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
3063 log_target_disas(pc_start
, dc
->pc
- pc_start
, 0);
3068 j
= gen_opc_ptr
- gen_opc_buf
;
3071 gen_opc_instr_start
[lj
++] = 0;
3073 tb
->size
= dc
->pc
- pc_start
;
3074 tb
->icount
= num_insns
;
3078 //expand_target_qops();
3081 void gen_intermediate_code(CPUState
*env
, TranslationBlock
*tb
)
3083 gen_intermediate_code_internal(env
, tb
, 0);
3086 void gen_intermediate_code_pc(CPUState
*env
, TranslationBlock
*tb
)
3088 gen_intermediate_code_internal(env
, tb
, 1);
3091 void cpu_dump_state(CPUState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
3097 for (i
= 0; i
< 8; i
++)
3099 u
.d
= env
->fregs
[i
];
3100 cpu_fprintf (f
, "D%d = %08x A%d = %08x F%d = %08x%08x (%12g)\n",
3101 i
, env
->dregs
[i
], i
, env
->aregs
[i
],
3102 i
, u
.l
.upper
, u
.l
.lower
, *(double *)&u
.d
);
3104 cpu_fprintf (f
, "PC = %08x ", env
->pc
);
3106 cpu_fprintf (f
, "SR = %04x %c%c%c%c%c ", sr
, (sr
& 0x10) ? 'X' : '-',
3107 (sr
& CCF_N
) ? 'N' : '-', (sr
& CCF_Z
) ? 'Z' : '-',
3108 (sr
& CCF_V
) ? 'V' : '-', (sr
& CCF_C
) ? 'C' : '-');
3109 cpu_fprintf (f
, "FPRESULT = %12g\n", *(double *)&env
->fp_result
);
3112 void restore_state_to_opc(CPUState
*env
, TranslationBlock
*tb
, int pc_pos
)
3114 env
->pc
= gen_opc_pc
[pc_pos
];