 * Copyright (c) 2005-2007 CodeSourcery
 * Written by Paul Brook
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "disas/disas.h"

//#define DEBUG_DISPATCH 1

/* Fake floating point.  */
#define tcg_gen_mov_f64 tcg_gen_mov_i64
#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64

#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#define DEFF64(name, offset) static TCGv_i64 QREG_##name;
static TCGv_ptr cpu_env;

static char cpu_reg_names[3*8*3 + 5*4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
static TCGv_i64 cpu_fregs[8];
static TCGv_i64 cpu_macc[4];
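
/* Each of the accessors below pulls a 3-bit register number out of the
   instruction word, starting at bit POS, and indexes the matching global
   register array. */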
#define DREG(insn, pos) cpu_dregs[((insn) >> (pos)) & 7]
#define AREG(insn, pos) cpu_aregs[((insn) >> (pos)) & 7]
#define FREG(insn, pos) cpu_fregs[((insn) >> (pos)) & 7]
#define MACREG(acc) cpu_macc[acc]
#define QREG_SP cpu_aregs[7]
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
/* Used to distinguish stores from bad addressing modes.  */
static TCGv store_dummy;

#include "exec/gen-icount.h"
void m68k_tcg_init(void)
{
    char *p;
    int i;

#define DEFO32(name, offset) QREG_##name = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) QREG_##name = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUM68KState, offset), #name);
#define DEFF64(name, offset) DEFO64(name, offset)
#include "qregs.def"
#undef DEFO32
#undef DEFO64
#undef DEFF64

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(TCG_AREG0,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(TCG_AREG0,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
        sprintf(p, "F%d", i);
        cpu_fregs[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUM68KState, fregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    NULL_QREG = tcg_global_mem_new(TCG_AREG0, -4, "NULL");
    store_dummy = tcg_global_mem_new(TCG_AREG0, -8, "NULL");
}
static inline void qemu_assert(int cond, const char *msg)
{
    if (!cond) {
        fprintf (stderr, "badness: %s\n", msg);
        abort();
    }
}
/* internal defines */
typedef struct DisasContext {
    CPUM68KState *env;
    target_ulong insn_pc; /* Start of the current instruction.  */
    target_ulong pc;
    int is_jmp;
    int cc_op;
    int user;
    uint32_t fpcr;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    TCGv_i64 mactmp;
    int done_mac;
} DisasContext;

#define DISAS_JUMP_NEXT 4
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) s->user
#endif

/* XXX: move that elsewhere */
/* ??? Fix exceptions.  */
static void *gen_throws_exception;
#define gen_last_qop NULL
typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
#ifdef DEBUG_DISPATCH
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,  \
                                  uint16_t insn);                       \
    static void disas_##name(CPUM68KState *env, DisasContext *s,       \
                             uint16_t insn)                             \
    {                                                                   \
        qemu_log("Dispatch " #name "\n");                               \
        real_disas_##name(env, s, insn);                                \
    }                                                                   \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,  \
                                  uint16_t insn)
#else
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,       \
                             uint16_t insn)
#endif
/* Generate a load from the specified address.  Narrow values are
   sign extended to full register width.  */
static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
{
    TCGv tmp;
    int index = IS_USER(s);

    tmp = tcg_temp_new_i32();
    switch(opsize) {
    case OS_BYTE:
        if (sign)
            tcg_gen_qemu_ld8s(tmp, addr, index);
        else
            tcg_gen_qemu_ld8u(tmp, addr, index);
        break;
    case OS_WORD:
        if (sign)
            tcg_gen_qemu_ld16s(tmp, addr, index);
        else
            tcg_gen_qemu_ld16u(tmp, addr, index);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        break;
    default:
        qemu_assert(0, "bad load size");
    }
    gen_throws_exception = gen_last_qop;
    return tmp;
}
static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)
{
    TCGv_i64 tmp;
    int index = IS_USER(s);

    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ldf64(tmp, addr, index);
    gen_throws_exception = gen_last_qop;
    return tmp;
}
/* Generate a store.  */
static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
{
    int index = IS_USER(s);

    switch(opsize) {
    case OS_BYTE:
        tcg_gen_qemu_st8(val, addr, index);
        break;
    case OS_WORD:
        tcg_gen_qemu_st16(val, addr, index);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tcg_gen_qemu_st32(val, addr, index);
        break;
    default:
        qemu_assert(0, "bad store size");
    }
    gen_throws_exception = gen_last_qop;
}
static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)
{
    int index = IS_USER(s);

    tcg_gen_qemu_stf64(val, addr, index);
    gen_throws_exception = gen_last_qop;
}
typedef enum {
    EA_STORE,
    EA_LOADU,
    EA_LOADS
} ea_what;

/* Generate an unsigned load if WHAT is EA_LOADU, a signed load if WHAT is
   EA_LOADS, otherwise generate a store.  */
static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
                     ea_what what)
{
    if (what == EA_STORE) {
        gen_store(s, opsize, addr, val);
        return store_dummy;
    } else {
        return gen_load(s, opsize, addr, what == EA_LOADS);
    }
}
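
/* Note: the 68k instruction stream is big-endian, so the high-order word of
   a 32-bit immediate is fetched first and the low-order word second. */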
/* Read a 32-bit immediate constant.  */
static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
{
    uint32_t im;

    im = ((uint32_t)cpu_lduw_code(env, s->pc)) << 16;
    s->pc += 2;
    im |= cpu_lduw_code(env, s->pc);
    s->pc += 2;
    return im;
}
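
/* An index extension word encodes the index register number in bits 12-14,
   An vs Dn in bit 15, long vs sign-extended word index in bit 11, and the
   scale factor in bits 9-10; gen_addr_index() decodes exactly that layout. */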
/* Calculate an address index.  */
static TCGv gen_addr_index(uint16_t ext, TCGv tmp)
{
    TCGv add;
    int scale;

    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        tcg_gen_ext16s_i32(tmp, add);
        add = tmp;
    }
    scale = (ext >> 9) & 3;
    if (scale != 0) {
        tcg_gen_shli_i32(tmp, add, scale);
        add = tmp;
    }
    return add;
}
/* Handle a base + index + displacement effective address.
   A NULL_QREG base means pc-relative.  */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, int opsize,
                            TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    offset = s->pc;
    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;

    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)cpu_lduw_code(env, s->pc);
                s->pc += 2;
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = tcg_temp_new();
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(ext, tmp);
        } else {
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                base = tcg_const_i32(offset + bd);
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            add = tcg_const_i32(bd);
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = gen_load(s, OS_LONG, add, 0);
            if ((ext & 0x44) == 4) {
                add = gen_addr_index(ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement */
                if ((ext & 3) == 2) {
                    od = (int16_t)cpu_lduw_code(env, s->pc);
                    s->pc += 2;
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format */
        tmp = tcg_temp_new();
        add = gen_addr_index(ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}
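
/* Condition codes are evaluated lazily: most instructions just record their
   operands in QREG_CC_SRC/QREG_CC_DEST together with a cc_op value, and the
   individual flag bits are only materialized when gen_flush_flags() runs. */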
/* Update the CPU env CC_OP state.  */
static inline void gen_flush_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
}

/* Evaluate all the CC flags.  */
static inline void gen_flush_flags(DisasContext *s)
{
    if (s->cc_op == CC_OP_FLAGS)
        return;
    gen_flush_cc_op(s);
    gen_helper_flush_flags(cpu_env, QREG_CC_OP);
    s->cc_op = CC_OP_FLAGS;
}

static void gen_logic_cc(DisasContext *s, TCGv val)
{
    tcg_gen_mov_i32(QREG_CC_DEST, val);
    s->cc_op = CC_OP_LOGIC;
}

static void gen_update_cc_add(TCGv dest, TCGv src)
{
    tcg_gen_mov_i32(QREG_CC_DEST, dest);
    tcg_gen_mov_i32(QREG_CC_SRC, src);
}
static inline int opsize_bytes(int opsize)
{
    switch (opsize) {
    case OS_BYTE: return 1;
    case OS_WORD: return 2;
    case OS_LONG: return 4;
    case OS_SINGLE: return 4;
    case OS_DOUBLE: return 8;
    default:
        qemu_assert(0, "bad operand size");
        return 0;
    }
}
/* Assign value to a register.  If the width is less than the register width
   only the low part of the register is set.  */
static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
{
    TCGv tmp;

    switch (opsize) {
    case OS_BYTE:
        tcg_gen_andi_i32(reg, reg, 0xffffff00);
        tmp = tcg_temp_new();
        tcg_gen_ext8u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        break;
    case OS_WORD:
        tcg_gen_andi_i32(reg, reg, 0xffff0000);
        tmp = tcg_temp_new();
        tcg_gen_ext16u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tcg_gen_mov_i32(reg, val);
        break;
    default:
        qemu_assert(0, "Bad operand size");
        break;
    }
}
/* Sign or zero extend a value.  */
static inline TCGv gen_extend(TCGv val, int opsize, int sign)
{
    TCGv tmp;

    switch (opsize) {
    case OS_BYTE:
        tmp = tcg_temp_new();
        if (sign)
            tcg_gen_ext8s_i32(tmp, val);
        else
            tcg_gen_ext8u_i32(tmp, val);
        break;
    case OS_WORD:
        tmp = tcg_temp_new();
        if (sign)
            tcg_gen_ext16s_i32(tmp, val);
        else
            tcg_gen_ext16u_i32(tmp, val);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tmp = val;
        break;
    default:
        qemu_assert(0, "Bad operand size");
    }
    return tmp;
}
/* Generate code for an "effective address".  Does not adjust the base
   register for autoincrement addressing modes.  */
static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
                    int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct.  */
    case 1: /* Address register direct.  */
        return NULL_QREG;
    case 2: /* Indirect register */
    case 3: /* Indirect postincrement.  */
        return AREG(insn, 0);
    case 4: /* Indirect predecrement.  */
        reg = AREG(insn, 0);
        tmp = tcg_temp_new();
        tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        return tmp;
    case 5: /* Indirect displacement.  */
        reg = AREG(insn, 0);
        tmp = tcg_temp_new();
        ext = cpu_lduw_code(env, s->pc);
        s->pc += 2;
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement.  */
        reg = AREG(insn, 0);
        return gen_lea_indexed(env, s, opsize, reg);
    case 7: /* Other */
        switch (insn & 7) {
        case 0: /* Absolute short.  */
            offset = cpu_ldsw_code(env, s->pc);
            s->pc += 2;
            return tcg_const_i32(offset);
        case 1: /* Absolute long.  */
            offset = read_im32(env, s);
            return tcg_const_i32(offset);
        case 2: /* pc displacement  */
            offset = s->pc;
            offset += cpu_ldsw_code(env, s->pc);
            s->pc += 2;
            return tcg_const_i32(offset);
        case 3: /* pc index+displacement.  */
            return gen_lea_indexed(env, s, opsize, NULL_QREG);
        case 4: /* Immediate.  */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
/* Helper function for gen_ea.  Reuse the computed address for the
   read/write operands.  */
static inline TCGv gen_ea_once(CPUM68KState *env, DisasContext *s,
                               uint16_t insn, int opsize, TCGv val,
                               TCGv *addrp, ea_what what)
{
    TCGv tmp;

    if (addrp && what == EA_STORE) {
        tmp = *addrp;
    } else {
        tmp = gen_lea(env, s, insn, opsize);
        if (IS_NULL_QREG(tmp))
            return tmp;
        if (addrp)
            *addrp = tmp;
    }
    return gen_ldst(s, opsize, tmp, val, what);
}
/* Generate code to load/store a value into/from an EA.  If WHAT is EA_STORE
   this is a write, otherwise a read (EA_LOADS == sign extend,
   EA_LOADU == zero extend).  ADDRP is non-null for readwrite operands.  */
static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
                   int opsize, TCGv val, TCGv *addrp, ea_what what)
{
    TCGv reg;
    TCGv result;
    uint32_t offset;

    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct.  */
        reg = DREG(insn, 0);
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct.  */
        reg = AREG(insn, 0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = AREG(insn, 0);
        return gen_ldst(s, opsize, reg, val, what);
    case 3: /* Indirect postincrement.  */
        reg = AREG(insn, 0);
        result = gen_ldst(s, opsize, reg, val, what);
        /* ??? This is not exception safe.  The instruction may still
           fault after this point.  */
        if (what == EA_STORE || !addrp)
            tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
        return result;
    case 4: /* Indirect predecrement.  */
        {
            TCGv tmp;
            if (addrp && what == EA_STORE) {
                tmp = *addrp;
            } else {
                tmp = gen_lea(env, s, insn, opsize);
                if (IS_NULL_QREG(tmp))
                    return tmp;
                if (addrp)
                    *addrp = tmp;
            }
            result = gen_ldst(s, opsize, tmp, val, what);
            /* ??? This is not exception safe.  The instruction may still
               fault after this point.  */
            if (what == EA_STORE || !addrp) {
                reg = AREG(insn, 0);
                tcg_gen_mov_i32(reg, tmp);
            }
        }
        return result;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
        return gen_ea_once(env, s, insn, opsize, val, addrp, what);
    case 7: /* Other */
        switch (insn & 7) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            return gen_ea_once(env, s, insn, opsize, val, addrp, what);
        case 4: /* Immediate.  */
            /* Sign extend values for consistency.  */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = cpu_ldsb_code(env, s->pc + 1);
                } else {
                    offset = cpu_ldub_code(env, s->pc + 1);
                }
                s->pc += 2;
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = cpu_ldsw_code(env, s->pc);
                } else {
                    offset = cpu_lduw_code(env, s->pc);
                }
                s->pc += 2;
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                qemu_assert(0, "Bad immediate operand");
            }
            return tcg_const_i32(offset);
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
/* This generates a conditional branch, clobbering all temporaries.  */
static void gen_jmpcc(DisasContext *s, int cond, int l1)
{
    TCGv tmp;

    /* TODO: Optimize compare/branch pairs rather than always flushing
       flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);
    switch (cond) {
    case 0: /* T */
        tcg_gen_br(l1);
        break;
    case 1: /* F */
        break;
    case 2: /* HI (!C && !Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 3: /* LS (C || Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 4: /* CC (!C) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 5: /* CS (C) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 6: /* NE (!Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 7: /* EQ (Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 8: /* VC (!V) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 9: /* VS (V) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 10: /* PL (!N) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 11: /* MI (N) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
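    /* For the signed conditions below, note that CCF_V == CCF_N >> 2, so
       shifting CC_DEST right by two and XORing it with itself leaves N ^ V
       in the CCF_V bit position. */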
    case 12: /* GE (!(N ^ V)) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 13: /* LT (N ^ V) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_shri_i32(tmp, tmp, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 15: /* LE (Z || (N ^ V)) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_shri_i32(tmp, tmp, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    default:
        /* Should never happen.  */
        abort();
    }
}
DISAS_INSN(scc)
{
    int l1;
    int cond;
    TCGv reg;

    l1 = gen_new_label();
    cond = (insn >> 8) & 0xf;
    reg = DREG(insn, 0);
    tcg_gen_andi_i32(reg, reg, 0xffffff00);
    /* This is safe because we modify the reg directly, with no other values
       live.  */
    gen_jmpcc(s, cond ^ 1, l1);
    tcg_gen_ori_i32(reg, reg, 0xff);
    gen_set_label(l1);
}
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_lookup_tb(DisasContext *s)
{
    gen_flush_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->is_jmp = DISAS_UPDATE;
}

/* Generate a jump to an immediate address.  */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    gen_flush_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}

/* Generate a jump to the address in qreg DEST.  */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    gen_flush_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}

static void gen_exception(DisasContext *s, uint32_t where, int nr)
{
    gen_flush_cc_op(s);
    gen_jmp_im(s, where);
    gen_helper_raise_exception(cpu_env, tcg_const_i32(nr));
}

static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
}
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU);                 \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)

#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
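
/* gen_jmp_tb() chains directly to the target TB (goto_tb/exit_tb) only when
   the destination lies on the same guest page as this TB; anything else falls
   back to a PC update and a full TB lookup. */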
/* Generate a jump to an immediate address.  */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if (unlikely(s->singlestep_enabled)) {
        gen_exception(s, dest, EXCP_DEBUG);
    } else if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
               (s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = DISAS_TB_JUMP;
}
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->pc - 2, EXCP_LINEA);
}

DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->pc - 2, EXCP_LINEF);
}

DISAS_INSN(undef)
{
    gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
    cpu_abort(env, "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
}
DISAS_INSN(mulw)
{
    TCGv reg;
    TCGv tmp;
    TCGv src;
    int sign;

    sign = (insn & 0x100) != 0;
    reg = DREG(insn, 9);
    tmp = tcg_temp_new();
    if (sign)
        tcg_gen_ext16s_i32(tmp, reg);
    else
        tcg_gen_ext16u_i32(tmp, reg);
    SRC_EA(env, src, OS_WORD, sign, NULL);
    tcg_gen_mul_i32(tmp, tmp, src);
    tcg_gen_mov_i32(reg, tmp);
    /* Unlike m68k, coldfire always clears the overflow bit.  */
    gen_logic_cc(s, tmp);
}
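
/* divw packs its result the way the hardware does: the helper leaves the
   quotient in QREG_DIV1 and the remainder in QREG_DIV2, and the code below
   merges them as low word / high word of the destination register. */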
DISAS_INSN(divw)
{
    TCGv reg;
    TCGv tmp;
    TCGv src;
    int sign;

    sign = (insn & 0x100) != 0;
    reg = DREG(insn, 9);
    if (sign) {
        tcg_gen_ext16s_i32(QREG_DIV1, reg);
    } else {
        tcg_gen_ext16u_i32(QREG_DIV1, reg);
    }
    SRC_EA(env, src, OS_WORD, sign, NULL);
    tcg_gen_mov_i32(QREG_DIV2, src);
    if (sign) {
        gen_helper_divs(cpu_env, tcg_const_i32(1));
    } else {
        gen_helper_divu(cpu_env, tcg_const_i32(1));
    }

    tmp = tcg_temp_new();
    src = tcg_temp_new();
    tcg_gen_ext16u_i32(tmp, QREG_DIV1);
    tcg_gen_shli_i32(src, QREG_DIV2, 16);
    tcg_gen_or_i32(reg, tmp, src);
    s->cc_op = CC_OP_FLAGS;
}
DISAS_INSN(divl)
{
    TCGv num;
    TCGv den;
    TCGv reg;
    uint16_t ext;

    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    if (ext & 0x87f8) {
        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
        return;
    }
    num = DREG(ext, 12);
    reg = DREG(ext, 0);
    tcg_gen_mov_i32(QREG_DIV1, num);
    SRC_EA(env, den, OS_LONG, 0, NULL);
    tcg_gen_mov_i32(QREG_DIV2, den);
    if (ext & 0x0800) {
        gen_helper_divs(cpu_env, tcg_const_i32(0));
    } else {
        gen_helper_divu(cpu_env, tcg_const_i32(0));
    }
    if ((ext & 7) == ((ext >> 12) & 7)) {
        /* div */
        tcg_gen_mov_i32 (reg, QREG_DIV1);
    } else {
        /* rem */
        tcg_gen_mov_i32 (reg, QREG_DIV2);
    }
    s->cc_op = CC_OP_FLAGS;
}
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;

    add = (insn & 0x4000) != 0;
    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, tmp, OS_LONG, 0, &addr);
        src = reg;
    } else {
        tmp = reg;
        SRC_EA(env, src, OS_LONG, 0, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        gen_helper_xflag_lt(QREG_CC_X, dest, src);
        s->cc_op = CC_OP_ADD;
    } else {
        gen_helper_xflag_lt(QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        s->cc_op = CC_OP_SUB;
    }
    gen_update_cc_add(dest, src);
    if (insn & 0x100) {
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    } else {
        tcg_gen_mov_i32(reg, dest);
    }
}
/* Reverse the order of the bits in REG.  */
DISAS_INSN(bitrev)
{
    TCGv reg;

    reg = DREG(insn, 0);
    gen_helper_bitrev(reg, reg);
}
DISAS_INSN(bitop_reg)
{
    int opsize;
    int op;
    TCGv src1;
    TCGv src2;
    TCGv tmp;
    TCGv addr;
    TCGv dest;

    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;
    SRC_EA(env, src1, opsize, 0, op ? &addr : NULL);
    src2 = DREG(insn, 9);
    dest = tcg_temp_new();

    gen_flush_flags(s);
    tmp = tcg_temp_new();
    if (opsize == OS_BYTE)
        tcg_gen_andi_i32(tmp, src2, 7);
    else
        tcg_gen_andi_i32(tmp, src2, 31);
    src2 = tmp;
    tmp = tcg_temp_new();
    tcg_gen_shr_i32(tmp, src1, src2);
    tcg_gen_andi_i32(tmp, tmp, 1);
    tcg_gen_shli_i32(tmp, tmp, 2);
    /* Clear CCF_Z if bit set.  */
    tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
    tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
    if (op) {
        tcg_gen_shl_i32(tmp, tcg_const_i32(1), src2);
        switch (op) {
        case 1: /* bchg */
            tcg_gen_xor_i32(dest, src1, tmp);
            break;
        case 2: /* bclr */
            tcg_gen_not_i32(tmp, tmp);
            tcg_gen_and_i32(dest, src1, tmp);
            break;
        case 3: /* bset */
            tcg_gen_or_i32(dest, src1, tmp);
            break;
        default: /* btst */
            break;
        }
        DEST_EA(env, insn, opsize, dest, &addr);
    }
}
DISAS_INSN(sats)
{
    TCGv reg;

    reg = DREG(insn, 0);
    gen_flush_flags(s);
    gen_helper_sats(reg, reg, QREG_CC_DEST);
    gen_logic_cc(s, reg);
}
static void gen_push(DisasContext *s, TCGv val)
{
    TCGv tmp;

    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, val);
    tcg_gen_mov_i32(QREG_SP, tmp);
}
DISAS_INSN(movem)
{
    TCGv addr;
    int i;
    uint16_t mask;
    TCGv reg;
    TCGv tmp;
    int is_load;

    mask = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    is_load = ((insn & 0x0400) != 0);
    for (i = 0; i < 16; i++, mask >>= 1) {
        if (mask & 1) {
            if (i < 8)
                reg = DREG(i, 0);
            else
                reg = AREG(i, 0);
            if (is_load) {
                tmp = gen_load(s, OS_LONG, addr, 0);
                tcg_gen_mov_i32(reg, tmp);
            } else {
                gen_store(s, OS_LONG, addr, reg);
            }
            if (mask != 1)
                tcg_gen_addi_i32(addr, addr, 4);
        }
    }
}
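
/* The bit-test insns compute Z by shifting the tested bit into the CCF_Z
   position (bit 2), forcing Z set and then XORing: Z ends up clear exactly
   when the tested bit was set. */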
DISAS_INSN(bitop_im)
{
    int opsize;
    int op;
    TCGv src1;
    uint32_t mask;
    int bitnum;
    TCGv tmp;
    TCGv addr;

    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;

    bitnum = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    if (bitnum & 0xff00) {
        disas_undef(env, s, insn);
        return;
    }

    SRC_EA(env, src1, opsize, 0, op ? &addr : NULL);

    gen_flush_flags(s);
    if (opsize == OS_BYTE)
        bitnum &= 7;
    else
        bitnum &= 31;
    mask = 1 << bitnum;

    tmp = tcg_temp_new();
    assert (CCF_Z == (1 << 2));
    if (bitnum > 2)
        tcg_gen_shri_i32(tmp, src1, bitnum - 2);
    else if (bitnum < 2)
        tcg_gen_shli_i32(tmp, src1, 2 - bitnum);
    else
        tcg_gen_mov_i32(tmp, src1);
    tcg_gen_andi_i32(tmp, tmp, CCF_Z);
    /* Clear CCF_Z if bit set.  */
    tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
    tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
    if (op) {
        switch (op) {
        case 1: /* bchg */
            tcg_gen_xori_i32(tmp, src1, mask);
            break;
        case 2: /* bclr */
            tcg_gen_andi_i32(tmp, src1, ~mask);
            break;
        case 3: /* bset */
            tcg_gen_ori_i32(tmp, src1, mask);
            break;
        default: /* btst */
            break;
        }
        DEST_EA(env, insn, opsize, tmp, &addr);
    }
}
DISAS_INSN(arith_im)
{
    int op;
    uint32_t im;
    TCGv src1;
    TCGv dest;
    TCGv addr;

    op = (insn >> 9) & 7;
    SRC_EA(env, src1, OS_LONG, 0, (op == 6) ? NULL : &addr);
    im = read_im32(env, s);
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_ori_i32(dest, src1, im);
        gen_logic_cc(s, dest);
        break;
    case 1: /* andi */
        tcg_gen_andi_i32(dest, src1, im);
        gen_logic_cc(s, dest);
        break;
    case 2: /* subi */
        tcg_gen_mov_i32(dest, src1);
        gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im));
        tcg_gen_subi_i32(dest, dest, im);
        gen_update_cc_add(dest, tcg_const_i32(im));
        s->cc_op = CC_OP_SUB;
        break;
    case 3: /* addi */
        tcg_gen_mov_i32(dest, src1);
        tcg_gen_addi_i32(dest, dest, im);
        gen_update_cc_add(dest, tcg_const_i32(im));
        gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im));
        s->cc_op = CC_OP_ADD;
        break;
    case 5: /* eori */
        tcg_gen_xori_i32(dest, src1, im);
        gen_logic_cc(s, dest);
        break;
    case 6: /* cmpi */
        tcg_gen_mov_i32(dest, src1);
        tcg_gen_subi_i32(dest, dest, im);
        gen_update_cc_add(dest, tcg_const_i32(im));
        s->cc_op = CC_OP_SUB;
        break;
    default:
        abort();
    }
    if (op != 6) {
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    }
}
DISAS_INSN(byterev)
{
    TCGv reg;

    reg = DREG(insn, 0);
    tcg_gen_bswap32_i32(reg, reg);
}
DISAS_INSN(move)
{
    TCGv src;
    TCGv dest;
    int op;
    int opsize;

    switch (insn >> 12) {
    case 1: /* move.b */
        opsize = OS_BYTE;
        break;
    case 2: /* move.l */
        opsize = OS_LONG;
        break;
    case 3: /* move.w */
        opsize = OS_WORD;
        break;
    default:
        abort();
    }
    SRC_EA(env, src, opsize, 1, NULL);
    op = (insn >> 6) & 7;
    if (op == 1) {
        /* movea */
        /* The value will already have been sign extended.  */
        dest = AREG(insn, 9);
        tcg_gen_mov_i32(dest, src);
    } else {
        /* normal move */
        uint16_t dest_ea;
        dest_ea = ((insn >> 9) & 7) | (op << 3);
        DEST_EA(env, dest_ea, opsize, src, NULL);
        /* This will be correct because loads sign extend.  */
        gen_logic_cc(s, src);
    }
}
DISAS_INSN(negx)
{
    TCGv reg;

    gen_flush_flags(s);
    reg = DREG(insn, 0);
    gen_helper_subx_cc(reg, cpu_env, tcg_const_i32(0), reg);
}
DISAS_INSN(lea)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 9);
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    tcg_gen_mov_i32(reg, tmp);
}
DISAS_INSN(clr)
{
    int opsize;

    switch ((insn >> 6) & 3) {
    case 0: /* clr.b */
        opsize = OS_BYTE;
        break;
    case 1: /* clr.w */
        opsize = OS_WORD;
        break;
    case 2: /* clr.l */
        opsize = OS_LONG;
        break;
    default:
        abort();
    }
    DEST_EA(env, insn, opsize, tcg_const_i32(0), NULL);
    gen_logic_cc(s, tcg_const_i32(0));
}
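
/* CCR layout: C, V, Z and N occupy bits 0-3 of QREG_CC_DEST and X is kept
   separately in QREG_CC_X, so building the CCR is a shift-by-4 and an OR. */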
static TCGv gen_get_ccr(DisasContext *s)
{
    TCGv dest;

    gen_flush_flags(s);
    dest = tcg_temp_new();
    tcg_gen_shli_i32(dest, QREG_CC_X, 4);
    tcg_gen_or_i32(dest, dest, QREG_CC_DEST);
    return dest;
}
DISAS_INSN(move_from_ccr)
{
    TCGv reg;
    TCGv ccr;

    ccr = gen_get_ccr(s);
    reg = DREG(insn, 0);
    gen_partset_reg(OS_WORD, reg, ccr);
}
DISAS_INSN(neg)
{
    TCGv reg;
    TCGv src1;

    reg = DREG(insn, 0);
    src1 = tcg_temp_new();
    tcg_gen_mov_i32(src1, reg);
    tcg_gen_neg_i32(reg, src1);
    s->cc_op = CC_OP_SUB;
    gen_update_cc_add(reg, src1);
    gen_helper_xflag_lt(QREG_CC_X, tcg_const_i32(0), src1);
    s->cc_op = CC_OP_SUB;
}
static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
{
    tcg_gen_movi_i32(QREG_CC_DEST, val & 0xf);
    tcg_gen_movi_i32(QREG_CC_X, (val & 0x10) >> 4);
    if (!ccr_only) {
        gen_helper_set_sr(cpu_env, tcg_const_i32(val & 0xff00));
    }
}
static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
                       int ccr_only)
{
    TCGv tmp;
    TCGv reg;

    s->cc_op = CC_OP_FLAGS;
    if ((insn & 0x38) == 0)
    {
        tmp = tcg_temp_new();
        reg = DREG(insn, 0);
        tcg_gen_andi_i32(QREG_CC_DEST, reg, 0xf);
        tcg_gen_shri_i32(tmp, reg, 4);
        tcg_gen_andi_i32(QREG_CC_X, tmp, 1);
        if (!ccr_only) {
            gen_helper_set_sr(cpu_env, reg);
        }
    }
    else if ((insn & 0x3f) == 0x3c)
    {
        uint16_t val;
        val = cpu_lduw_code(env, s->pc);
        s->pc += 2;
        gen_set_sr_im(s, val, ccr_only);
    }
    else
        disas_undef(env, s, insn);
}
DISAS_INSN(move_to_ccr)
{
    gen_set_sr(env, s, insn, 1);
}
DISAS_INSN(not)
{
    TCGv reg;

    reg = DREG(insn, 0);
    tcg_gen_not_i32(reg, reg);
    gen_logic_cc(s, reg);
}
DISAS_INSN(swap)
{
    TCGv src1;
    TCGv src2;
    TCGv reg;

    src1 = tcg_temp_new();
    src2 = tcg_temp_new();
    reg = DREG(insn, 0);
    tcg_gen_shli_i32(src1, reg, 16);
    tcg_gen_shri_i32(src2, reg, 16);
    tcg_gen_or_i32(reg, src1, src2);
    gen_logic_cc(s, reg);
}
DISAS_INSN(pea)
{
    TCGv tmp;

    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    gen_push(s, tmp);
}
= DREG(insn
, 0);
1454 op
= (insn
>> 6) & 7;
1455 tmp
= tcg_temp_new();
1457 tcg_gen_ext16s_i32(tmp
, reg
);
1459 tcg_gen_ext8s_i32(tmp
, reg
);
1461 gen_partset_reg(OS_WORD
, reg
, tmp
);
1463 tcg_gen_mov_i32(reg
, tmp
);
1464 gen_logic_cc(s
, tmp
);
DISAS_INSN(tst)
{
    int opsize;
    TCGv tmp;

    switch ((insn >> 6) & 3) {
    case 0: /* tst.b */
        opsize = OS_BYTE;
        break;
    case 1: /* tst.w */
        opsize = OS_WORD;
        break;
    case 2: /* tst.l */
        opsize = OS_LONG;
        break;
    default:
        abort();
    }
    SRC_EA(env, tmp, opsize, 1, NULL);
    gen_logic_cc(s, tmp);
}
DISAS_INSN(pulse)
{
    /* Implemented as a NOP.  */
}

DISAS_INSN(illegal)
{
    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
}
/* ??? This should be atomic.  */
DISAS_INSN(tas)
{
    TCGv dest;
    TCGv src1;
    TCGv addr;

    dest = tcg_temp_new();
    SRC_EA(env, src1, OS_BYTE, 1, &addr);
    gen_logic_cc(s, src1);
    tcg_gen_ori_i32(dest, src1, 0x80);
    DEST_EA(env, insn, OS_BYTE, dest, &addr);
}
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv reg;
    TCGv src1;
    TCGv dest;

    /* The upper 32 bits of the product are discarded, so
       muls.l and mulu.l are functionally equivalent.  */
    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    if (ext & 0x87ff) {
        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
        return;
    }
    reg = DREG(ext, 12);
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    dest = tcg_temp_new();
    tcg_gen_mul_i32(dest, src1, reg);
    tcg_gen_mov_i32(reg, dest);
    /* Unlike m68k, coldfire always clears the overflow bit.  */
    gen_logic_cc(s, dest);
}
DISAS_INSN(link)
{
    int16_t offset;
    TCGv reg;
    TCGv tmp;

    offset = cpu_ldsw_code(env, s->pc);
    s->pc += 2;
    reg = AREG(insn, 0);
    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg);
    if ((insn & 7) != 7)
        tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, tmp, offset);
}
DISAS_INSN(unlk)
{
    TCGv src;
    TCGv reg;
    TCGv tmp;

    src = tcg_temp_new();
    reg = AREG(insn, 0);
    tcg_gen_mov_i32(src, reg);
    tmp = gen_load(s, OS_LONG, src, 0);
    tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, src, 4);
}
DISAS_INSN(rts)
{
    TCGv tmp;

    tmp = gen_load(s, OS_LONG, QREG_SP, 0);
    tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
    gen_jmp(s, tmp);
}
DISAS_INSN(jump)
{
    TCGv tmp;

    /* Load the target address first to ensure correct exception
       behavior.  */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    if ((insn & 0x40) == 0) {
        /* jsr */
        gen_push(s, tcg_const_i32(s->pc));
    }
    gen_jmp(s, tmp);
}
, src1
, OS_LONG
, 0, &addr
);
1608 val
= (insn
>> 9) & 7;
1611 dest
= tcg_temp_new();
1612 tcg_gen_mov_i32(dest
, src1
);
1613 if ((insn
& 0x38) == 0x08) {
1614 /* Don't update condition codes if the destination is an
1615 address register. */
1616 if (insn
& 0x0100) {
1617 tcg_gen_subi_i32(dest
, dest
, val
);
1619 tcg_gen_addi_i32(dest
, dest
, val
);
1622 src2
= tcg_const_i32(val
);
1623 if (insn
& 0x0100) {
1624 gen_helper_xflag_lt(QREG_CC_X
, dest
, src2
);
1625 tcg_gen_subi_i32(dest
, dest
, val
);
1626 s
->cc_op
= CC_OP_SUB
;
1628 tcg_gen_addi_i32(dest
, dest
, val
);
1629 gen_helper_xflag_lt(QREG_CC_X
, dest
, src2
);
1630 s
->cc_op
= CC_OP_ADD
;
1632 gen_update_cc_add(dest
, src2
);
1634 DEST_EA(env
, insn
, OS_LONG
, dest
, &addr
);
DISAS_INSN(tpf)
{
    switch (insn & 7) {
    case 2: /* One extension word.  */
        s->pc += 2;
        break;
    case 3: /* Two extension words.  */
        s->pc += 4;
        break;
    case 4: /* No extension words.  */
        break;
    default:
        disas_undef(env, s, insn);
        break;
    }
}
DISAS_INSN(branch)
{
    int32_t offset;
    uint32_t base;
    int op;
    int l1;

    base = s->pc;
    op = (insn >> 8) & 0xf;
    offset = (int8_t)insn;
    if (offset == 0) {
        offset = cpu_ldsw_code(env, s->pc);
        s->pc += 2;
    } else if (offset == -1) {
        offset = read_im32(env, s);
    }
    if (op == 1) {
        /* bsr */
        gen_push(s, tcg_const_i32(s->pc));
    }
    gen_flush_cc_op(s);
    if (op > 1) {
        /* Bcc */
        l1 = gen_new_label();
        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
        gen_jmp_tb(s, 1, base + offset);
        gen_set_label(l1);
        gen_jmp_tb(s, 0, s->pc);
    } else {
        /* Unconditional branch.  */
        gen_jmp_tb(s, 0, base + offset);
    }
}
DISAS_INSN(moveq)
{
    uint32_t val;

    val = (int8_t)insn;
    tcg_gen_movi_i32(DREG(insn, 9), val);
    gen_logic_cc(s, tcg_const_i32(val));
}
DISAS_INSN(mvzs)
{
    int opsize;
    TCGv src;
    TCGv reg;

    if (insn & 0x40)
        opsize = OS_WORD;
    else
        opsize = OS_BYTE;
    SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
    reg = DREG(insn, 9);
    tcg_gen_mov_i32(reg, src);
    gen_logic_cc(s, src);
}
DISAS_INSN(or)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv addr;

    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, src, OS_LONG, 0, &addr);
        tcg_gen_or_i32(dest, src, reg);
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    } else {
        SRC_EA(env, src, OS_LONG, 0, NULL);
        tcg_gen_or_i32(dest, src, reg);
        tcg_gen_mov_i32(reg, dest);
    }
    gen_logic_cc(s, dest);
}
DISAS_INSN(suba)
{
    TCGv src;
    TCGv reg;

    SRC_EA(env, src, OS_LONG, 0, NULL);
    reg = AREG(insn, 9);
    tcg_gen_sub_i32(reg, reg, src);
}
DISAS_INSN(subx)
{
    TCGv reg;
    TCGv src;

    gen_flush_flags(s);
    reg = DREG(insn, 9);
    src = DREG(insn, 0);
    gen_helper_subx_cc(reg, cpu_env, reg, src);
}
DISAS_INSN(mov3q)
{
    TCGv src;
    int val;

    val = (insn >> 9) & 7;
    if (val == 0)
        val = -1;
    src = tcg_const_i32(val);
    gen_logic_cc(s, src);
    DEST_EA(env, insn, OS_LONG, src, NULL);
}
DISAS_INSN(cmp)
{
    int op;
    TCGv src;
    TCGv reg;
    TCGv dest;
    int opsize;

    op = (insn >> 6) & 3;
    switch (op) {
    case 0: /* cmp.b */
        opsize = OS_BYTE;
        s->cc_op = CC_OP_CMPB;
        break;
    case 1: /* cmp.w */
        opsize = OS_WORD;
        s->cc_op = CC_OP_CMPW;
        break;
    case 2: /* cmp.l */
        opsize = OS_LONG;
        s->cc_op = CC_OP_SUB;
        break;
    default:
        abort();
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    tcg_gen_sub_i32(dest, reg, src);
    gen_update_cc_add(dest, src);
}
DISAS_INSN(cmpa)
{
    int opsize;
    TCGv src;
    TCGv reg;
    TCGv dest;

    if (insn & 0x100)
        opsize = OS_LONG;
    else
        opsize = OS_WORD;
    SRC_EA(env, src, opsize, 1, NULL);
    reg = AREG(insn, 9);
    dest = tcg_temp_new();
    tcg_gen_sub_i32(dest, reg, src);
    gen_update_cc_add(dest, src);
    s->cc_op = CC_OP_SUB;
}
DISAS_INSN(eor)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;

    SRC_EA(env, src, OS_LONG, 0, &addr);
    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    tcg_gen_xor_i32(dest, src, reg);
    gen_logic_cc(s, dest);
    DEST_EA(env, insn, OS_LONG, dest, &addr);
}
DISAS_INSN(and)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;

    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, src, OS_LONG, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    } else {
        SRC_EA(env, src, OS_LONG, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        tcg_gen_mov_i32(reg, dest);
    }
    gen_logic_cc(s, dest);
}
DISAS_INSN(adda)
{
    TCGv src;
    TCGv reg;

    SRC_EA(env, src, OS_LONG, 0, NULL);
    reg = AREG(insn, 9);
    tcg_gen_add_i32(reg, reg, src);
}
DISAS_INSN(addx)
{
    TCGv reg;
    TCGv src;

    gen_flush_flags(s);
    reg = DREG(insn, 9);
    src = DREG(insn, 0);
    gen_helper_addx_cc(reg, cpu_env, reg, src);
    s->cc_op = CC_OP_FLAGS;
}
/* TODO: This could be implemented without helper functions.  */
DISAS_INSN(shift_im)
{
    TCGv reg;
    int tmp;
    TCGv shift;

    reg = DREG(insn, 0);
    tmp = (insn >> 9) & 7;
    if (tmp == 0)
        tmp = 8;
    shift = tcg_const_i32(tmp);
    /* No need to flush flags because we know we will set C flag.  */
    if (insn & 0x100) {
        gen_helper_shl_cc(reg, cpu_env, reg, shift);
    } else {
        if (insn & 8) {
            gen_helper_shr_cc(reg, cpu_env, reg, shift);
        } else {
            gen_helper_sar_cc(reg, cpu_env, reg, shift);
        }
    }
    s->cc_op = CC_OP_SHIFT;
}
DISAS_INSN(shift_reg)
{
    TCGv reg;
    TCGv shift;

    reg = DREG(insn, 0);
    shift = DREG(insn, 9);
    /* Shift by zero leaves C flag unmodified.  */
    gen_flush_flags(s);
    if (insn & 0x100) {
        gen_helper_shl_cc(reg, cpu_env, reg, shift);
    } else {
        if (insn & 8) {
            gen_helper_shr_cc(reg, cpu_env, reg, shift);
        } else {
            gen_helper_sar_cc(reg, cpu_env, reg, shift);
        }
    }
    s->cc_op = CC_OP_SHIFT;
}
DISAS_INSN(ff1)
{
    TCGv reg;

    reg = DREG(insn, 0);
    gen_logic_cc(s, reg);
    gen_helper_ff1(reg, reg);
}
static TCGv gen_get_sr(DisasContext *s)
{
    TCGv ccr;
    TCGv sr;

    ccr = gen_get_ccr(s);
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
    tcg_gen_or_i32(sr, sr, ccr);
    return sr;
}
DISAS_INSN(strldsr)
{
    uint16_t ext;
    uint32_t addr;

    addr = s->pc - 2;
    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_UNSUPPORTED);
        return;
    }
    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
        return;
    }
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
}
DISAS_INSN(move_from_sr)
{
    TCGv reg;
    TCGv sr;

    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    sr = gen_get_sr(s);
    reg = DREG(insn, 0);
    gen_partset_reg(OS_WORD, reg, sr);
}
DISAS_INSN(move_to_sr)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    gen_set_sr(env, s, insn, 0);
    gen_lookup_tb(s);
}
DISAS_INSN(move_from_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement USP.  */
    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
}
DISAS_INSN(move_to_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement USP.  */
    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
}
DISAS_INSN(halt)
{
    gen_exception(s, s->pc, EXCP_HALT_INSN);
}
DISAS_INSN(stop)
{
    uint16_t ext;

    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;

    gen_set_sr_im(s, ext, 0);
    tcg_gen_movi_i32(QREG_HALTED, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}
DISAS_INSN(rte)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    gen_exception(s, s->pc - 2, EXCP_RTE);
}
DISAS_INSN(movec)
{
    uint16_t ext;
    TCGv reg;

    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;

    if (ext & 0x8000) {
        reg = AREG(ext, 12);
    } else {
        reg = DREG(ext, 12);
    }
    gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
    gen_lookup_tb(s);
}
DISAS_INSN(intouch)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* ICache fetch.  Implement as no-op.  */
}
DISAS_INSN(cpushl)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}
DISAS_INSN(wddata)
{
    gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
}
DISAS_INSN(wdebug)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug.  */
    qemu_assert(0, "WDEBUG not implemented");
}
DISAS_INSN(trap)
{
    gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
}
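
/* FPU extension word layout, as decoded below: bits 13-15 select the
   operation class, bits 10-12 the source specifier, bits 7-9 the destination
   FP register, and bits 0-6 the opmode. */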
/* ??? FP exceptions are not implemented.  Most exceptions are deferred until
   immediately before the next FP instruction is executed.  */
DISAS_INSN(fpu)
{
    uint16_t ext;
    int32_t offset;
    int opmode;
    TCGv_i64 src;
    TCGv_i64 dest;
    TCGv_i64 res;
    TCGv tmp32;
    int round;
    int set_dest;
    int opsize;

    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    opmode = ext & 0x7f;
    switch ((ext >> 13) & 7) {
    case 0: case 2:
        break;
    case 1:
        goto undef;
    case 3: /* fmove out */
        src = FREG(ext, 7);
        tmp32 = tcg_temp_new_i32();
        /* fmove */
        /* ??? TODO: Proper behavior on overflow.  */
        switch ((ext >> 10) & 7) {
        case 0:
            opsize = OS_LONG;
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
            break;
        case 1:
            opsize = OS_SINGLE;
            gen_helper_f64_to_f32(tmp32, cpu_env, src);
            break;
        case 4:
            opsize = OS_WORD;
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
            break;
        case 5: /* OS_DOUBLE */
            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
            switch ((insn >> 3) & 7) {
            case 2:
            case 3:
                break;
            case 4:
                tcg_gen_addi_i32(tmp32, tmp32, -8);
                break;
            case 5:
                offset = cpu_ldsw_code(env, s->pc);
                s->pc += 2;
                tcg_gen_addi_i32(tmp32, tmp32, offset);
                break;
            default:
                goto undef;
            }
            gen_store64(s, tmp32, src);
            switch ((insn >> 3) & 7) {
            case 3:
                tcg_gen_addi_i32(tmp32, tmp32, 8);
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            case 4:
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            }
            tcg_temp_free_i32(tmp32);
            return;
        case 6:
            opsize = OS_BYTE;
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
            break;
        default:
            goto undef;
        }
        DEST_EA(env, insn, opsize, tmp32, NULL);
        tcg_temp_free_i32(tmp32);
        return;
    case 4: /* fmove to control register.  */
        switch ((ext >> 10) & 7) {
        case 4: /* FPCR */
            /* Not implemented.  Ignore writes.  */
            break;
        default:
            cpu_abort(NULL, "Unimplemented: fmove to control %d",
                      (ext >> 10) & 7);
        }
        break;
    case 5: /* fmove from control register.  */
        switch ((ext >> 10) & 7) {
        case 4: /* FPCR */
            /* Not implemented.  Always return zero.  */
            tmp32 = tcg_const_i32(0);
            break;
        default:
            cpu_abort(NULL, "Unimplemented: fmove from control %d",
                      (ext >> 10) & 7);
            goto undef;
        }
        DEST_EA(env, insn, OS_LONG, tmp32, NULL);
        break;
    case 6: /* fmovem */
    case 7:
        {
            TCGv addr;
            uint16_t mask;
            int i;

            if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
                goto undef;
            tmp32 = gen_lea(env, s, insn, OS_LONG);
            if (IS_NULL_QREG(tmp32)) {
                gen_addr_fault(s);
                return;
            }
            addr = tcg_temp_new_i32();
            tcg_gen_mov_i32(addr, tmp32);
            mask = 0x80;
            for (i = 0; i < 8; i++) {
                if (ext & mask) {
                    dest = FREG(i, 0);
                    if (ext & (1 << 13)) {
                        /* store */
                        tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
                    } else {
                        /* load */
                        tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
                    }
                    if (ext & (mask - 1))
                        tcg_gen_addi_i32(addr, addr, 8);
                }
                mask >>= 1;
            }
            tcg_temp_free_i32(addr);
        }
        return;
    }
    if (ext & (1 << 14)) {
        /* Source effective address.  */
        switch ((ext >> 10) & 7) {
        case 0: opsize = OS_LONG; break;
        case 1: opsize = OS_SINGLE; break;
        case 4: opsize = OS_WORD; break;
        case 5: opsize = OS_DOUBLE; break;
        case 6: opsize = OS_BYTE; break;
        default:
            goto undef;
        }
        if (opsize == OS_DOUBLE) {
            tmp32 = tcg_temp_new_i32();
            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
            switch ((insn >> 3) & 7) {
            case 2:
            case 3:
                break;
            case 4:
                tcg_gen_addi_i32(tmp32, tmp32, -8);
                break;
            case 5:
                offset = cpu_ldsw_code(env, s->pc);
                s->pc += 2;
                tcg_gen_addi_i32(tmp32, tmp32, offset);
                break;
            case 7:
                offset = cpu_ldsw_code(env, s->pc);
                offset += s->pc - 2;
                s->pc += 2;
                tcg_gen_addi_i32(tmp32, tmp32, offset);
                break;
            default:
                goto undef;
            }
            src = gen_load64(s, tmp32);
            switch ((insn >> 3) & 7) {
            case 3:
                tcg_gen_addi_i32(tmp32, tmp32, 8);
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            case 4:
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            }
            tcg_temp_free_i32(tmp32);
        } else {
            SRC_EA(env, tmp32, opsize, 1, NULL);
            src = tcg_temp_new_i64();
            switch (opsize) {
            case OS_LONG:
            case OS_WORD:
            case OS_BYTE:
                gen_helper_i32_to_f64(src, cpu_env, tmp32);
                break;
            case OS_SINGLE:
                gen_helper_f32_to_f64(src, cpu_env, tmp32);
                break;
            }
        }
    } else {
        /* Source register.  */
        src = FREG(ext, 10);
    }
    dest = FREG(ext, 7);
    res = tcg_temp_new_i64();
    if (opmode != 0x3a)
        tcg_gen_mov_f64(res, dest);
    round = 1;
    set_dest = 1;
    switch (opmode) {
    case 0: case 0x40: case 0x44: /* fmove */
        tcg_gen_mov_f64(res, src);
        break;
    case 1: /* fint */
        gen_helper_iround_f64(res, cpu_env, src);
        round = 0;
        break;
    case 3: /* fintrz */
        gen_helper_itrunc_f64(res, cpu_env, src);
        round = 0;
        break;
    case 4: case 0x41: case 0x45: /* fsqrt */
        gen_helper_sqrt_f64(res, cpu_env, src);
        break;
    case 0x18: case 0x58: case 0x5c: /* fabs */
        gen_helper_abs_f64(res, src);
        break;
    case 0x1a: case 0x5a: case 0x5e: /* fneg */
        gen_helper_chs_f64(res, src);
        break;
    case 0x20: case 0x60: case 0x64: /* fdiv */
        gen_helper_div_f64(res, cpu_env, res, src);
        break;
    case 0x22: case 0x62: case 0x66: /* fadd */
        gen_helper_add_f64(res, cpu_env, res, src);
        break;
    case 0x23: case 0x63: case 0x67: /* fmul */
        gen_helper_mul_f64(res, cpu_env, res, src);
        break;
    case 0x28: case 0x68: case 0x6c: /* fsub */
        gen_helper_sub_f64(res, cpu_env, res, src);
        break;
    case 0x38: /* fcmp */
        gen_helper_sub_cmp_f64(res, cpu_env, res, src);
        set_dest = 0;
        round = 0;
        break;
    case 0x3a: /* ftst */
        tcg_gen_mov_f64(res, src);
        set_dest = 0;
        round = 0;
        break;
    default:
        goto undef;
    }
    if (ext & (1 << 14)) {
        tcg_temp_free_i64(src);
    }
    if (round) {
        if (opmode & 0x40) {
            if ((opmode & 0x4) != 0)
                round = 0;
        } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
            round = 0;
        }
    }
    if (round) {
        TCGv tmp = tcg_temp_new_i32();
        gen_helper_f64_to_f32(tmp, cpu_env, res);
        gen_helper_f32_to_f64(res, cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_mov_f64(QREG_FP_RESULT, res);
    if (set_dest) {
        tcg_gen_mov_f64(dest, res);
    }
    tcg_temp_free_i64(res);
    return;
undef:
    /* FIXME: Is this right for offset addressing modes?  */
    s->pc -= 2;
    disas_undef_fpu(env, s, insn);
}
DISAS_INSN(fbcc)
{
    uint32_t offset;
    uint32_t addr;
    TCGv flag;
    int l1;

    addr = s->pc;
    offset = cpu_ldsw_code(env, s->pc);
    s->pc += 2;
    if (insn & (1 << 6)) {
        offset = (offset << 16) | cpu_lduw_code(env, s->pc);
        s->pc += 2;
    }

    l1 = gen_new_label();
    /* TODO: Raise BSUN exception.  */
    flag = tcg_temp_new();
    gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
    /* Jump to l1 if condition is true.  */
    switch (insn & 0xf) {
    case 0: /* f */
        break;
    case 1: /* eq (=0) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
        break;
    case 2: /* ogt (=1) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
        break;
    case 3: /* oge (=0 or =1) */
        tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
        break;
    case 4: /* olt (=-1) */
        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
        break;
    case 5: /* ole (=-1 or =0) */
        tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
        break;
    case 6: /* ogl (=-1 or =1) */
        tcg_gen_andi_i32(flag, flag, 1);
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
        break;
    case 7: /* or (=2) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
        break;
    case 8: /* un (<2) */
        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
        break;
    case 9: /* ueq (=0 or =2) */
        tcg_gen_andi_i32(flag, flag, 1);
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
        break;
    case 10: /* ugt (>0) */
        tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
        break;
    case 11: /* uge (>=0) */
        tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
        break;
    case 12: /* ult (=-1 or =2) */
        tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
        break;
    case 13: /* ule (!=1) */
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
        break;
    case 14: /* ne (!=0) */
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
        break;
    case 15: /* t */
        tcg_gen_br(l1);
        break;
    }
    gen_jmp_tb(s, 0, s->pc);
    gen_set_label(l1);
    gen_jmp_tb(s, 1, addr + offset);
}
DISAS_INSN(frestore)
{
    /* TODO: Implement frestore.  */
    qemu_assert(0, "FRESTORE not implemented");
}

DISAS_INSN(fsave)
{
    /* TODO: Implement fsave.  */
    qemu_assert(0, "FSAVE not implemented");
}
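
/* The EMAC multiplies 16-bit operand halves; UPPER picks which half of VAL
   is used, and the MACSR mode bits (FI/SU) decide whether it is positioned
   as a fractional, signed or unsigned quantity. */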
static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
{
    TCGv tmp = tcg_temp_new();
    if (s->env->macsr & MACSR_FI) {
        if (upper)
            tcg_gen_andi_i32(tmp, val, 0xffff0000);
        else
            tcg_gen_shli_i32(tmp, val, 16);
    } else if (s->env->macsr & MACSR_SU) {
        if (upper)
            tcg_gen_sari_i32(tmp, val, 16);
        else
            tcg_gen_ext16s_i32(tmp, val);
    } else {
        if (upper)
            tcg_gen_shri_i32(tmp, val, 16);
        else
            tcg_gen_ext16u_i32(tmp, val);
    }
    return tmp;
}
static void gen_mac_clear_flags(void)
{
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
}
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;

    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load.  */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /* Load the value now to ensure correct exception behavior.
           Perform writeback after reading the MAC inputs.  */
        loadval = gen_load(s, OS_LONG, addr, 0);

        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word.  */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply.  */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant.  */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier.  */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated.  */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));

    if (insn & 0x30) {
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /* FIXME: Should address writeback happen with the masked or
           unmasked value?  */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
DISAS_INSN(from_mac)
{
    TCGv rx;
    TCGv_i64 acc;
    int accnum;

    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    if (s->env->macsr & MACSR_FI) {
        gen_helper_get_macf(rx, cpu_env, acc);
    } else if ((s->env->macsr & MACSR_OMC) == 0) {
        tcg_gen_trunc_i64_i32(rx, acc);
    } else if (s->env->macsr & MACSR_SU) {
        gen_helper_get_macs(rx, acc);
    } else {
        gen_helper_get_macu(rx, acc);
    }
    if (insn & 0x40) {
        tcg_gen_movi_i64(acc, 0);
        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    }
}
DISAS_INSN(move_mac)
{
    /* FIXME: This can be done without a helper.  */
    int src;
    TCGv dest;

    src = insn & 3;
    dest = tcg_const_i32((insn >> 9) & 3);
    gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, dest);
}
DISAS_INSN(from_macsr)
{
    TCGv reg;

    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    tcg_gen_mov_i32(reg, QREG_MACSR);
}
DISAS_INSN(from_mask)
{
    TCGv reg;

    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    tcg_gen_mov_i32(reg, QREG_MAC_MASK);
}
DISAS_INSN(from_mext)
{
    TCGv reg;
    TCGv acc;

    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
    if (s->env->macsr & MACSR_FI)
        gen_helper_get_mac_extf(reg, cpu_env, acc);
    else
        gen_helper_get_mac_exti(reg, cpu_env, acc);
}
DISAS_INSN(macsr_to_ccr)
{
    tcg_gen_movi_i32(QREG_CC_X, 0);
    tcg_gen_andi_i32(QREG_CC_DEST, QREG_MACSR, 0xf);
    s->cc_op = CC_OP_FLAGS;
}
DISAS_INSN(to_mac)
{
    TCGv_i64 acc;
    TCGv val;
    int accnum;

    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    SRC_EA(env, val, OS_LONG, 0, NULL);
    if (s->env->macsr & MACSR_FI) {
        tcg_gen_ext_i32_i64(acc, val);
        tcg_gen_shli_i64(acc, acc, 8);
    } else if (s->env->macsr & MACSR_SU) {
        tcg_gen_ext_i32_i64(acc, val);
    } else {
        tcg_gen_extu_i32_i64(acc, val);
    }
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
}
DISAS_INSN(to_macsr)
{
    TCGv val;

    SRC_EA(env, val, OS_LONG, 0, NULL);
    gen_helper_set_macsr(cpu_env, val);
    gen_lookup_tb(s);
}
DISAS_INSN(to_mask)
{
    TCGv val;

    SRC_EA(env, val, OS_LONG, 0, NULL);
    tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
}
DISAS_INSN(to_mext)
{
    TCGv val;
    TCGv acc;

    SRC_EA(env, val, OS_LONG, 0, NULL);
    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
    if (s->env->macsr & MACSR_FI)
        gen_helper_set_mac_extf(cpu_env, val, acc);
    else if (s->env->macsr & MACSR_SU)
        gen_helper_set_mac_exts(cpu_env, val, acc);
    else
        gen_helper_set_mac_extu(cpu_env, val, acc);
}
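
/* Dispatch is a flat 64K-entry function table indexed by the full 16-bit
   opcode: register_opcode() below fills in every opcode value that matches a
   given pattern/mask pair, so decode is a single table lookup at translation
   time. */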
static disas_proc opcode_table[65536];

static void
register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
{
    int i;
    int from;
    int to;

    /* Sanity check.  All set bits must be included in the mask.  */
    if (opcode & ~mask) {
        fprintf(stderr,
                "qemu internal error: bogus opcode definition %04x/%04x\n",
                opcode, mask);
        abort();
    }
    /* This could probably be cleverer.  For now just optimize the case where
       the top bits are known.  */
    /* Find the first zero bit in the mask.  */
    i = 0x8000;
    while ((i & mask) != 0)
        i >>= 1;
    /* Iterate over all combinations of this and lower bits.  */
    if (i == 0)
        i = 1;
    else
        i <<= 1;
    from = opcode & ~(i - 1);
    to = from + i;
    for (i = from; i < to; i++) {
        if ((i & mask) == opcode)
            opcode_table[i] = proc;
    }
}
/* Register m68k opcode handlers.  Order is important.
   Later insns override earlier ones.  */
void register_m68k_insns (CPUM68KState *env)
{
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        register_opcode(disas_##name, 0x##opcode, 0x##mask); \
    } while(0)
    INSN(undef,     0000, 0000, CF_ISA_A);
    INSN(arith_im,  0080, fff8, CF_ISA_A);
    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
    INSN(bitop_reg, 0100, f1c0, CF_ISA_A);
    INSN(bitop_reg, 0140, f1c0, CF_ISA_A);
    INSN(bitop_reg, 0180, f1c0, CF_ISA_A);
    INSN(bitop_reg, 01c0, f1c0, CF_ISA_A);
    INSN(arith_im,  0280, fff8, CF_ISA_A);
    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0480, fff8, CF_ISA_A);
    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0680, fff8, CF_ISA_A);
    INSN(bitop_im,  0800, ffc0, CF_ISA_A);
    INSN(bitop_im,  0840, ffc0, CF_ISA_A);
    INSN(bitop_im,  0880, ffc0, CF_ISA_A);
    INSN(bitop_im,  08c0, ffc0, CF_ISA_A);
    INSN(arith_im,  0a80, fff8, CF_ISA_A);
    INSN(arith_im,  0c00, ff38, CF_ISA_A);
    INSN(move,      1000, f000, CF_ISA_A);
    INSN(move,      2000, f000, CF_ISA_A);
    INSN(move,      3000, f000, CF_ISA_A);
    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
    INSN(negx,      4080, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(lea,       41c0, f1c0, CF_ISA_A);
    INSN(clr,       4200, ff00, CF_ISA_A);
    INSN(undef,     42c0, ffc0, CF_ISA_A);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(neg,       4480, fff8, CF_ISA_A);
    INSN(move_to_ccr, 44c0, ffc0, CF_ISA_A);
    INSN(not,       4680, fff8, CF_ISA_A);
    INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
    INSN(pea,       4840, ffc0, CF_ISA_A);
    INSN(swap,      4840, fff8, CF_ISA_A);
    INSN(movem,     48c0, fbc0, CF_ISA_A);
    INSN(ext,       4880, fff8, CF_ISA_A);
    INSN(ext,       48c0, fff8, CF_ISA_A);
    INSN(ext,       49c0, fff8, CF_ISA_A);
    INSN(tst,       4a00, ff00, CF_ISA_A);
    INSN(tas,       4ac0, ffc0, CF_ISA_B);
    INSN(halt,      4ac8, ffff, CF_ISA_A);
    INSN(pulse,     4acc, ffff, CF_ISA_A);
    INSN(illegal,   4afc, ffff, CF_ISA_A);
    INSN(mull,      4c00, ffc0, CF_ISA_A);
    INSN(divl,      4c40, ffc0, CF_ISA_A);
    INSN(sats,      4c80, fff8, CF_ISA_B);
    INSN(trap,      4e40, fff0, CF_ISA_A);
    INSN(link,      4e50, fff8, CF_ISA_A);
    INSN(unlk,      4e58, fff8, CF_ISA_A);
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(nop,       4e71, ffff, CF_ISA_A);
    INSN(stop,      4e72, ffff, CF_ISA_A);
    INSN(rte,       4e73, ffff, CF_ISA_A);
    INSN(rts,       4e75, ffff, CF_ISA_A);
    INSN(movec,     4e7b, ffff, CF_ISA_A);
    INSN(jump,      4e80, ffc0, CF_ISA_A);
    INSN(jump,      4ec0, ffc0, CF_ISA_A);
    INSN(addsubq,   5180, f1c0, CF_ISA_A);
    INSN(scc,       50c0, f0f8, CF_ISA_A);
    INSN(addsubq,   5080, f1c0, CF_ISA_A);
    INSN(tpf,       51f8, fff8, CF_ISA_A);

    /* Branch instructions.  */
    INSN(branch,    6000, f000, CF_ISA_A);
    /* Disable long branch instructions, then add back the ones we want.  */
    INSN(undef,     60ff, f0ff, CF_ISA_A); /* All long branches.  */
    INSN(branch,    60ff, f0ff, CF_ISA_B);
    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch,    60ff, ffff, BRAL);

    INSN(moveq,     7000, f100, CF_ISA_A);
    INSN(mvzs,      7100, f100, CF_ISA_B);
    INSN(or,        8000, f000, CF_ISA_A);
    INSN(divw,      80c0, f0c0, CF_ISA_A);
    INSN(addsub,    9000, f000, CF_ISA_A);
    INSN(subx,      9180, f1f8, CF_ISA_A);
    INSN(suba,      91c0, f1c0, CF_ISA_A);

    INSN(undef_mac, a000, f000, CF_ISA_A);
    INSN(mac,       a000, f100, CF_EMAC);
    INSN(from_mac,  a180, f9b0, CF_EMAC);
    INSN(move_mac,  a110, f9fc, CF_EMAC);
    INSN(from_macsr, a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac,    a100, f9c0, CF_EMAC);
    INSN(to_macsr,  a900, ffc0, CF_EMAC);
    INSN(to_mext,   ab00, fbc0, CF_EMAC);
    INSN(to_mask,   ad00, ffc0, CF_EMAC);

    INSN(mov3q,     a140, f1c0, CF_ISA_B);
    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp,       b080, f1c0, CF_ISA_A);
    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
    INSN(eor,       b180, f1c0, CF_ISA_A);
    INSN(and,       c000, f000, CF_ISA_A);
    INSN(mulw,      c0c0, f0c0, CF_ISA_A);
    INSN(addsub,    d000, f000, CF_ISA_A);
    INSN(addx,      d180, f1f8, CF_ISA_A);
    INSN(adda,      d1c0, f1c0, CF_ISA_A);
    INSN(shift_im,  e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(undef_fpu, f000, f000, CF_ISA_A);
    INSN(fpu,       f200, ffc0, CF_FPU);
    INSN(fbcc,      f280, ffc0, CF_FPU);
    INSN(frestore,  f340, ffc0, CF_FPU);
    INSN(fsave,     f340, ffc0, CF_FPU);
    INSN(intouch,   f340, ffc0, CF_ISA_A);
    INSN(cpushl,    f428, ff38, CF_ISA_A);
    INSN(wddata,    fb00, ff00, CF_ISA_A);
    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
#undef INSN
}
/* ??? Some of this implementation is not exception safe.  We should always
   write back the result to memory before setting the condition codes.  */
static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
{
    uint16_t insn;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(s->pc);
    }

    insn = cpu_lduw_code(env, s->pc);
    s->pc += 2;

    opcode_table[insn](env, s, insn);
}
/* generate intermediate code for basic block 'tb'.  */
static inline void
gen_intermediate_code_internal(CPUM68KState *env, TranslationBlock *tb,
                               int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj;
    target_ulong pc_start;
    int pc_offset;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->env = env;
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->fpcr = env->fpcr;
    dc->user = (env->sr & SR_S) == 0;
    dc->done_mac = 0;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    do {
        pc_offset = dc->pc - pc_start;
        gen_throws_exception = NULL;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception(dc, dc->pc, EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    break;
                }
            }
            if (dc->is_jmp)
                break;
        }
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        dc->insn_pc = dc->pc;
        disas_m68k_insn(env, dc);
        num_insns++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (!dc->is_jmp) {
            gen_flush_cc_op(dc);
            tcg_gen_movi_i32(QREG_PC, dc->pc);
        }
        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
    } else {
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_flush_cc_op(dc);
            gen_jmp_tb(dc, 0, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            gen_flush_cc_op(dc);
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }
    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

    //expand_target_qops();
}
void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUM68KState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
void cpu_dump_state(CPUM68KState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
    uint16_t sr;
    CPU_DoubleU u;

    for (i = 0; i < 8; i++)
    {
        u.d = env->fregs[i];
        cpu_fprintf (f, "D%d = %08x A%d = %08x F%d = %08x%08x (%12g)\n",
                     i, env->dregs[i], i, env->aregs[i],
                     i, u.l.upper, u.l.lower, *(double *)&u.d);
    }
    cpu_fprintf (f, "PC = %08x ", env->pc);
    sr = env->sr;
    cpu_fprintf (f, "SR = %04x %c%c%c%c%c ", sr, (sr & 0x10) ? 'X' : '-',
                 (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
                 (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
    cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
}
void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}