memory: unify loops to sync dirty log bitmap
[qemu/ar7.git] / target / m68k / translate.c
blob34db97b8a04063da016988d036fadf9303addc99
1 /*
2 * m68k translation
4 * Copyright (c) 2005-2007 CodeSourcery
5 * Written by Paul Brook
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "qemu/log.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/translator.h"
30 #include "exec/helper-proto.h"
31 #include "exec/helper-gen.h"
33 #include "trace-tcg.h"
34 #include "exec/log.h"
36 //#define DEBUG_DISPATCH 1
38 #define DEFO32(name, offset) static TCGv QREG_##name;
39 #define DEFO64(name, offset) static TCGv_i64 QREG_##name;
40 #include "qregs.def"
41 #undef DEFO32
42 #undef DEFO64
44 static TCGv_i32 cpu_halted;
45 static TCGv_i32 cpu_exception_index;
47 static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
48 static TCGv cpu_dregs[8];
49 static TCGv cpu_aregs[8];
50 static TCGv_i64 cpu_macc[4];
52 #define REG(insn, pos) (((insn) >> (pos)) & 7)
53 #define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
54 #define AREG(insn, pos) get_areg(s, REG(insn, pos))
55 #define MACREG(acc) cpu_macc[acc]
56 #define QREG_SP get_areg(s, 7)
58 static TCGv NULL_QREG;
59 #define IS_NULL_QREG(t) (t == NULL_QREG)
60 /* Used to distinguish stores from bad addressing modes. */
61 static TCGv store_dummy;
63 #include "exec/gen-icount.h"
/*
 * One-time TCG initialisation: create TCG globals for every frontend
 * register variable, backed by fields of CPUM68KState (or CPUState for
 * the halted/exception_index globals reached via a negative offset
 * from env back to the containing M68kCPU).
 */
void m68k_tcg_init(void)
{
    char *p;
    int i;

/* Instantiate one TCG global per entry in qregs.def. */
#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#include "qregs.def"
#undef DEFO32
#undef DEFO64

    cpu_halted = tcg_global_mem_new_i32(cpu_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),
                                                 "EXCEPTION");

    /*
     * Register names live in cpu_reg_names: "Dn"/"An" take 3 bytes
     * each (including NUL), "ACCn" takes 5 -- matching the
     * 2 * 8 * 3 + 5 * 4 size of the buffer.
     */
    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    /*
     * Sentinel globals at (invalid) negative offsets; never really
     * loaded or stored, only compared against by IS_NULL_QREG and
     * used as the dummy return of store operations.
     */
    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
}
110 /* internal defines */
/* Per-translation-block disassembly state. */
typedef struct DisasContext {
    CPUM68KState *env;
    target_ulong insn_pc; /* Start of the current instruction. */
    target_ulong pc;      /* Next guest code address to fetch from. */
    int is_jmp;           /* DISAS_* disposition of the block. */
    CCOp cc_op; /* Current CC operation */
    int cc_op_synced;     /* Nonzero if QREG_CC_OP matches cc_op. */
    struct TranslationBlock *tb;
    int singlestep_enabled;
    TCGv_i64 mactmp;      /* Scratch for MAC insns -- TODO confirm use. */
    int done_mac;
    int writeback_mask;   /* Bitmask of aregs with a delayed writeback. */
    TCGv writeback[8];    /* Pending value for each delayed areg. */
} DisasContext;
126 static TCGv get_areg(DisasContext *s, unsigned regno)
128 if (s->writeback_mask & (1 << regno)) {
129 return s->writeback[regno];
130 } else {
131 return cpu_aregs[regno];
/*
 * Schedule a delayed writeback of VAL to address register REGNO.
 * If GIVE_TEMP is true, ownership of VAL (a temp) transfers to the
 * writeback machinery; otherwise VAL is copied into a temp owned by
 * this module.  The actual store to cpu_aregs happens later in
 * do_writebacks().
 */
static void delay_set_areg(DisasContext *s, unsigned regno,
                           TCGv val, bool give_temp)
{
    if (s->writeback_mask & (1 << regno)) {
        /* A writeback is already pending: replace or overwrite it. */
        if (give_temp) {
            tcg_temp_free(s->writeback[regno]);
            s->writeback[regno] = val;
        } else {
            tcg_gen_mov_i32(s->writeback[regno], val);
        }
    } else {
        s->writeback_mask |= 1 << regno;
        if (give_temp) {
            s->writeback[regno] = val;
        } else {
            TCGv tmp = tcg_temp_new();
            s->writeback[regno] = tmp;
            tcg_gen_mov_i32(tmp, val);
        }
    }
}
157 static void do_writebacks(DisasContext *s)
159 unsigned mask = s->writeback_mask;
160 if (mask) {
161 s->writeback_mask = 0;
162 do {
163 unsigned regno = ctz32(mask);
164 tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
165 tcg_temp_free(s->writeback[regno]);
166 mask &= mask - 1;
167 } while (mask);
171 /* is_jmp field values */
172 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
173 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
174 #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
175 #define DISAS_JUMP_NEXT DISAS_TARGET_3
177 #if defined(CONFIG_USER_ONLY)
178 #define IS_USER(s) 1
179 #else
180 #define IS_USER(s) (!(s->tb->flags & TB_FLAGS_MSR_S))
181 #define SFC_INDEX(s) ((s->tb->flags & TB_FLAGS_SFC_S) ? \
182 MMU_KERNEL_IDX : MMU_USER_IDX)
183 #define DFC_INDEX(s) ((s->tb->flags & TB_FLAGS_DFC_S) ? \
184 MMU_KERNEL_IDX : MMU_USER_IDX)
185 #endif
187 typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
189 #ifdef DEBUG_DISPATCH
190 #define DISAS_INSN(name) \
191 static void real_disas_##name(CPUM68KState *env, DisasContext *s, \
192 uint16_t insn); \
193 static void disas_##name(CPUM68KState *env, DisasContext *s, \
194 uint16_t insn) \
196 qemu_log("Dispatch " #name "\n"); \
197 real_disas_##name(env, s, insn); \
199 static void real_disas_##name(CPUM68KState *env, DisasContext *s, \
200 uint16_t insn)
201 #else
202 #define DISAS_INSN(name) \
203 static void disas_##name(CPUM68KState *env, DisasContext *s, \
204 uint16_t insn)
205 #endif
/*
 * For each CC op, the set of CCF_* flag bits whose values are live in
 * the QREG_CC_* variables while that op is pending.  Used by
 * set_cc_op() to discard computations that become dead.
 */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};
/*
 * Switch the translator to a new deferred CC computation OP, marking
 * the env copy stale and discarding any QREG_CC_* values the new op
 * no longer keeps live.
 */
static void set_cc_op(DisasContext *s, CCOp op)
{
    CCOp old_op = s->cc_op;
    int dead;

    if (old_op == op) {
        return;
    }
    s->cc_op = op;
    s->cc_op_synced = 0;

    /* Discard CC computation that will no longer be used.
       Note that X and N are never dead. */
    dead = cc_op_live[old_op] & ~cc_op_live[op];
    if (dead & CCF_C) {
        tcg_gen_discard_i32(QREG_CC_C);
    }
    if (dead & CCF_Z) {
        tcg_gen_discard_i32(QREG_CC_Z);
    }
    if (dead & CCF_V) {
        tcg_gen_discard_i32(QREG_CC_V);
    }
}
241 /* Update the CPU env CC_OP state. */
242 static void update_cc_op(DisasContext *s)
244 if (!s->cc_op_synced) {
245 s->cc_op_synced = 1;
246 tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
/* Generate a jump to an immediate address.  Syncs CC_OP first and
   marks the block as ending in a dynamic jump. */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}
/* Generate a jump to the address in qreg DEST.  Syncs CC_OP first and
   marks the block as ending in a dynamic jump. */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    update_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}
266 static void gen_raise_exception(int nr)
268 TCGv_i32 tmp = tcg_const_i32(nr);
270 gen_helper_raise_exception(cpu_env, tmp);
271 tcg_temp_free_i32(tmp);
/* Raise exception NR with the guest PC set to WHERE. */
static void gen_exception(DisasContext *s, uint32_t where, int nr)
{
    gen_jmp_im(s, where);
    gen_raise_exception(nr);
}
/* Raise an address-error exception at the current instruction's PC. */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
}
285 /* Generate a load from the specified address. Narrow values are
286 sign extended to full register width. */
287 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
288 int sign, int index)
290 TCGv tmp;
291 tmp = tcg_temp_new_i32();
292 switch(opsize) {
293 case OS_BYTE:
294 if (sign)
295 tcg_gen_qemu_ld8s(tmp, addr, index);
296 else
297 tcg_gen_qemu_ld8u(tmp, addr, index);
298 break;
299 case OS_WORD:
300 if (sign)
301 tcg_gen_qemu_ld16s(tmp, addr, index);
302 else
303 tcg_gen_qemu_ld16u(tmp, addr, index);
304 break;
305 case OS_LONG:
306 tcg_gen_qemu_ld32u(tmp, addr, index);
307 break;
308 default:
309 g_assert_not_reached();
311 return tmp;
314 /* Generate a store. */
315 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
316 int index)
318 switch(opsize) {
319 case OS_BYTE:
320 tcg_gen_qemu_st8(val, addr, index);
321 break;
322 case OS_WORD:
323 tcg_gen_qemu_st16(val, addr, index);
324 break;
325 case OS_LONG:
326 tcg_gen_qemu_st32(val, addr, index);
327 break;
328 default:
329 g_assert_not_reached();
/* Kind of access performed on an effective address. */
typedef enum {
    EA_STORE,   /* write the supplied value */
    EA_LOADU,   /* load, zero-extending narrow values */
    EA_LOADS    /* load, sign-extending narrow values */
} ea_what;
339 /* Generate an unsigned load if VAL is 0 a signed load if val is -1,
340 otherwise generate a store. */
341 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
342 ea_what what, int index)
344 if (what == EA_STORE) {
345 gen_store(s, opsize, addr, val, index);
346 return store_dummy;
347 } else {
348 return gen_load(s, opsize, addr, what == EA_LOADS, index);
/* Read a 16-bit immediate constant from the instruction stream at
   s->pc, advancing the fetch pointer past it. */
static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
{
    uint16_t im;
    im = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    return im;
}
/* Read an 8-bit immediate constant.  Byte immediates occupy a full
   extension word; the uint8_t return truncates to the low byte. */
static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
{
    return read_im16(env, s);
}
367 /* Read a 32-bit immediate constant. */
368 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
370 uint32_t im;
371 im = read_im16(env, s) << 16;
372 im |= 0xffff & read_im16(env, s);
373 return im;
376 /* Read a 64-bit immediate constant. */
377 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
379 uint64_t im;
380 im = (uint64_t)read_im32(env, s) << 32;
381 im |= (uint64_t)read_im32(env, s);
382 return im;
/* Calculate an address index from extension word EXT.  TMP is a
   caller-supplied scratch temp; the returned TCGv is either the raw
   index register or TMP holding a sign-extended/scaled copy. */
static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
{
    TCGv add;
    int scale;

    /* Bit 15 selects an address register, otherwise a data register. */
    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        /* Word-sized index: use the sign-extended low 16 bits. */
        tcg_gen_ext16s_i32(tmp, add);
        add = tmp;
    }
    /* Bits 9-10 hold the scale as a shift count (1/2/4/8). */
    scale = (ext >> 9) & 3;
    if (scale != 0) {
        tcg_gen_shli_i32(tmp, add, scale);
        add = tmp;
    }
    return add;
}
/* Handle a base + index + displacement effective address.
   A NULL_QREG base means pc-relative.  Returns NULL_QREG when the
   extension word encodes a mode the current CPU model lacks. */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    /* PC value for pc-relative addressing is the extension word's
       address, captured before the fetch advances s->pc. */
    offset = s->pc;
    ext = read_im16(env, s);

    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
        /* Older cores ignore the scale field; force it to zero. */
        ext &= ~(3 << 9);
    }

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)read_im16(env, s);
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = tcg_temp_new();
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(s, ext, tmp);
        } else {
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                /* pc-relative: fold pc + bd into a constant base. */
                base = tcg_const_i32(offset + bd);
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            add = tcg_const_i32(bd);
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = gen_load(s, OS_LONG, add, 0, IS_USER(s));
            if ((ext & 0x44) == 4) {
                /* post-index */
                add = gen_addr_index(s, ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement */
                if ((ext & 3) == 2) {
                    od = (int16_t)read_im16(env, s);
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format: index reg + 8-bit displacement */
        tmp = tcg_temp_new();
        add = gen_addr_index(s, ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}
509 /* Sign or zero extend a value. */
511 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
513 switch (opsize) {
514 case OS_BYTE:
515 if (sign) {
516 tcg_gen_ext8s_i32(res, val);
517 } else {
518 tcg_gen_ext8u_i32(res, val);
520 break;
521 case OS_WORD:
522 if (sign) {
523 tcg_gen_ext16s_i32(res, val);
524 } else {
525 tcg_gen_ext16u_i32(res, val);
527 break;
528 case OS_LONG:
529 tcg_gen_mov_i32(res, val);
530 break;
531 default:
532 g_assert_not_reached();
/* Evaluate all the CC flags: materialise C/V/Z/N from the deferred
   representation into the QREG_CC_* variables and switch to
   CC_OP_FLAGS. */
static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        /* Already materialised. */
        return;

    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
        /* For ADD, CC_N holds the result and CC_V the second operand;
           CC_X already holds the carry out. */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition. */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        tcg_temp_free(t1);
        break;

    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction. */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        tcg_temp_free(t1);
        break;

    case CC_OP_CMPB:
    case CC_OP_CMPW:
    case CC_OP_CMPL:
        /* For CMP, CC_N and CC_V hold the two operands; derive
           everything from the (not yet computed) difference. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
        /* Compute signed overflow for subtraction. */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
        s->cc_op_synced = 1;
        break;

    default:
        t0 = tcg_const_i32(s->cc_op);
        gen_helper_flush_flags(cpu_env, t0);
        tcg_temp_free(t0);
        s->cc_op_synced = 1;
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op. */
    s->cc_op = CC_OP_FLAGS;
}
618 static inline TCGv gen_extend(TCGv val, int opsize, int sign)
620 TCGv tmp;
622 if (opsize == OS_LONG) {
623 tmp = val;
624 } else {
625 tmp = tcg_temp_new();
626 gen_ext(tmp, val, opsize, sign);
629 return tmp;
/* Set flags for a logic-style result: CC_N gets the sign-extended
   value, the rest are deferred via CC_OP_LOGIC. */
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}
/* Record a comparison: stash both operands in CC_N/CC_V and defer the
   flag computation with the size-specific CMP op. */
static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
    set_cc_op(s, CC_OP_CMPB + opsize);
}
/* Record an add/sub result: sign-extended result in CC_N, second
   operand in CC_V.  The caller sets the matching CC_OP. */
static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
{
    gen_ext(QREG_CC_N, dest, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_V, src);
}
651 static inline int opsize_bytes(int opsize)
653 switch (opsize) {
654 case OS_BYTE: return 1;
655 case OS_WORD: return 2;
656 case OS_LONG: return 4;
657 case OS_SINGLE: return 4;
658 case OS_DOUBLE: return 8;
659 case OS_EXTENDED: return 12;
660 case OS_PACKED: return 12;
661 default:
662 g_assert_not_reached();
666 static inline int insn_opsize(int insn)
668 switch ((insn >> 6) & 3) {
669 case 0: return OS_BYTE;
670 case 1: return OS_WORD;
671 case 2: return OS_LONG;
672 default:
673 g_assert_not_reached();
677 static inline int ext_opsize(int ext, int pos)
679 switch ((ext >> pos) & 7) {
680 case 0: return OS_LONG;
681 case 1: return OS_SINGLE;
682 case 2: return OS_EXTENDED;
683 case 3: return OS_PACKED;
684 case 4: return OS_WORD;
685 case 5: return OS_DOUBLE;
686 case 6: return OS_BYTE;
687 default:
688 g_assert_not_reached();
692 /* Assign value to a register. If the width is less than the register width
693 only the low part of the register is set. */
694 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
696 TCGv tmp;
697 switch (opsize) {
698 case OS_BYTE:
699 tcg_gen_andi_i32(reg, reg, 0xffffff00);
700 tmp = tcg_temp_new();
701 tcg_gen_ext8u_i32(tmp, val);
702 tcg_gen_or_i32(reg, reg, tmp);
703 tcg_temp_free(tmp);
704 break;
705 case OS_WORD:
706 tcg_gen_andi_i32(reg, reg, 0xffff0000);
707 tmp = tcg_temp_new();
708 tcg_gen_ext16u_i32(tmp, val);
709 tcg_gen_or_i32(reg, reg, tmp);
710 tcg_temp_free(tmp);
711 break;
712 case OS_LONG:
713 case OS_SINGLE:
714 tcg_gen_mov_i32(reg, val);
715 break;
716 default:
717 g_assert_not_reached();
/* Generate code for an "effective address". Does not adjust the base
   register for autoincrement addressing modes.  Returns NULL_QREG for
   modes that have no address (register direct, immediate) or that are
   invalid for this CPU. */
static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
                         int mode, int reg0, int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch (mode) {
    case 0: /* Data register direct. */
    case 1: /* Address register direct. */
        return NULL_QREG;
    case 3: /* Indirect postincrement. */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        /* fallthru */
    case 2: /* Indirect register */
        return get_areg(s, reg0);
    case 4: /* Indirect predecrememnt. */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        if (reg0 == 7 && opsize == OS_BYTE &&
            m68k_feature(s->env, M68K_FEATURE_M68000)) {
            /* Byte pushes via SP keep the stack word-aligned. */
            tcg_gen_subi_i32(tmp, reg, 2);
        } else {
            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        }
        return tmp;
    case 5: /* Indirect displacement. */
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement. */
        reg = get_areg(s, reg0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
            offset = (int16_t)read_im16(env, s);
            return tcg_const_i32(offset);
        case 1: /* Absolute long. */
            offset = read_im32(env, s);
            return tcg_const_i32(offset);
        case 2: /* pc displacement */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            return tcg_const_i32(offset);
        case 3: /* pc index+displacement. */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate. */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen. */
    return NULL_QREG;
}
787 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
788 int opsize)
790 int mode = extract32(insn, 3, 3);
791 int reg0 = REG(insn, 0);
792 return gen_lea_mode(env, s, mode, reg0, opsize);
/* Generate code to load/store a value from/into an EA.  WHAT selects
   store vs signed/unsigned load (see ea_what).  ADDRP is non-null for
   readwrite operands: the address computed by the load is stashed
   there and reused by the store, and the autoinc/autodec register
   update is only performed on the store leg. */
static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
                        int opsize, TCGv val, TCGv *addrp, ea_what what,
                        int index)
{
    TCGv reg, tmp, result;
    int32_t offset;

    switch (mode) {
    case 0: /* Data register direct. */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct. */
        reg = get_areg(s, reg0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = get_areg(s, reg0);
        return gen_ldst(s, opsize, reg, val, what, index);
    case 3: /* Indirect postincrement. */
        reg = get_areg(s, reg0);
        result = gen_ldst(s, opsize, reg, val, what, index);
        if (what == EA_STORE || !addrp) {
            TCGv tmp = tcg_temp_new();
            if (reg0 == 7 && opsize == OS_BYTE &&
                m68k_feature(s->env, M68K_FEATURE_M68000)) {
                /* Byte accesses via SP move it by 2 to stay aligned. */
                tcg_gen_addi_i32(tmp, reg, 2);
            } else {
                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
            }
            delay_set_areg(s, reg0, tmp, true);
        }
        return result;
    case 4: /* Indirect predecrememnt. */
        if (addrp && what == EA_STORE) {
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        result = gen_ldst(s, opsize, tmp, val, what, index);
        if (what == EA_STORE || !addrp) {
            delay_set_areg(s, reg0, tmp, false);
        }
        return result;
    case 5: /* Indirect displacement. */
    case 6: /* Indirect index + displacement. */
    do_indirect:
        if (addrp && what == EA_STORE) {
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        return gen_ldst(s, opsize, tmp, val, what, index);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
        case 1: /* Absolute long. */
        case 2: /* pc displacement */
        case 3: /* pc index+displacement. */
            goto do_indirect;
        case 4: /* Immediate. */
            /* Sign extend values for consistency. */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return tcg_const_i32(offset);
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen. */
    return NULL_QREG;
}
910 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
911 int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
913 int mode = extract32(insn, 3, 3);
914 int reg0 = REG(insn, 0);
915 return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
918 static TCGv_ptr gen_fp_ptr(int freg)
920 TCGv_ptr fp = tcg_temp_new_ptr();
921 tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
922 return fp;
925 static TCGv_ptr gen_fp_result_ptr(void)
927 TCGv_ptr fp = tcg_temp_new_ptr();
928 tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
929 return fp;
/* Copy one FP register to another: the 16-bit l.upper field and the
   64-bit l.lower field of FPReg are moved separately. */
static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
{
    TCGv t32;
    TCGv_i64 t64;

    t32 = tcg_temp_new();
    tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
    tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
    tcg_temp_free(t32);

    t64 = tcg_temp_new_i64();
    tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
    tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
    tcg_temp_free_i64(t64);
}
/* Load a value of size OPSIZE from guest address ADDR into the FP
   register pointed to by FP, converting via the exts32/extf32/extf64
   helpers.  OS_EXTENDED is loaded as a 16-bit upper word (padded to
   32 bits in memory) plus a 64-bit mantissa. */
static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                        int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
        tcg_gen_qemu_ld8s(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_WORD:
        tcg_gen_qemu_ld16s(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_LONG:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_SINGLE:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        gen_helper_extf32(cpu_env, fp, tmp);
        break;
    case OS_DOUBLE:
        tcg_gen_qemu_ld64(t64, addr, index);
        gen_helper_extf64(cpu_env, fp, t64);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* ColdFire FPUs have no extended format. */
            gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
            break;
        }
        tcg_gen_qemu_ld32u(tmp, addr, index);
        tcg_gen_shri_i32(tmp, tmp, 16);
        tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_qemu_ld64(t64, tmp, index);
        tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
        break;
    case OS_PACKED:
        /* unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(tmp);
    tcg_temp_free_i64(t64);
}
/* Store the FP register pointed to by FP to guest address ADDR in
   format OPSIZE, converting via the reds32/redf32/redf64 helpers.
   Mirrors gen_load_fp's handling of OS_EXTENDED and OS_PACKED. */
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                         int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st8(tmp, addr, index);
        break;
    case OS_WORD:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st16(tmp, addr, index);
        break;
    case OS_LONG:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st32(tmp, addr, index);
        break;
    case OS_SINGLE:
        gen_helper_redf32(tmp, cpu_env, fp);
        tcg_gen_qemu_st32(tmp, addr, index);
        break;
    case OS_DOUBLE:
        gen_helper_redf64(t64, cpu_env, fp);
        tcg_gen_qemu_st64(t64, addr, index);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* ColdFire FPUs have no extended format. */
            gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
            break;
        }
        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_shli_i32(tmp, tmp, 16);
        tcg_gen_qemu_st32(tmp, addr, index);
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
        tcg_gen_qemu_st64(t64, tmp, index);
        break;
    case OS_PACKED:
        /* unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(tmp);
    tcg_temp_free_i64(t64);
}
1056 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1057 TCGv_ptr fp, ea_what what, int index)
1059 if (what == EA_STORE) {
1060 gen_store_fp(s, opsize, addr, fp, index);
1061 } else {
1062 gen_load_fp(s, opsize, addr, fp, index);
/* Load/store an FP operand from/into the EA described by MODE/REG0.
   Returns 0 on success, -1 for an invalid mode/what combination.
   Unlike gen_ea_mode, autoinc/autodec updates are applied to the
   address register immediately rather than via delay_set_areg. */
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
                          int reg0, int opsize, TCGv_ptr fp, ea_what what,
                          int index)
{
    TCGv reg, addr, tmp;
    TCGv_i64 t64;

    switch (mode) {
    case 0: /* Data register direct. */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                gen_helper_reds32(reg, cpu_env, fp);
                break;
            case OS_SINGLE:
                gen_helper_redf32(reg, cpu_env, fp);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tmp = tcg_temp_new();
            switch (opsize) {
            case OS_BYTE:
                tcg_gen_ext8s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tcg_gen_ext16s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                gen_helper_exts32(cpu_env, fp, reg);
                break;
            case OS_SINGLE:
                gen_helper_extf32(cpu_env, fp, reg);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free(tmp);
        }
        return 0;
    case 1: /* Address register direct. */
        return -1;
    case 2: /* Indirect register */
        addr = get_areg(s, reg0);
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 3: /* Indirect postincrement. */
        /* NOTE(review): uses cpu_aregs directly (not get_areg), so any
           pending delayed writeback is bypassed here -- confirm intent. */
        addr = cpu_aregs[reg0];
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
        return 0;
    case 4: /* Indirect predecrememnt. */
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        return 0;
    case 5: /* Indirect displacement. */
    case 6: /* Indirect index + displacement. */
    do_indirect:
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
        case 1: /* Absolute long. */
        case 2: /* pc displacement */
        case 3: /* pc index+displacement. */
            goto do_indirect;
        case 4: /* Immediate. */
            if (what == EA_STORE) {
                return -1;
            }
            switch (opsize) {
            case OS_BYTE:
                tmp = tcg_const_i32((int8_t)read_im8(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_WORD:
                tmp = tcg_const_i32((int16_t)read_im16(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_LONG:
                tmp = tcg_const_i32(read_im32(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_SINGLE:
                tmp = tcg_const_i32(read_im32(env, s));
                gen_helper_extf32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_DOUBLE:
                t64 = tcg_const_i64(read_im64(env, s));
                gen_helper_extf64(cpu_env, fp, t64);
                tcg_temp_free_i64(t64);
                break;
            case OS_EXTENDED:
                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
                    gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
                    break;
                }
                tmp = tcg_const_i32(read_im32(env, s) >> 16);
                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
                tcg_temp_free(tmp);
                t64 = tcg_const_i64(read_im64(env, s));
                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
                tcg_temp_free_i64(t64);
                break;
            case OS_PACKED:
                /* unimplemented data type on 68040/ColdFire
                 * FIXME if needed for another FPU
                 */
                gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
                break;
            default:
                g_assert_not_reached();
            }
            return 0;
        default:
            return -1;
        }
    }
    return -1;
}
1206 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1207 int opsize, TCGv_ptr fp, ea_what what, int index)
1209 int mode = extract32(insn, 3, 3);
1210 int reg0 = REG(insn, 0);
1211 return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
/* A materialised condition: apply TCOND to V1, V2.  The g1/g2 flags
   record whether v1/v2 are long-lived globals (set for QREG_CC_*) as
   opposed to temps owned by the caller -- presumably so the caller
   knows which ones to free; confirm against users of gen_cc_cond. */
typedef struct {
    TCGCond tcond;
    bool g1;
    bool g2;
    TCGv v1;
    TCGv v2;
} DisasCompare;
/* Lower m68k condition code COND (0..15) into a DisasCompare, i.e. a
 * single TCG comparison "v1 <tcond> v2".  Fast paths exploit the lazily
 * maintained flag state for the current cc_op; failing that, the flags
 * are flushed to CC_OP_FLAGS and the test is rebuilt from N/Z/V/C.
 * Conditions come in inverse pairs (odd/even); the code below computes
 * the odd member of each pair and inverts tcond at "done" for the even
 * one.
 */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly.  */
    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
        /* CC_N holds the result, CC_V the subtrahend of the compare;
         * both are globals, so nothing to free.
         */
        c->g1 = c->g2 = 1;
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            /* Rebuild the sign of the result: N = ext(dest - src). */
            c->g1 = c->g2 = 0;
            c->v2 = tcg_const_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    /* Default operand setup for the special cases below:
     * compare a freshly built v1 against constant zero.
     */
    c->g1 = 1;
    c->g2 = 0;
    c->v2 = tcg_const_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /* Logic operations clear V, which simplifies LE to (Z || N),
           and since Z and N are co-located, this becomes a normal
           comparison vs N.  */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N.  */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C.  */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above.  */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* V is kept in the sign bit, hence a signed-less-than-zero test. */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /* Build -(Z == 0 ? 0 : -1) | (N ^ V); sign bit encodes the test. */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_neg_i32(tmp, tmp);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcg_temp_free(tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* Even condition numbers are the inverse of the odd one computed. */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
1401 static void free_cond(DisasCompare *c)
1403 if (!c->g1) {
1404 tcg_temp_free(c->v1);
1406 if (!c->g2) {
1407 tcg_temp_free(c->v2);
1411 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1413 DisasCompare c;
1415 gen_cc_cond(&c, s, cond);
1416 update_cc_op(s);
1417 tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1418 free_cond(&c);
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_lookup_tb(DisasContext *s)
{
    update_cc_op(s);                   /* spill lazy cc state first */
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->is_jmp = DISAS_UPDATE;          /* end the TB; re-lookup next insn */
}
/* Load the source operand addressed by INSN into RESULT (sign- or
 * zero-extended per OP_SIGN).  On a bad addressing mode this emits an
 * address fault and RETURNS from the enclosing DISAS_INSN function --
 * note the hidden 'return'.  Captures 'env', 's' and 'insn' from the
 * caller's scope.
 */
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU, IS_USER(s));     \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
/* Store VAL to the destination addressed by INSN.  Like SRC_EA, a bad
 * addressing mode emits an address fault and RETURNS from the enclosing
 * DISAS_INSN function.  ADDRP may carry an address precomputed by a
 * preceding SRC_EA to avoid re-evaluating the EA.
 */
#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp,       \
                                EA_STORE, IS_USER(s));                  \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
/* Return true if a direct (chained) goto_tb jump to DEST is allowed:
 * in system mode only when DEST is on the same guest page as the TB
 * start or the current instruction; in user mode, always.
 */
static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
/* Generate a jump to an immediate address.  */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* Single-stepping: trap to the debugger instead of jumping. */
        gen_exception(s, dest, EXCP_DEBUG);
    } else if (use_goto_tb(s, dest)) {
        /* Chain directly to the next TB via exit slot N. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb + n);
    } else {
        /* Cross-page target: fall back to an indirect jump. */
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = DISAS_TB_JUMP;
}
/* Scc <EA>: set the destination byte to 0xff if the condition holds,
 * else to 0x00.
 */
DISAS_INSN(scc)
{
    DisasCompare c;
    int cond;
    TCGv tmp;

    cond = (insn >> 8) & 0xf;
    gen_cc_cond(&c, s, cond);

    tmp = tcg_temp_new();
    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
    free_cond(&c);

    /* setcond yields 0/1; negate to 0x00/0xffffffff for the byte store. */
    tcg_gen_neg_i32(tmp, tmp);
    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
    tcg_temp_free(tmp);
}
/* DBcc Dn,<disp>: if cc is false, decrement the low word of Dn and
 * branch back unless the counter reached -1.
 */
DISAS_INSN(dbcc)
{
    TCGLabel *l1;
    TCGv reg;
    TCGv tmp;
    int16_t offset;
    uint32_t base;

    reg = DREG(insn, 0);
    base = s->pc;           /* displacement is relative to the extension word */
    offset = (int16_t)read_im16(env, s);
    l1 = gen_new_label();
    gen_jmpcc(s, (insn >> 8) & 0xf, l1);   /* cc true: skip the loop */

    tmp = tcg_temp_new();
    tcg_gen_ext16s_i32(tmp, reg);
    tcg_gen_addi_i32(tmp, tmp, -1);
    gen_partset_reg(OS_WORD, reg, tmp);    /* only the low word is updated */
    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
    gen_jmp_tb(s, 1, base + offset);       /* counter != -1: loop back */
    gen_set_label(l1);
    gen_jmp_tb(s, 0, s->pc);               /* fall through to next insn */
}
/* Unhandled line-A (0xAxxx) opcode: raise the line-A emulator exception. */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->insn_pc, EXCP_LINEA);
}
/* Unhandled line-F (0xFxxx) opcode: raise the line-F emulator exception. */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->insn_pc, EXCP_LINEF);
}
1525 DISAS_INSN(undef)
1527 /* ??? This is both instructions that are as yet unimplemented
1528 for the 680x0 series, as well as those that are implemented
1529 but actually illegal for CPU32 or pre-68020. */
1530 qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x",
1531 insn, s->insn_pc);
1532 gen_exception(s, s->insn_pc, EXCP_UNSUPPORTED);
1535 DISAS_INSN(mulw)
1537 TCGv reg;
1538 TCGv tmp;
1539 TCGv src;
1540 int sign;
1542 sign = (insn & 0x100) != 0;
1543 reg = DREG(insn, 9);
1544 tmp = tcg_temp_new();
1545 if (sign)
1546 tcg_gen_ext16s_i32(tmp, reg);
1547 else
1548 tcg_gen_ext16u_i32(tmp, reg);
1549 SRC_EA(env, src, OS_WORD, sign, NULL);
1550 tcg_gen_mul_i32(tmp, tmp, src);
1551 tcg_gen_mov_i32(reg, tmp);
1552 gen_logic_cc(s, tmp, OS_LONG);
1553 tcg_temp_free(tmp);
/* divu.w / divs.w <EA>,Dn: 32/16 -> 16r:16q division.  The helper
 * performs the division, writes the result to Dn and updates the env
 * flags (hence CC_OP_FLAGS below).
 */
DISAS_INSN(divw)
{
    int sign;
    TCGv src;
    TCGv destr;

    /* divX.w <EA>,Dn    32/16 -> 16r:16q */

    sign = (insn & 0x100) != 0;

    /* dest.l / src.w */

    SRC_EA(env, src, OS_WORD, sign, NULL);
    destr = tcg_const_i32(REG(insn, 9));   /* register number, not value */
    if (sign) {
        gen_helper_divsw(cpu_env, destr, src);
    } else {
        gen_helper_divuw(cpu_env, destr, src);
    }
    tcg_temp_free(destr);

    set_cc_op(s, CC_OP_FLAGS);
}
/* divu.l / divs.l and their 64-bit variants; operands are described by
 * the following extension word.  Helpers receive register NUMBERS and
 * do the division, result write-back and flag update in env.
 */
DISAS_INSN(divl)
{
    TCGv num, reg, den;
    int sign;
    uint16_t ext;

    ext = read_im16(env, s);

    sign = (ext & 0x0800) != 0;

    if (ext & 0x400) {
        /* 64-bit dividend form requires the QUAD_MULDIV feature. */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
            return;
        }

        /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */

        SRC_EA(env, den, OS_LONG, 0, NULL);
        num = tcg_const_i32(REG(ext, 12));   /* Dq register number */
        reg = tcg_const_i32(REG(ext, 0));    /* Dr register number */
        if (sign) {
            gen_helper_divsll(cpu_env, num, reg, den);
        } else {
            gen_helper_divull(cpu_env, num, reg, den);
        }
        tcg_temp_free(reg);
        tcg_temp_free(num);
        set_cc_op(s, CC_OP_FLAGS);
        return;
    }

    /* divX.l <EA>, Dq        32/32 -> 32q     */
    /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */

    SRC_EA(env, den, OS_LONG, 0, NULL);
    num = tcg_const_i32(REG(ext, 12));
    reg = tcg_const_i32(REG(ext, 0));
    if (sign) {
        gen_helper_divsl(cpu_env, num, reg, den);
    } else {
        gen_helper_divul(cpu_env, num, reg, den);
    }
    tcg_temp_free(reg);
    tcg_temp_free(num);

    set_cc_op(s, CC_OP_FLAGS);
}
/* Emit TCG for a packed-BCD add: DEST = DEST +(bcd) SRC + X.
 * Branch-free digit correction: add 0x66 up front, then subtract 0x6
 * from each digit position that did NOT produce a decimal carry.
 */
static void bcd_add(TCGv dest, TCGv src)
{
    TCGv t0, t1;

    /* dest10 = dest10 + src10 + X
     *
     *        t1 = src
     *        t2 = t1 + 0x066
     *        t3 = t2 + dest + X
     *        t4 = t2 ^ dest
     *        t5 = t3 ^ t4
     *        t6 = ~t5 & 0x110
     *        t7 = (t6 >> 2) | (t6 >> 3)
     *        return t3 - t7
     */

    /* t1 = (src + 0x066) + dest + X
     *    = result with some possible exceding 0x6
     */

    t0 = tcg_const_i32(0x066);
    tcg_gen_add_i32(t0, t0, src);

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_add_i32(t1, t1, QREG_CC_X);

    /* we will remove exceding 0x6 where there is no carry */

    /* t0 = (src + 0x0066) ^ dest
     *    = t1 without carries
     */

    tcg_gen_xor_i32(t0, t0, dest);

    /* extract the carries
     * t0 = t0 ^ t1
     *    = only the carries
     */

    tcg_gen_xor_i32(t0, t0, t1);

    /* generate 0x1 where there is no carry
     * and for each 0x10, generate a 0x6
     */

    tcg_gen_shri_i32(t0, t0, 3);
    tcg_gen_not_i32(t0, t0);
    tcg_gen_andi_i32(t0, t0, 0x22);
    /* dest = 3 * t0, i.e. 0x2 -> 0x6 per uncorrected digit */
    tcg_gen_add_i32(dest, t0, t0);
    tcg_gen_add_i32(dest, dest, t0);
    tcg_temp_free(t0);

    /* remove the exceding 0x6
     * for digits that have not generated a carry
     */

    tcg_gen_sub_i32(dest, t1, dest);
    tcg_temp_free(t1);
}
/* Emit TCG for a packed-BCD subtract: DEST = DEST -(bcd) SRC - X,
 * implemented as a BCD add of the tens complement (see bcd_add).
 */
static void bcd_sub(TCGv dest, TCGv src)
{
    TCGv t0, t1, t2;

    /* dest10 = dest10 - src10 - X
     *        = bcd_add(dest + 1 - X, 0x199 - src)
     */

    /* t0 = 0x066 + (0x199 - src) */

    t0 = tcg_temp_new();
    tcg_gen_subfi_i32(t0, 0x1ff, src);

    /* t1 = t0 + dest + 1 - X*/

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_addi_i32(t1, t1, 1);
    tcg_gen_sub_i32(t1, t1, QREG_CC_X);

    /* t2 = t0 ^ dest */

    t2 = tcg_temp_new();
    tcg_gen_xor_i32(t2, t0, dest);

    /* t0 = t1 ^ t2
     *    = only the carries out of each digit
     */

    tcg_gen_xor_i32(t0, t1, t2);

    /* t2 = ~t0 & 0x110
     * t0 = (t2 >> 2) | (t2 >> 3)
     *
     * to fit on 8bit operands, changed in:
     *
     * t2 = ~(t0 >> 3) & 0x22
     * t0 = t2 + t2
     * t0 = t0 + t2
     */

    tcg_gen_shri_i32(t2, t0, 3);
    tcg_gen_not_i32(t2, t2);
    tcg_gen_andi_i32(t2, t2, 0x22);
    tcg_gen_add_i32(t0, t2, t2);
    tcg_gen_add_i32(t0, t0, t2);
    tcg_temp_free(t2);

    /* return t1 - t0 */

    tcg_gen_sub_i32(dest, t1, t0);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
/* Update condition codes after a BCD operation on VAL.  !Z is sticky:
 * CC_Z is only ever OR'ed with result bits, so a nonzero result clears
 * Z but a zero result leaves it untouched.  C and X come from bit 8.
 */
static void bcd_flags(TCGv val)
{
    /* Fold the result byte into CC_Z (sticky-!Z, see above). */
    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);

    /* Decimal carry out of the low byte becomes C ... */
    tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);

    /* ... and X mirrors C. */
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
}
/* ABCD Dy,Dx: BCD add of the low bytes of two data registers. */
DISAS_INSN(abcd_reg)
{
    TCGv src;
    TCGv dest;

    gen_flush_flags(s); /* !Z is sticky */

    src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);
    bcd_add(dest, src);
    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);  /* low byte only */

    bcd_flags(dest);
}
/* ABCD -(Ay),-(Ax): BCD add, memory-to-memory with pre-decrement. */
DISAS_INSN(abcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    /* keep the destination address so the store hits the same byte */
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_add(dest, src);

    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
/* SBCD Dy,Dx: BCD subtract of the low bytes of two data registers. */
DISAS_INSN(sbcd_reg)
{
    TCGv src, dest;

    gen_flush_flags(s); /* !Z is sticky */

    src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);

    bcd_sub(dest, src);

    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);  /* low byte only */

    bcd_flags(dest);
}
/* SBCD -(Ay),-(Ax): BCD subtract, memory-to-memory with pre-decrement. */
DISAS_INSN(sbcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    /* keep the destination address so the store hits the same byte */
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_sub(dest, src);

    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
/* NBCD <EA>: BCD negate, i.e. 0 -(bcd) operand - X, stored in place. */
DISAS_INSN(nbcd)
{
    TCGv src, dest;
    TCGv addr;

    gen_flush_flags(s); /* !Z is sticky */

    SRC_EA(env, src, OS_BYTE, 0, &addr);

    dest = tcg_const_i32(0);       /* negate == subtract from zero */
    bcd_sub(dest, src);

    DEST_EA(env, insn, OS_BYTE, dest, &addr);

    bcd_flags(dest);

    tcg_temp_free(dest);
}
/* ADD/SUB in both directions: Dn op <EA> -> Dn, or <EA> op Dn -> <EA>
 * (bit 8 of the opcode selects the memory-destination form).
 */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;
    int opsize;

    add = (insn & 0x4000) != 0;
    opsize = insn_opsize(insn);
    reg = gen_extend(DREG(insn, 9), opsize, 1);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* <EA> is destination: tmp = memory operand, src = Dn */
        SRC_EA(env, tmp, opsize, 1, &addr);
        src = reg;
    } else {
        /* Dn is destination: tmp = Dn, src = memory operand */
        tmp = reg;
        SRC_EA(env, src, opsize, 1, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* carry out: unsigned result < addend */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADDB + opsize);
    } else {
        /* borrow: unsigned minuend < subtrahend (before the sub) */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUBB + opsize);
    }
    gen_update_cc_add(dest, src, opsize);
    if (insn & 0x100) {
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
    tcg_temp_free(dest);
}
1884 /* Reverse the order of the bits in REG. */
1885 DISAS_INSN(bitrev)
1887 TCGv reg;
1888 reg = DREG(insn, 0);
1889 gen_helper_bitrev(reg, reg);
/* BTST/BCHG/BCLR/BSET <EA> with a dynamic bit number taken from Dn.
 * Register operands are 32-bit (bit number mod 32), memory operands
 * are bytes (bit number mod 8).  Z is set from the tested bit; the
 * modifying forms write the result back through the saved address.
 */
DISAS_INSN(bitop_reg)
{
    int opsize;
    int op;
    TCGv src1;
    TCGv src2;
    TCGv tmp;
    TCGv addr;
    TCGv dest;

    /* nonzero mode field -> memory operand -> byte-sized access */
    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;   /* 0=btst, 1=bchg, 2=bclr, 3=bset */
    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);

    gen_flush_flags(s);
    src2 = tcg_temp_new();
    if (opsize == OS_BYTE)
        tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
    else
        tcg_gen_andi_i32(src2, DREG(insn, 9), 31);

    tmp = tcg_const_i32(1);
    tcg_gen_shl_i32(tmp, tmp, src2);   /* tmp = bit mask */
    tcg_temp_free(src2);

    /* Z reflects the tested bit before modification. */
    tcg_gen_and_i32(QREG_CC_Z, src1, tmp);

    dest = tcg_temp_new();
    switch (op) {
    case 1: /* bchg */
        tcg_gen_xor_i32(dest, src1, tmp);
        break;
    case 2: /* bclr */
        tcg_gen_andc_i32(dest, src1, tmp);
        break;
    case 3: /* bset */
        tcg_gen_or_i32(dest, src1, tmp);
        break;
    default: /* btst */
        break;
    }
    tcg_temp_free(tmp);
    if (op) {
        /* write back through the address captured by SRC_EA */
        DEST_EA(env, insn, opsize, dest, &addr);
    }
    tcg_temp_free(dest);
}
/* ColdFire SATS Dn: saturate the register on signed overflow (helper
 * consults the V flag), then set the logic condition codes.
 */
DISAS_INSN(sats)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_flush_flags(s);     /* helper needs an up-to-date CC_V */
    gen_helper_sats(reg, reg, QREG_CC_V);
    gen_logic_cc(s, reg, OS_LONG);
}
1952 static void gen_push(DisasContext *s, TCGv val)
1954 TCGv tmp;
1956 tmp = tcg_temp_new();
1957 tcg_gen_subi_i32(tmp, QREG_SP, 4);
1958 gen_store(s, OS_LONG, tmp, val, IS_USER(s));
1959 tcg_gen_mov_i32(QREG_SP, tmp);
1960 tcg_temp_free(tmp);
1963 static TCGv mreg(int reg)
1965 if (reg < 8) {
1966 /* Dx */
1967 return cpu_dregs[reg];
1969 /* Ax */
1970 return cpu_aregs[reg & 7];
/* MOVEM: move multiple registers to/from memory.  The register mask is
 * in the following extension word; bit i selects mreg(i).  Only a
 * subset of addressing modes is legal: post-increment only for loads,
 * pre-decrement only for stores.
 */
DISAS_INSN(movem)
{
    TCGv addr, incr, tmp, r[16];
    int is_load = (insn & 0x0400) != 0;
    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
    uint16_t mask = read_im16(env, s);
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    int i;

    tmp = cpu_aregs[reg0];

    switch (mode) {
    case 0: /* data register direct */
    case 1: /* addr register direct */
    do_addr_fault:
        gen_addr_fault(s);
        return;

    case 2: /* indirect */
        break;

    case 3: /* indirect post-increment */
        if (!is_load) {
            /* post-increment is not allowed */
            goto do_addr_fault;
        }
        break;

    case 4: /* indirect pre-decrement */
        if (is_load) {
            /* pre-decrement is not allowed */
            goto do_addr_fault;
        }
        /* We want a bare copy of the address reg, without any pre-decrement
           adjustment, as gen_lea would provide.  */
        break;

    default:
        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(tmp)) {
            goto do_addr_fault;
        }
        break;
    }

    /* Work on a private copy of the address. */
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    incr = tcg_const_i32(opsize_bytes(opsize));

    if (is_load) {
        /* memory to register */
        /* Two passes: load everything into temps first, so a fault
         * mid-sequence has not yet clobbered any architectural regs.
         */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
                tcg_gen_add_i32(addr, addr, incr);
            }
        }
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                tcg_gen_mov_i32(mreg(i), r[i]);
                tcg_temp_free(r[i]);
            }
        }
        if (mode == 3) {
            /* post-increment: movem (An)+,X */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        }
    } else {
        /* register to memory */
        if (mode == 4) {
            /* pre-decrement: movem X,-(An) */
            /* mask bits are reversed for pre-decrement: bit 0 = A7 */
            for (i = 15; i >= 0; i--) {
                if ((mask << i) & 0x8000) {
                    tcg_gen_sub_i32(addr, addr, incr);
                    if (reg0 + 8 == i &&
                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
                        /* M68020+: if the addressing register is the
                         * register moved to memory, the value written
                         * is the initial value decremented by the size of
                         * the operation, regardless of how many actual
                         * stores have been performed until this point.
                         * M68000/M68010: the value is the initial value.
                         */
                        tmp = tcg_temp_new();
                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
                        gen_store(s, opsize, addr, tmp, IS_USER(s));
                        tcg_temp_free(tmp);
                    } else {
                        gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    }
                }
            }
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        } else {
            for (i = 0; i < 16; i++) {
                if (mask & (1 << i)) {
                    gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    tcg_gen_add_i32(addr, addr, incr);
                }
            }
        }
    }

    tcg_temp_free(incr);
    tcg_temp_free(addr);
}
/* BTST/BCHG/BCLR/BSET <EA> with an immediate bit number from the
 * extension word.  Reserved high bits of the bit-number word make the
 * insn undefined; the valid-bit check differs between 68000 (9 bits)
 * and ColdFire (8 bits).
 */
DISAS_INSN(bitop_im)
{
    int opsize;
    int op;
    TCGv src1;
    uint32_t mask;
    int bitnum;
    TCGv tmp;
    TCGv addr;

    /* nonzero mode field -> memory operand -> byte-sized access */
    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;   /* 0=btst, 1=bchg, 2=bclr, 3=bset */

    bitnum = read_im16(env, s);
    if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
        if (bitnum & 0xfe00) {
            disas_undef(env, s, insn);
            return;
        }
    } else {
        if (bitnum & 0xff00) {
            disas_undef(env, s, insn);
            return;
        }
    }

    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);

    gen_flush_flags(s);
    if (opsize == OS_BYTE)
        bitnum &= 7;
    else
        bitnum &= 31;
    mask = 1 << bitnum;

    /* Z reflects the tested bit before modification. */
   tcg_gen_andi_i32(QREG_CC_Z, src1, mask);

    if (op) {
        tmp = tcg_temp_new();
        switch (op) {
        case 1: /* bchg */
            tcg_gen_xori_i32(tmp, src1, mask);
            break;
        case 2: /* bclr */
            tcg_gen_andi_i32(tmp, src1, ~mask);
            break;
        case 3: /* bset */
            tcg_gen_ori_i32(tmp, src1, mask);
            break;
        default: /* btst */
            break;
        }
        DEST_EA(env, insn, opsize, tmp, &addr);
        tcg_temp_free(tmp);
    }
}
/* Return a fresh temp holding the CCR value, materialised from the
 * lazy flag state by the get_ccr helper.  Caller frees the temp.
 */
static TCGv gen_get_ccr(DisasContext *s)
{
    TCGv dest;

    update_cc_op(s);    /* helper reads env->cc_op; make it current */
    dest = tcg_temp_new();
    gen_helper_get_ccr(dest, cpu_env);
    return dest;
}
/* Return a fresh temp holding the full SR: the supervisor byte from
 * QREG_SR combined with the live CCR.  Caller frees the temp.
 */
static TCGv gen_get_sr(DisasContext *s)
{
    TCGv ccr;
    TCGv sr;

    ccr = gen_get_ccr(s);
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);   /* keep system bits only */
    tcg_gen_or_i32(sr, sr, ccr);             /* merge in the flags */
    return sr;
}
/* Load an immediate value into CCR (ccr_only) or the whole SR.  The
 * CCR case expands the bits directly into the flag globals; the SR
 * case goes through the set_sr helper.  Either way the flag state
 * becomes CC_OP_FLAGS.
 */
static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
{
    if (ccr_only) {
        /* N and V are stored sign-extended (-1/0), C/X/Z as 0/1 flags
         * (Z inverted: zero means "Z set").
         */
        tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
        tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
        tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
    } else {
        TCGv sr = tcg_const_i32(val);
        gen_helper_set_sr(cpu_env, sr);
        tcg_temp_free(sr);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
/* Load a runtime value into CCR (ccr_only) or the whole SR via the
 * appropriate helper; flag state becomes CC_OP_FLAGS.
 */
static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
{
    if (ccr_only) {
        gen_helper_set_ccr(cpu_env, val);
    } else {
        gen_helper_set_sr(cpu_env, val);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
/* Common body of "move <EA>,CCR" and "move <EA>,SR": the immediate EA
 * form (mode 7.4) uses the compile-time path, anything else reads a
 * word operand at runtime.
 */
static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
                           bool ccr_only)
{
    if ((insn & 0x3f) == 0x3c) {
        /* EA field 111.100 = immediate */
        uint16_t val;
        val = read_im16(env, s);
        gen_set_sr_im(s, val, ccr_only);
    } else {
        TCGv src;
        SRC_EA(env, src, OS_WORD, 0, NULL);
        gen_set_sr(s, src, ccr_only);
    }
}
/* Immediate arithmetic/logic group: ori/andi/subi/addi/eori/cmpi
 * #imm,<EA>, including the ori/andi/eori #imm,CCR and #imm,SR forms.
 */
DISAS_INSN(arith_im)
{
    int op;
    TCGv im;
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;
    bool with_SR = ((insn & 0x3f) == 0x3c);   /* EA field 111.100 = CCR/SR */

    op = (insn >> 9) & 7;
    opsize = insn_opsize(insn);
    switch (opsize) {
    case OS_BYTE:
        im = tcg_const_i32((int8_t)read_im8(env, s));
        break;
    case OS_WORD:
        im = tcg_const_i32((int16_t)read_im16(env, s));
        break;
    case OS_LONG:
        im = tcg_const_i32(read_im32(env, s));
        break;
    default:
        abort();
    }

    if (with_SR) {
        /* SR/CCR can only be used with andi/eori/ori */
        if (op == 2 || op == 3 || op == 6) {
            disas_undef(env, s, insn);
            return;
        }
        switch (opsize) {
        case OS_BYTE:
            /* byte size selects CCR */
            src1 = gen_get_ccr(s);
            break;
        case OS_WORD:
            /* word size selects SR: supervisor only */
            if (IS_USER(s)) {
                gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
                return;
            }
            src1 = gen_get_sr(s);
            break;
        case OS_LONG:
            disas_undef(env, s, insn);
            return;
        }
    } else {
        /* cmpi does not write back, so no address is needed */
        SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
    }
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_or_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 1: /* andi */
        tcg_gen_and_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 2: /* subi */
        /* borrow computed before the subtraction */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
        tcg_gen_sub_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        set_cc_op(s, CC_OP_SUBB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 3: /* addi */
        tcg_gen_add_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        /* carry: unsigned result < addend */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
        set_cc_op(s, CC_OP_ADDB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 5: /* eori */
        tcg_gen_xor_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 6: /* cmpi */
        gen_update_cc_cmp(s, src1, im, opsize);
        break;
    default:
        abort();
    }
    tcg_temp_free(im);
    tcg_temp_free(dest);
}
/* CAS Dc,Du,<EA>: single compare-and-swap, emitted as an atomic
 * cmpxchg.  Flags come from comparing the loaded value with Dc, and
 * Dc always receives the loaded value.
 */
DISAS_INSN(cas)
{
    int opsize;
    TCGv addr;
    uint16_t ext;
    TCGv load;
    TCGv cmp;
    TCGMemOp opc;

    switch ((insn >> 9) & 3) {
    case 1:
        opsize = OS_BYTE;
        opc = MO_SB;
        break;
    case 2:
        opsize = OS_WORD;
        opc = MO_TESW;
        break;
    case 3:
        opsize = OS_LONG;
        opc = MO_TESL;
        break;
    default:
        g_assert_not_reached();
    }

    ext = read_im16(env, s);

    /* cas Dc,Du,<EA> */

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    cmp = gen_extend(DREG(ext, 0), opsize, 1);

    /* if  <EA> == Dc then
     *     <EA> = Du
     *     Dc = <EA> (because <EA> == Dc)
     * else
     *     Dc = <EA>
     */

    load = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
                               IS_USER(s), opc);
    /* update flags before setting cmp to load */
    gen_update_cc_cmp(s, load, cmp, opsize);
    gen_partset_reg(opsize, DREG(ext, 0), load);

    tcg_temp_free(load);

    /* Commit the address-register side effect of the EA mode. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrememnt.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
/* CAS2.W Dc1:Dc2,Du1:Du2,(Rn1):(Rn2): dual word compare-and-swap.
 * All register numbers are packed into one constant for the helper.
 */
DISAS_INSN(cas2w)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2;
    TCGv regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /* if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    regs = tcg_const_i32(REG(ext2, 6) |
                         (REG(ext1, 6) << 3) |
                         (REG(ext2, 0) << 6) |
                         (REG(ext1, 0) << 9));
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        /* No parallel-safe cas2w helper: punt to serialized execution. */
        gen_helper_exit_atomic(cpu_env);
    } else {
        gen_helper_cas2w(cpu_env, regs, addr1, addr2);
    }
    tcg_temp_free(regs);

    /* Note that cas2w also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPW;
    s->cc_op_synced = 1;
}
/* CAS2.L Dc1:Dc2,Du1:Du2,(Rn1):(Rn2): dual longword compare-and-swap.
 * Unlike cas2w there is a parallel-capable helper variant.
 */
DISAS_INSN(cas2l)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2, regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /* if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    regs = tcg_const_i32(REG(ext2, 6) |
                         (REG(ext1, 6) << 3) |
                         (REG(ext2, 0) << 6) |
                         (REG(ext1, 0) << 9));
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
    } else {
        gen_helper_cas2l(cpu_env, regs, addr1, addr2);
    }
    tcg_temp_free(regs);

    /* Note that cas2l also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPL;
    s->cc_op_synced = 1;
}
2471 DISAS_INSN(byterev)
2473 TCGv reg;
2475 reg = DREG(insn, 0);
2476 tcg_gen_bswap32_i32(reg, reg);
/* MOVE / MOVEA: the general data move.  Size comes from the top opcode
 * nibble; destination op field 1 selects MOVEA (no flag update),
 * anything else is a normal MOVE that sets the logic condition codes.
 */
DISAS_INSN(move)
{
    TCGv src;
    TCGv dest;
    int op;
    int opsize;

    switch (insn >> 12) {
    case 1: /* move.b */
        opsize = OS_BYTE;
        break;
    case 2: /* move.l */
        opsize = OS_LONG;
        break;
    case 3: /* move.w */
        opsize = OS_WORD;
        break;
    default:
        abort();
    }
    SRC_EA(env, src, opsize, 1, NULL);
    op = (insn >> 6) & 7;
    if (op == 1) {
        /* movea */
        /* The value will already have been sign extended.  */
        dest = AREG(insn, 9);
        tcg_gen_mov_i32(dest, src);
    } else {
        /* normal move */
        uint16_t dest_ea;
        /* rebuild an EA spec with mode/reg fields swapped into place */
        dest_ea = ((insn >> 9) & 7) | (op << 3);
        DEST_EA(env, dest_ea, opsize, src, NULL);
        /* This will be correct because loads sign extend.  */
        gen_logic_cc(s, src, opsize);
    }
}
/* NEGX <EA>: negate with extend, i.e. 0 - operand - X, with sticky !Z. */
DISAS_INSN(negx)
{
    TCGv z;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, &addr);

    gen_flush_flags(s); /* compute old Z */

    /* Perform substract with borrow.
     * (X, N) = -(src + X);
     */

    z = tcg_const_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
    tcg_temp_free(z);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* only the borrow bit of the double-width subtract is X */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for negation.  The normal formula for
     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
     * this simplies to res & src.
     */

    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */

    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
}
/* LEA <EA>,An: load the effective address itself into An. */
DISAS_INSN(lea)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 9);
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        /* addressing mode has no address (e.g. register direct) */
        gen_addr_fault(s);
        return;
    }
    tcg_gen_mov_i32(reg, tmp);
}
/* CLR <EA>: store zero to the destination and set the logic flags
 * (N=0, Z=1, V=C=0).
 */
DISAS_INSN(clr)
{
    int opsize;
    TCGv zero;

    zero = tcg_const_i32(0);

    opsize = insn_opsize(insn);
    DEST_EA(env, insn, opsize, zero, NULL);
    gen_logic_cc(s, zero, opsize);
    tcg_temp_free(zero);
}
/* MOVE CCR,<EA>: store the condition code register as a word. */
DISAS_INSN(move_from_ccr)
{
    TCGv ccr;

    ccr = gen_get_ccr(s);
    DEST_EA(env, insn, OS_WORD, ccr, NULL);
}
/* NEG <EA>: two's complement negate, with sub-style flags and X set
 * when the result is non-zero (i.e. a borrow occurred).
 */
DISAS_INSN(neg)
{
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src1, opsize, 1, &addr);
    dest = tcg_temp_new();
    tcg_gen_neg_i32(dest, src1);
    set_cc_op(s, CC_OP_SUBB + opsize);
    gen_update_cc_add(dest, src1, opsize);
    /* X = C = (operand != 0) for a negate */
    tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
    DEST_EA(env, insn, opsize, dest, &addr);
    tcg_temp_free(dest);
}
/* MOVE <EA>,CCR: thin wrapper around the shared move-to-SR/CCR path. */
DISAS_INSN(move_to_ccr)
{
    gen_move_to_sr(env, s, insn, true);
}
/* NOT <EA>: one's complement in place, setting the logic flags. */
DISAS_INSN(not)
{
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src1, opsize, 1, &addr);
    dest = tcg_temp_new();
    tcg_gen_not_i32(dest, src1);
    DEST_EA(env, insn, opsize, dest, &addr);
    gen_logic_cc(s, dest, opsize);
}
2631 DISAS_INSN(swap)
2633 TCGv src1;
2634 TCGv src2;
2635 TCGv reg;
2637 src1 = tcg_temp_new();
2638 src2 = tcg_temp_new();
2639 reg = DREG(insn, 0);
2640 tcg_gen_shli_i32(src1, reg, 16);
2641 tcg_gen_shri_i32(src2, reg, 16);
2642 tcg_gen_or_i32(reg, src1, src2);
2643 tcg_temp_free(src2);
2644 tcg_temp_free(src1);
2645 gen_logic_cc(s, reg, OS_LONG);
/* BKPT: raise a debug exception at the instruction address. */
DISAS_INSN(bkpt)
{
    gen_exception(s, s->insn_pc, EXCP_DEBUG);
}
/* PEA <EA>: push the effective address itself onto the stack. */
DISAS_INSN(pea)
{
    TCGv tmp;

    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        /* addressing mode has no address (e.g. register direct) */
        gen_addr_fault(s);
        return;
    }
    gen_push(s, tmp);
}
/* EXT / EXTB: sign extension.  op field: 2 = ext.w (byte->word),
 * 3 = ext.l (word->long), otherwise byte->long (extb.l).
 */
DISAS_INSN(ext)
{
    int op;
    TCGv reg;
    TCGv tmp;

    reg = DREG(insn, 0);
    op = (insn >> 6) & 7;
    tmp = tcg_temp_new();
    if (op == 3)
        tcg_gen_ext16s_i32(tmp, reg);   /* word -> long */
    else
        tcg_gen_ext8s_i32(tmp, reg);    /* byte -> word or long */
    if (op == 2)
        gen_partset_reg(OS_WORD, reg, tmp);  /* ext.w keeps upper half */
    else
        tcg_gen_mov_i32(reg, tmp);
    gen_logic_cc(s, tmp, OS_LONG);
    tcg_temp_free(tmp);
}
/* TST <EA>: set the logic condition codes from the operand. */
DISAS_INSN(tst)
{
    int opsize;
    TCGv tmp;

    opsize = insn_opsize(insn);
    SRC_EA(env, tmp, opsize, 1, NULL);
    gen_logic_cc(s, tmp, opsize);
}
/* PULSE: no architecturally visible effect in emulation. */
DISAS_INSN(pulse)
{
    /* Implemented as a NOP.  */
}
/* ILLEGAL: always raises the illegal-instruction exception. */
DISAS_INSN(illegal)
{
    gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
}
/* ??? This should be atomic.  */
/* TAS <EA>: test the byte (logic flags), then set its top bit.
 * Emitted as a plain load / or / store, so NOT an atomic RMW.
 */
DISAS_INSN(tas)
{
    TCGv dest;
    TCGv src1;
    TCGv addr;

    dest = tcg_temp_new();
    SRC_EA(env, src1, OS_BYTE, 1, &addr);
    gen_logic_cc(s, src1, OS_BYTE);   /* flags from the value before set */
    tcg_gen_ori_i32(dest, src1, 0x80);
    DEST_EA(env, insn, OS_BYTE, dest, &addr);
    tcg_temp_free(dest);
}
/* MULS.L/MULU.L <ea>,Dl (and 64-bit Dh:Dl form when ext bit 10 set).
 * Three variants: 64-bit result (QUAD_MULDIV feature), 32-bit result
 * with full V/C flag computation (M68000), and the ColdFire 32-bit
 * form where flags are simple logic flags.
 */
2721 DISAS_INSN(mull)
2723 uint16_t ext;
2724 TCGv src1;
2725 int sign;
2727 ext = read_im16(env, s);
2729 sign = ext & 0x800;
2731 if (ext & 0x400) {
2732 if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
2733 gen_exception(s, s->insn_pc, EXCP_UNSUPPORTED);
2734 return;
2737 SRC_EA(env, src1, OS_LONG, 0, NULL);
/* Full 64-bit product into CC_Z:CC_N (lo:hi). */
2739 if (sign) {
2740 tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2741 } else {
2742 tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
/* if Dl == Dh, 68040 returns low word */
/* Write Dh first so an aliased Dl (ext 0 == ext 12) ends up with
   the low word, matching 68040 behaviour. */
2745 tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
2746 tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
/* Z is set iff the whole 64-bit product is zero. */
2747 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);
2749 tcg_gen_movi_i32(QREG_CC_V, 0);
2750 tcg_gen_movi_i32(QREG_CC_C, 0);
2752 set_cc_op(s, CC_OP_FLAGS);
2753 return;
2755 SRC_EA(env, src1, OS_LONG, 0, NULL);
2756 if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2757 tcg_gen_movi_i32(QREG_CC_C, 0);
2758 if (sign) {
2759 tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2760 /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
2761 tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
2762 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
2763 } else {
2764 tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2765 /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
2766 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
2768 tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
2769 tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);
2771 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
2773 set_cc_op(s, CC_OP_FLAGS);
2774 } else {
2775 /* The upper 32 bits of the product are discarded, so
2776 muls.l and mulu.l are functionally equivalent. */
2777 tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
2778 gen_logic_cc(s, DREG(ext, 12), OS_LONG);
/* Common LINK implementation: push An, load An with the new SP, then
 * add the (sign-extended) displacement to SP.  The (insn & 7) != 7
 * check handles LINK A7: the pushed value must not overwrite SP
 * before the displacement is applied.
 */
2782 static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
2784 TCGv reg;
2785 TCGv tmp;
2787 reg = AREG(insn, 0);
2788 tmp = tcg_temp_new();
2789 tcg_gen_subi_i32(tmp, QREG_SP, 4);
2790 gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
2791 if ((insn & 7) != 7) {
2792 tcg_gen_mov_i32(reg, tmp);
2794 tcg_gen_addi_i32(QREG_SP, tmp, offset);
2795 tcg_temp_free(tmp);
/* LINK An,#d16: 16-bit displacement form. */
2798 DISAS_INSN(link)
2800 int16_t offset;
2802 offset = read_im16(env, s);
2803 gen_link(s, insn, offset);
/* LINK.L An,#d32: 32-bit displacement form. */
2806 DISAS_INSN(linkl)
2808 int32_t offset;
2810 offset = read_im32(env, s);
2811 gen_link(s, insn, offset);
/* UNLK An: SP = An; An = (SP)+.  The old An value is copied to a temp
 * first so UNLK A7 behaves correctly when reg aliases SP.
 */
2814 DISAS_INSN(unlk)
2816 TCGv src;
2817 TCGv reg;
2818 TCGv tmp;
2820 src = tcg_temp_new();
2821 reg = AREG(insn, 0);
2822 tcg_gen_mov_i32(src, reg);
2823 tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
2824 tcg_gen_mov_i32(reg, tmp);
2825 tcg_gen_addi_i32(QREG_SP, src, 4);
2826 tcg_temp_free(src);
2829 #if defined(CONFIG_SOFTMMU)
/* RESET: privileged; assert the external reset line via helper. */
2830 DISAS_INSN(reset)
2832 if (IS_USER(s)) {
2833 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
2834 return;
2837 gen_helper_reset(cpu_env);
2839 #endif
/* NOP: no operation. */
2841 DISAS_INSN(nop)
/* RTD #d16: pop return address, then deallocate d16 extra bytes. */
2845 DISAS_INSN(rtd)
2847 TCGv tmp;
2848 int16_t offset = read_im16(env, s);
2850 tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2851 tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2852 gen_jmp(s, tmp);
/* RTS: pop the return address from the stack and jump to it. */
2855 DISAS_INSN(rts)
2857 TCGv tmp;
2859 tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2860 tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2861 gen_jmp(s, tmp);
/* JMP/JSR <ea>: bit 6 clear means JSR (push return address first). */
2864 DISAS_INSN(jump)
2866 TCGv tmp;
2868 /* Load the target address first to ensure correct exception
2869 behavior. */
2870 tmp = gen_lea(env, s, insn, OS_LONG);
2871 if (IS_NULL_QREG(tmp)) {
2872 gen_addr_fault(s);
2873 return;
2875 if ((insn & 0x40) == 0) {
2876 /* jsr */
2877 gen_push(s, tcg_const_i32(s->pc));
2879 gen_jmp(s, tmp);
/* ADDQ/SUBQ #imm3,<ea>: quick add/sub of 1..8 (encoded 0 means 8).
 * Bit 8 selects SUBQ.  Address-register destinations are always long
 * and do not affect the condition codes.
 */
2882 DISAS_INSN(addsubq)
2884 TCGv src;
2885 TCGv dest;
2886 TCGv val;
2887 int imm;
2888 TCGv addr;
2889 int opsize;
2891 if ((insn & 070) == 010) {
2892 /* Operation on address register is always long. */
2893 opsize = OS_LONG;
2894 } else {
2895 opsize = insn_opsize(insn);
2897 SRC_EA(env, src, opsize, 1, &addr);
2898 imm = (insn >> 9) & 7;
2899 if (imm == 0) {
2900 imm = 8;
2902 val = tcg_const_i32(imm);
2903 dest = tcg_temp_new();
2904 tcg_gen_mov_i32(dest, src);
2905 if ((insn & 0x38) == 0x08) {
2906 /* Don't update condition codes if the destination is an
2907 address register. */
2908 if (insn & 0x0100) {
2909 tcg_gen_sub_i32(dest, dest, val);
2910 } else {
2911 tcg_gen_add_i32(dest, dest, val);
2913 } else {
2914 if (insn & 0x0100) {
/* SUBQ: X = borrow (dest < val before subtract). */
2915 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
2916 tcg_gen_sub_i32(dest, dest, val);
2917 set_cc_op(s, CC_OP_SUBB + opsize);
2918 } else {
/* ADDQ: X = carry (result < val after add). */
2919 tcg_gen_add_i32(dest, dest, val);
2920 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
2921 set_cc_op(s, CC_OP_ADDB + opsize);
2923 gen_update_cc_add(dest, val, opsize);
2925 tcg_temp_free(val);
2926 DEST_EA(env, insn, opsize, dest, &addr);
2927 tcg_temp_free(dest);
/* TPF (ColdFire trap-false): skip 0, 1 or 2 extension words; any other
   encoding is undefined. */
2930 DISAS_INSN(tpf)
2932 switch (insn & 7) {
2933 case 2: /* One extension word. */
2934 s->pc += 2;
2935 break;
2936 case 3: /* Two extension words. */
2937 s->pc += 4;
2938 break;
2939 case 4: /* No extension words. */
2940 break;
2941 default:
2942 disas_undef(env, s, insn);
/* BRA/BSR/Bcc: 8-bit inline displacement, with 0x00 selecting a 16-bit
 * and 0xff a 32-bit extension displacement.  op==0 BRA, op==1 BSR,
 * op>1 conditional branch.
 */
2946 DISAS_INSN(branch)
2948 int32_t offset;
2949 uint32_t base;
2950 int op;
2951 TCGLabel *l1;
2953 base = s->pc;
2954 op = (insn >> 8) & 0xf;
2955 offset = (int8_t)insn;
2956 if (offset == 0) {
2957 offset = (int16_t)read_im16(env, s);
2958 } else if (offset == -1) {
2959 offset = read_im32(env, s);
2961 if (op == 1) {
2962 /* bsr */
2963 gen_push(s, tcg_const_i32(s->pc));
2965 if (op > 1) {
2966 /* Bcc */
/* Branch over the taken-path goto_tb when the inverted condition
   holds, i.e. fall through to s->pc when the condition is false. */
2967 l1 = gen_new_label();
2968 gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
2969 gen_jmp_tb(s, 1, base + offset);
2970 gen_set_label(l1);
2971 gen_jmp_tb(s, 0, s->pc);
2972 } else {
2973 /* Unconditional branch. */
2974 update_cc_op(s);
2975 gen_jmp_tb(s, 0, base + offset);
/* MOVEQ #imm8,Dn: sign-extended 8-bit immediate; sets logic flags. */
2979 DISAS_INSN(moveq)
2981 tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
2982 gen_logic_cc(s, DREG(insn, 9), OS_LONG);
/* MVZ/MVS (ColdFire): move byte/word to Dn with zero (MVZ) or sign
   (MVS, bit 7 set) extension; flags from the extended source. */
2985 DISAS_INSN(mvzs)
2987 int opsize;
2988 TCGv src;
2989 TCGv reg;
2991 if (insn & 0x40)
2992 opsize = OS_WORD;
2993 else
2994 opsize = OS_BYTE;
2995 SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
2996 reg = DREG(insn, 9);
2997 tcg_gen_mov_i32(reg, src);
2998 gen_logic_cc(s, src, opsize);
/* OR: bit 8 selects direction — Dn OR <ea> -> <ea>, else
   <ea> OR Dn -> Dn (partial register write per opsize). */
3001 DISAS_INSN(or)
3003 TCGv reg;
3004 TCGv dest;
3005 TCGv src;
3006 TCGv addr;
3007 int opsize;
3009 opsize = insn_opsize(insn);
3010 reg = gen_extend(DREG(insn, 9), opsize, 0);
3011 dest = tcg_temp_new();
3012 if (insn & 0x100) {
3013 SRC_EA(env, src, opsize, 0, &addr);
3014 tcg_gen_or_i32(dest, src, reg);
3015 DEST_EA(env, insn, opsize, dest, &addr);
3016 } else {
3017 SRC_EA(env, src, opsize, 0, NULL);
3018 tcg_gen_or_i32(dest, src, reg);
3019 gen_partset_reg(opsize, DREG(insn, 9), dest);
3021 gen_logic_cc(s, dest, opsize);
3022 tcg_temp_free(dest);
/* SUBA <ea>,An: word source is sign-extended; flags unaffected. */
3025 DISAS_INSN(suba)
3027 TCGv src;
3028 TCGv reg;
3030 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3031 reg = AREG(insn, 9);
3032 tcg_gen_sub_i32(reg, reg, src);
/* Common SUBX: dest - src - X with full flag computation; the result is
 * left in QREG_CC_N for the caller to write back.  Flags end in
 * CC_OP_FLAGS form; Z is sticky (only cleared, never set, by SUBX).
 */
3035 static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
3037 TCGv tmp;
3039 gen_flush_flags(s); /* compute old Z */
3041 /* Perform substract with borrow.
3042 * (X, N) = dest - (src + X);
3045 tmp = tcg_const_i32(0);
3046 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
3047 tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
3048 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3049 tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
3051 /* Compute signed-overflow for substract. */
3053 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
3054 tcg_gen_xor_i32(tmp, dest, src);
3055 tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
3056 tcg_temp_free(tmp);
3058 /* Copy the rest of the results into place. */
3059 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
3060 tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
3062 set_cc_op(s, CC_OP_FLAGS);
3064 /* result is in QREG_CC_N */
/* SUBX Dy,Dx: register form — sign-extend both operands, compute via
   gen_subx, write the low opsize bits of the result back to Dx. */
3067 DISAS_INSN(subx_reg)
3069 TCGv dest;
3070 TCGv src;
3071 int opsize;
3073 opsize = insn_opsize(insn);
3075 src = gen_extend(DREG(insn, 0), opsize, 1);
3076 dest = gen_extend(DREG(insn, 9), opsize, 1);
3078 gen_subx(s, src, dest, opsize);
3080 gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3083 DISAS_INSN(subx_mem)
3085 TCGv src;
3086 TCGv addr_src;
3087 TCGv dest;
3088 TCGv addr_dest;
3089 int opsize;
3091 opsize = insn_opsize(insn);
3093 addr_src = AREG(insn, 0);
3094 tcg_gen_subi_i32(addr_src, addr_src, opsize);
3095 src = gen_load(s, opsize, addr_src, 1, IS_USER(s));
3097 addr_dest = AREG(insn, 9);
3098 tcg_gen_subi_i32(addr_dest, addr_dest, opsize);
3099 dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));
3101 gen_subx(s, src, dest, opsize);
3103 gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
/* MOV3Q #imm3,<ea> (ColdFire): immediate 1..7, with 0 encoding -1;
   sets logic flags from the value. */
3106 DISAS_INSN(mov3q)
3108 TCGv src;
3109 int val;
3111 val = (insn >> 9) & 7;
3112 if (val == 0)
3113 val = -1;
3114 src = tcg_const_i32(val);
3115 gen_logic_cc(s, src, OS_LONG);
3116 DEST_EA(env, insn, OS_LONG, src, NULL);
3117 tcg_temp_free(src);
/* CMP <ea>,Dn: compare (sets flags only, no write-back). */
3120 DISAS_INSN(cmp)
3122 TCGv src;
3123 TCGv reg;
3124 int opsize;
3126 opsize = insn_opsize(insn);
3127 SRC_EA(env, src, opsize, 1, NULL);
3128 reg = gen_extend(DREG(insn, 9), opsize, 1);
3129 gen_update_cc_cmp(s, reg, src, opsize);
/* CMPA <ea>,An: word sources are sign-extended and the comparison is
   always performed on the full 32 bits. */
3132 DISAS_INSN(cmpa)
3134 int opsize;
3135 TCGv src;
3136 TCGv reg;
3138 if (insn & 0x100) {
3139 opsize = OS_LONG;
3140 } else {
3141 opsize = OS_WORD;
3143 SRC_EA(env, src, opsize, 1, NULL);
3144 reg = AREG(insn, 9);
3145 gen_update_cc_cmp(s, reg, src, OS_LONG);
/* CMPM (Ay)+,(Ax)+: compare two memory operands, both with
   post-increment addressing. */
3148 DISAS_INSN(cmpm)
3150 int opsize = insn_opsize(insn);
3151 TCGv src, dst;
3153 /* Post-increment load (mode 3) from Ay. */
3154 src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
3155 NULL_QREG, NULL, EA_LOADS, IS_USER(s));
3156 /* Post-increment load (mode 3) from Ax. */
3157 dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
3158 NULL_QREG, NULL, EA_LOADS, IS_USER(s));
3160 gen_update_cc_cmp(s, dst, src, opsize);
/* EOR Dn,<ea>: exclusive-or, read-modify-write through the EA. */
3163 DISAS_INSN(eor)
3165 TCGv src;
3166 TCGv dest;
3167 TCGv addr;
3168 int opsize;
3170 opsize = insn_opsize(insn);
3172 SRC_EA(env, src, opsize, 0, &addr);
3173 dest = tcg_temp_new();
3174 tcg_gen_xor_i32(dest, src, DREG(insn, 9));
3175 gen_logic_cc(s, dest, opsize);
3176 DEST_EA(env, insn, opsize, dest, &addr);
3177 tcg_temp_free(dest);
3180 static void do_exg(TCGv reg1, TCGv reg2)
3182 TCGv temp = tcg_temp_new();
3183 tcg_gen_mov_i32(temp, reg1);
3184 tcg_gen_mov_i32(reg1, reg2);
3185 tcg_gen_mov_i32(reg2, temp);
3186 tcg_temp_free(temp);
/* EXG Dx,Dy: data/data register exchange. */
3189 DISAS_INSN(exg_dd)
3191 /* exchange Dx and Dy */
3192 do_exg(DREG(insn, 9), DREG(insn, 0));
/* EXG Ax,Ay: address/address register exchange. */
3195 DISAS_INSN(exg_aa)
3197 /* exchange Ax and Ay */
3198 do_exg(AREG(insn, 9), AREG(insn, 0));
/* EXG Dx,Ay: data/address register exchange. */
3201 DISAS_INSN(exg_da)
3203 /* exchange Dx and Ay */
3204 do_exg(DREG(insn, 9), AREG(insn, 0));
/* AND: bit 8 selects direction — Dn AND <ea> -> <ea>, else
   <ea> AND Dn -> Dn (partial register write per opsize). */
3207 DISAS_INSN(and)
3209 TCGv src;
3210 TCGv reg;
3211 TCGv dest;
3212 TCGv addr;
3213 int opsize;
3215 dest = tcg_temp_new();
3217 opsize = insn_opsize(insn);
3218 reg = DREG(insn, 9);
3219 if (insn & 0x100) {
3220 SRC_EA(env, src, opsize, 0, &addr);
3221 tcg_gen_and_i32(dest, src, reg);
3222 DEST_EA(env, insn, opsize, dest, &addr);
3223 } else {
3224 SRC_EA(env, src, opsize, 0, NULL);
3225 tcg_gen_and_i32(dest, src, reg);
3226 gen_partset_reg(opsize, reg, dest);
3228 gen_logic_cc(s, dest, opsize);
3229 tcg_temp_free(dest);
/* ADDA <ea>,An: word source is sign-extended; flags unaffected. */
3232 DISAS_INSN(adda)
3234 TCGv src;
3235 TCGv reg;
3237 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3238 reg = AREG(insn, 9);
3239 tcg_gen_add_i32(reg, reg, src);
/* Common ADDX: src + dest + X with full flag computation; the result is
 * left in QREG_CC_N for the caller to write back.  Z is sticky (only
 * cleared, never set, by ADDX).
 */
3242 static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
3244 TCGv tmp;
3246 gen_flush_flags(s); /* compute old Z */
3248 /* Perform addition with carry.
3249 * (X, N) = src + dest + X;
3252 tmp = tcg_const_i32(0);
3253 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
3254 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
3255 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3257 /* Compute signed-overflow for addition. */
3259 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
3260 tcg_gen_xor_i32(tmp, dest, src);
3261 tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
3262 tcg_temp_free(tmp);
3264 /* Copy the rest of the results into place. */
3265 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
3266 tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
3268 set_cc_op(s, CC_OP_FLAGS);
3270 /* result is in QREG_CC_N */
/* ADDX Dy,Dx: register form — sign-extend both operands, compute via
   gen_addx, write the low opsize bits of the result back to Dx. */
3273 DISAS_INSN(addx_reg)
3275 TCGv dest;
3276 TCGv src;
3277 int opsize;
3279 opsize = insn_opsize(insn);
3281 dest = gen_extend(DREG(insn, 9), opsize, 1);
3282 src = gen_extend(DREG(insn, 0), opsize, 1);
3284 gen_addx(s, src, dest, opsize);
3286 gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
/* ADDX -(Ay),-(Ax): memory form — both operands loaded with
   predecrement (by the operand width in bytes), result stored to the
   destination address. */
3289 DISAS_INSN(addx_mem)
3291 TCGv src;
3292 TCGv addr_src;
3293 TCGv dest;
3294 TCGv addr_dest;
3295 int opsize;
3297 opsize = insn_opsize(insn);
3299 addr_src = AREG(insn, 0);
3300 tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
3301 src = gen_load(s, opsize, addr_src, 1, IS_USER(s));
3303 addr_dest = AREG(insn, 9);
3304 tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
3305 dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));
3307 gen_addx(s, src, dest, opsize);
3309 gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
/* Immediate-count shifts (ASL/ASR/LSL/LSR #imm,Dn).  Count 0 encodes 8.
 * Computes C from the last bit shifted out, X=C, N/Z from the result,
 * and V per M68000 rules for arithmetic left shifts.
 */
3312 static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
3314 int count = (insn >> 9) & 7;
3315 int logical = insn & 8;
3316 int left = insn & 0x100;
3317 int bits = opsize_bytes(opsize) * 8;
3318 TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);
3320 if (count == 0) {
3321 count = 8;
3324 tcg_gen_movi_i32(QREG_CC_V, 0);
3325 if (left) {
3326 tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
3327 tcg_gen_shli_i32(QREG_CC_N, reg, count);
3329 /* Note that ColdFire always clears V (done above),
3330 while M68000 sets if the most significant bit is changed at
3331 any time during the shift operation */
3332 if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3333 /* if shift count >= bits, V is (reg != 0) */
3334 if (count >= bits) {
3335 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
3336 } else {
3337 TCGv t0 = tcg_temp_new();
3338 tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
3339 tcg_gen_sari_i32(t0, reg, bits - count - 1);
3340 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
3341 tcg_temp_free(t0);
3343 tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
3345 } else {
3346 tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
3347 if (logical) {
3348 tcg_gen_shri_i32(QREG_CC_N, reg, count);
3349 } else {
3350 tcg_gen_sari_i32(QREG_CC_N, reg, count);
3354 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3355 tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3356 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3357 tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3359 gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3360 set_cc_op(s, CC_OP_FLAGS);
/* Register-count shifts (ASL/ASR/LSL/LSR Dm,Dn).  The count is taken
 * modulo 64; the work is done in 64 bits so "the last bit shifted out"
 * (for C) falls out of the wide result.  X is updated only for a
 * non-zero count; V is computed for M68000 arithmetic left shifts.
 */
3363 static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
3365 int logical = insn & 8;
3366 int left = insn & 0x100;
3367 int bits = opsize_bytes(opsize) * 8;
3368 TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);
3369 TCGv s32;
3370 TCGv_i64 t64, s64;
3372 t64 = tcg_temp_new_i64();
3373 s64 = tcg_temp_new_i64();
3374 s32 = tcg_temp_new();
3376 /* Note that m68k truncates the shift count modulo 64, not 32.
3377 In addition, a 64-bit shift makes it easy to find "the last
3378 bit shifted out", for the carry flag. */
3379 tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
3380 tcg_gen_extu_i32_i64(s64, s32);
3381 tcg_gen_extu_i32_i64(t64, reg);
3383 /* Optimistically set V=0. Also used as a zero source below. */
3384 tcg_gen_movi_i32(QREG_CC_V, 0);
3385 if (left) {
3386 tcg_gen_shl_i64(t64, t64, s64);
3388 if (opsize == OS_LONG) {
3389 tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
3390 /* Note that C=0 if shift count is 0, and we get that for free. */
3391 } else {
3392 TCGv zero = tcg_const_i32(0);
3393 tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
3394 tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
3395 tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3396 s32, zero, zero, QREG_CC_C);
3397 tcg_temp_free(zero);
3399 tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3401 /* X = C, but only if the shift count was non-zero. */
3402 tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3403 QREG_CC_C, QREG_CC_X);
3405 /* M68000 sets V if the most significant bit is changed at
3406 * any time during the shift operation. Do this via creating
3407 * an extension of the sign bit, comparing, and discarding
3408 * the bits below the sign bit. I.e.
3409 * int64_t s = (intN_t)reg;
3410 * int64_t t = (int64_t)(intN_t)reg << count;
3411 * V = ((s ^ t) & (-1 << (bits - 1))) != 0
3413 if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3414 TCGv_i64 tt = tcg_const_i64(32);
3415 /* if shift is greater than 32, use 32 */
3416 tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
3417 tcg_temp_free_i64(tt);
3418 /* Sign extend the input to 64 bits; re-do the shift. */
3419 tcg_gen_ext_i32_i64(t64, reg);
3420 tcg_gen_shl_i64(s64, t64, s64);
3421 /* Clear all bits that are unchanged. */
3422 tcg_gen_xor_i64(t64, t64, s64);
3423 /* Ignore the bits below the sign bit. */
3424 tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
3425 /* If any bits remain set, we have overflow. */
3426 tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
3427 tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
3428 tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
3430 } else {
/* Right shift: place the value in the high half so the last bit
   shifted out lands in the low half's msb. */
3431 tcg_gen_shli_i64(t64, t64, 32);
3432 if (logical) {
3433 tcg_gen_shr_i64(t64, t64, s64);
3434 } else {
3435 tcg_gen_sar_i64(t64, t64, s64);
3437 tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);
3439 /* Note that C=0 if shift count is 0, and we get that for free. */
3440 tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);
3442 /* X = C, but only if the shift count was non-zero. */
3443 tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3444 QREG_CC_C, QREG_CC_X);
3446 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3447 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3449 tcg_temp_free(s32);
3450 tcg_temp_free_i64(s64);
3451 tcg_temp_free_i64(t64);
3453 /* Write back the result. */
3454 gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3455 set_cc_op(s, CC_OP_FLAGS);
/* Byte-size immediate shift. */
3458 DISAS_INSN(shift8_im)
3460 shift_im(s, insn, OS_BYTE);
/* Word-size immediate shift. */
3463 DISAS_INSN(shift16_im)
3465 shift_im(s, insn, OS_WORD);
/* Long-size immediate shift. */
3468 DISAS_INSN(shift_im)
3470 shift_im(s, insn, OS_LONG);
/* Byte-size register-count shift. */
3473 DISAS_INSN(shift8_reg)
3475 shift_reg(s, insn, OS_BYTE);
/* Word-size register-count shift. */
3478 DISAS_INSN(shift16_reg)
3480 shift_reg(s, insn, OS_WORD);
/* Long-size register-count shift. */
3483 DISAS_INSN(shift_reg)
3485 shift_reg(s, insn, OS_LONG);
/* Memory shifts: always word-sized, always shift by exactly 1.
 * Read-modify-write through the EA; C is the bit shifted out, X=C,
 * and V (M68000 arithmetic left only) is set if the sign changed.
 */
3488 DISAS_INSN(shift_mem)
3490 int logical = insn & 8;
3491 int left = insn & 0x100;
3492 TCGv src;
3493 TCGv addr;
3495 SRC_EA(env, src, OS_WORD, !logical, &addr);
3496 tcg_gen_movi_i32(QREG_CC_V, 0);
3497 if (left) {
3498 tcg_gen_shri_i32(QREG_CC_C, src, 15);
3499 tcg_gen_shli_i32(QREG_CC_N, src, 1);
3501 /* Note that ColdFire always clears V,
3502 while M68000 sets if the most significant bit is changed at
3503 any time during the shift operation */
3504 if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3505 src = gen_extend(src, OS_WORD, 1);
3506 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
3508 } else {
3509 tcg_gen_mov_i32(QREG_CC_C, src);
3510 if (logical) {
3511 tcg_gen_shri_i32(QREG_CC_N, src, 1);
3512 } else {
3513 tcg_gen_sari_i32(QREG_CC_N, src, 1);
3517 gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
3518 tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3519 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3520 tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3522 DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
3523 set_cc_op(s, CC_OP_FLAGS);
/* ROL/ROR without X.  8/16-bit values are replicated across the 32-bit
 * word so a plain 32-bit rotate produces the right result, then the
 * result is sign-extended back.  Flags: N/Z from result, C is the bit
 * rotated into the other end, V always cleared, X unaffected.
 */
3526 static void rotate(TCGv reg, TCGv shift, int left, int size)
3528 switch (size) {
3529 case 8:
3530 /* Replicate the 8-bit input so that a 32-bit rotate works. */
3531 tcg_gen_ext8u_i32(reg, reg);
3532 tcg_gen_muli_i32(reg, reg, 0x01010101);
3533 goto do_long;
3534 case 16:
3535 /* Replicate the 16-bit input so that a 32-bit rotate works. */
3536 tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
3537 goto do_long;
3538 do_long:
3539 default:
3540 if (left) {
3541 tcg_gen_rotl_i32(reg, reg, shift);
3542 } else {
3543 tcg_gen_rotr_i32(reg, reg, shift);
3547 /* compute flags */
3549 switch (size) {
3550 case 8:
3551 tcg_gen_ext8s_i32(reg, reg);
3552 break;
3553 case 16:
3554 tcg_gen_ext16s_i32(reg, reg);
3555 break;
3556 default:
3557 break;
3560 /* QREG_CC_X is not affected */
3562 tcg_gen_mov_i32(QREG_CC_N, reg);
3563 tcg_gen_mov_i32(QREG_CC_Z, reg);
/* C is the last bit rotated out: lsb for left, msb for right. */
3565 if (left) {
3566 tcg_gen_andi_i32(QREG_CC_C, reg, 1);
3567 } else {
3568 tcg_gen_shri_i32(QREG_CC_C, reg, 31);
3571 tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
/* Set flags after a ROXL/ROXR: sign-extend the result to set N/Z,
 * copy the computed X bit into both X and C, clear V.
 */
3574 static void rotate_x_flags(TCGv reg, TCGv X, int size)
3576 switch (size) {
3577 case 8:
3578 tcg_gen_ext8s_i32(reg, reg);
3579 break;
3580 case 16:
3581 tcg_gen_ext16s_i32(reg, reg);
3582 break;
3583 default:
3584 break;
3586 tcg_gen_mov_i32(QREG_CC_N, reg);
3587 tcg_gen_mov_i32(QREG_CC_Z, reg);
3588 tcg_gen_mov_i32(QREG_CC_X, X);
3589 tcg_gen_mov_i32(QREG_CC_C, X);
3590 tcg_gen_movi_i32(QREG_CC_V, 0);
/* ROXL/ROXR for 8/16-bit operands: rotate through the X bit, treated as
 * a (size+1)-bit rotate built from three shifts.  Returns a new temp
 * holding the resulting X bit; caller passes it to rotate_x_flags and
 * frees it.
 */
3593 /* Result of rotate_x() is valid if 0 <= shift <= size */
3594 static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
3596 TCGv X, shl, shr, shx, sz, zero;
3598 sz = tcg_const_i32(size);
3600 shr = tcg_temp_new();
3601 shl = tcg_temp_new();
3602 shx = tcg_temp_new();
3603 if (left) {
3604 tcg_gen_mov_i32(shl, shift); /* shl = shift */
3605 tcg_gen_movi_i32(shr, size + 1);
3606 tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
3607 tcg_gen_subi_i32(shx, shift, 1); /* shx = shift - 1 */
3608 /* shx = shx < 0 ? size : shx; */
3609 zero = tcg_const_i32(0);
3610 tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
3611 tcg_temp_free(zero);
3612 } else {
3613 tcg_gen_mov_i32(shr, shift); /* shr = shift */
3614 tcg_gen_movi_i32(shl, size + 1);
3615 tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
3616 tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
3619 /* reg = (reg << shl) | (reg >> shr) | (x << shx); */
3621 tcg_gen_shl_i32(shl, reg, shl);
3622 tcg_gen_shr_i32(shr, reg, shr);
3623 tcg_gen_or_i32(reg, shl, shr);
3624 tcg_temp_free(shl);
3625 tcg_temp_free(shr);
3626 tcg_gen_shl_i32(shx, QREG_CC_X, shx);
3627 tcg_gen_or_i32(reg, reg, shx);
3628 tcg_temp_free(shx);
3630 /* X = (reg >> size) & 1 */
3632 X = tcg_temp_new();
3633 tcg_gen_shr_i32(X, reg, sz);
3634 tcg_gen_andi_i32(X, X, 1);
3635 tcg_temp_free(sz);
3637 return X;
/* 32-bit ROXL/ROXR: build a 33-bit value [reg:X] in a 64-bit temp,
 * rotate it, and split the result back out.  Returns a new temp with
 * the new X bit.  If the shift count is zero neither the register nor
 * X is modified (handled by the trailing movconds).
 */
3640 /* Result of rotate32_x() is valid if 0 <= shift < 33 */
3641 static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
3643 TCGv_i64 t0, shift64;
3644 TCGv X, lo, hi, zero;
3646 shift64 = tcg_temp_new_i64();
3647 tcg_gen_extu_i32_i64(shift64, shift);
3649 t0 = tcg_temp_new_i64();
3651 X = tcg_temp_new();
3652 lo = tcg_temp_new();
3653 hi = tcg_temp_new();
3655 if (left) {
3656 /* create [reg:X:..] */
3658 tcg_gen_shli_i32(lo, QREG_CC_X, 31);
3659 tcg_gen_concat_i32_i64(t0, lo, reg);
3661 /* rotate */
3663 tcg_gen_rotl_i64(t0, t0, shift64);
3664 tcg_temp_free_i64(shift64);
3666 /* result is [reg:..:reg:X] */
3668 tcg_gen_extr_i64_i32(lo, hi, t0);
3669 tcg_gen_andi_i32(X, lo, 1);
3671 tcg_gen_shri_i32(lo, lo, 1);
3672 } else {
3673 /* create [..:X:reg] */
3675 tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);
3677 tcg_gen_rotr_i64(t0, t0, shift64);
3678 tcg_temp_free_i64(shift64);
3680 /* result is value: [X:reg:..:reg] */
3682 tcg_gen_extr_i64_i32(lo, hi, t0);
3684 /* extract X */
3686 tcg_gen_shri_i32(X, hi, 31);
3688 /* extract result */
3690 tcg_gen_shli_i32(hi, hi, 1);
3692 tcg_temp_free_i64(t0);
3693 tcg_gen_or_i32(lo, lo, hi);
3694 tcg_temp_free(hi);
3696 /* if shift == 0, register and X are not affected */
3698 zero = tcg_const_i32(0);
3699 tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
3700 tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
3701 tcg_temp_free(zero);
3702 tcg_temp_free(lo);
3704 return X;
/* ROL/ROR/ROXL/ROXR #imm,Dn (long).  Count 0 encodes 8; bit 3 selects
   plain rotate vs rotate-through-X. */
3707 DISAS_INSN(rotate_im)
3709 TCGv shift;
3710 int tmp;
3711 int left = (insn & 0x100);
3713 tmp = (insn >> 9) & 7;
3714 if (tmp == 0) {
3715 tmp = 8;
3718 shift = tcg_const_i32(tmp);
3719 if (insn & 8) {
3720 rotate(DREG(insn, 0), shift, left, 32);
3721 } else {
3722 TCGv X = rotate32_x(DREG(insn, 0), shift, left);
3723 rotate_x_flags(DREG(insn, 0), X, 32);
3724 tcg_temp_free(X);
3726 tcg_temp_free(shift);
3728 set_cc_op(s, CC_OP_FLAGS);
/* Byte-size immediate rotate (plain or through X). */
3731 DISAS_INSN(rotate8_im)
3733 int left = (insn & 0x100);
3734 TCGv reg;
3735 TCGv shift;
3736 int tmp;
3738 reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);
3740 tmp = (insn >> 9) & 7;
3741 if (tmp == 0) {
3742 tmp = 8;
3745 shift = tcg_const_i32(tmp);
3746 if (insn & 8) {
3747 rotate(reg, shift, left, 8);
3748 } else {
3749 TCGv X = rotate_x(reg, shift, left, 8);
3750 rotate_x_flags(reg, X, 8);
3751 tcg_temp_free(X);
3753 tcg_temp_free(shift);
3754 gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3755 set_cc_op(s, CC_OP_FLAGS);
/* Word-size immediate rotate (plain or through X). */
3758 DISAS_INSN(rotate16_im)
3760 int left = (insn & 0x100);
3761 TCGv reg;
3762 TCGv shift;
3763 int tmp;
3765 reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
3766 tmp = (insn >> 9) & 7;
3767 if (tmp == 0) {
3768 tmp = 8;
3771 shift = tcg_const_i32(tmp);
3772 if (insn & 8) {
3773 rotate(reg, shift, left, 16);
3774 } else {
3775 TCGv X = rotate_x(reg, shift, left, 16);
3776 rotate_x_flags(reg, X, 16);
3777 tcg_temp_free(X);
3779 tcg_temp_free(shift);
3780 gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3781 set_cc_op(s, CC_OP_FLAGS);
/* Long rotate with register count.  Plain rotates use the count mod 32
 * (C cleared when the full count mod 64 is 0); rotate-through-X uses
 * the count mod 33.
 */
3784 DISAS_INSN(rotate_reg)
3786 TCGv reg;
3787 TCGv src;
3788 TCGv t0, t1;
3789 int left = (insn & 0x100);
3791 reg = DREG(insn, 0);
3792 src = DREG(insn, 9);
3793 /* shift in [0..63] */
3794 t0 = tcg_temp_new();
3795 tcg_gen_andi_i32(t0, src, 63);
3796 t1 = tcg_temp_new_i32();
3797 if (insn & 8) {
3798 tcg_gen_andi_i32(t1, src, 31);
3799 rotate(reg, t1, left, 32);
3800 /* if shift == 0, clear C */
3801 tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3802 t0, QREG_CC_V /* 0 */,
3803 QREG_CC_V /* 0 */, QREG_CC_C);
3804 } else {
3805 TCGv X;
3806 /* modulo 33 */
3807 tcg_gen_movi_i32(t1, 33);
3808 tcg_gen_remu_i32(t1, t0, t1);
3809 X = rotate32_x(DREG(insn, 0), t1, left);
3810 rotate_x_flags(DREG(insn, 0), X, 32);
3811 tcg_temp_free(X);
3813 tcg_temp_free(t1);
3814 tcg_temp_free(t0);
3815 set_cc_op(s, CC_OP_FLAGS);
/* Byte rotate with register count: plain rotates use count mod 8,
 * rotate-through-X uses count mod 9.
 */
3818 DISAS_INSN(rotate8_reg)
3820 TCGv reg;
3821 TCGv src;
3822 TCGv t0, t1;
3823 int left = (insn & 0x100);
3825 reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);
3826 src = DREG(insn, 9);
3827 /* shift in [0..63] */
3828 t0 = tcg_temp_new_i32();
3829 tcg_gen_andi_i32(t0, src, 63);
3830 t1 = tcg_temp_new_i32();
3831 if (insn & 8) {
3832 tcg_gen_andi_i32(t1, src, 7);
3833 rotate(reg, t1, left, 8);
3834 /* if shift == 0, clear C */
3835 tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3836 t0, QREG_CC_V /* 0 */,
3837 QREG_CC_V /* 0 */, QREG_CC_C);
3838 } else {
3839 TCGv X;
3840 /* modulo 9 */
3841 tcg_gen_movi_i32(t1, 9);
3842 tcg_gen_remu_i32(t1, t0, t1);
3843 X = rotate_x(reg, t1, left, 8);
3844 rotate_x_flags(reg, X, 8);
3845 tcg_temp_free(X);
3847 tcg_temp_free(t1);
3848 tcg_temp_free(t0);
3849 gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3850 set_cc_op(s, CC_OP_FLAGS);
/* Word rotate with register count: plain rotates use count mod 16,
 * rotate-through-X uses count mod 17.
 */
3853 DISAS_INSN(rotate16_reg)
3855 TCGv reg;
3856 TCGv src;
3857 TCGv t0, t1;
3858 int left = (insn & 0x100);
3860 reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
3861 src = DREG(insn, 9);
3862 /* shift in [0..63] */
3863 t0 = tcg_temp_new_i32();
3864 tcg_gen_andi_i32(t0, src, 63);
3865 t1 = tcg_temp_new_i32();
3866 if (insn & 8) {
3867 tcg_gen_andi_i32(t1, src, 15);
3868 rotate(reg, t1, left, 16);
3869 /* if shift == 0, clear C */
3870 tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3871 t0, QREG_CC_V /* 0 */,
3872 QREG_CC_V /* 0 */, QREG_CC_C);
3873 } else {
3874 TCGv X;
3875 /* modulo 17 */
3876 tcg_gen_movi_i32(t1, 17);
3877 tcg_gen_remu_i32(t1, t0, t1);
3878 X = rotate_x(reg, t1, left, 16);
3879 rotate_x_flags(reg, X, 16);
3880 tcg_temp_free(X);
3882 tcg_temp_free(t1);
3883 tcg_temp_free(t0);
3884 gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3885 set_cc_op(s, CC_OP_FLAGS);
/* Memory rotate: always word-sized, always by exactly 1; bit 9 selects
   plain rotate vs rotate-through-X.  Read-modify-write via the EA. */
3888 DISAS_INSN(rotate_mem)
3890 TCGv src;
3891 TCGv addr;
3892 TCGv shift;
3893 int left = (insn & 0x100);
3895 SRC_EA(env, src, OS_WORD, 0, &addr);
3897 shift = tcg_const_i32(1);
3898 if (insn & 0x0200) {
3899 rotate(src, shift, left, 16);
3900 } else {
3901 TCGv X = rotate_x(src, shift, left, 16);
3902 rotate_x_flags(src, X, 16);
3903 tcg_temp_free(X);
3905 tcg_temp_free(shift);
3906 DEST_EA(env, insn, OS_WORD, src, &addr);
3907 set_cc_op(s, CC_OP_FLAGS);
/* BFEXTU/BFEXTS Dn{offset:width},Dd: extract a bit field from a data
 * register.  Offsets/widths may each be immediate or register-based;
 * register widths/offsets force the rotate+shift path, immediates can
 * often use (s)extract directly.  N/Z derive from the sign-extended
 * field left in QREG_CC_N.
 */
3910 DISAS_INSN(bfext_reg)
3912 int ext = read_im16(env, s);
3913 int is_sign = insn & 0x200;
3914 TCGv src = DREG(insn, 0);
3915 TCGv dst = DREG(ext, 12);
3916 int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
3917 int ofs = extract32(ext, 6, 5); /* big bit-endian */
3918 int pos = 32 - ofs - len; /* little bit-endian */
3919 TCGv tmp = tcg_temp_new();
3920 TCGv shift;
3922 /* In general, we're going to rotate the field so that it's at the
3923 top of the word and then right-shift by the compliment of the
3924 width to extend the field. */
3925 if (ext & 0x20) {
3926 /* Variable width. */
3927 if (ext & 0x800) {
3928 /* Variable offset. */
3929 tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
3930 tcg_gen_rotl_i32(tmp, src, tmp);
3931 } else {
3932 tcg_gen_rotli_i32(tmp, src, ofs);
/* shift = (32 - width) mod 32; arithmetic shift sign-extends,
   logical shift zero-extends the field. */
3935 shift = tcg_temp_new();
3936 tcg_gen_neg_i32(shift, DREG(ext, 0));
3937 tcg_gen_andi_i32(shift, shift, 31);
3938 tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
3939 if (is_sign) {
3940 tcg_gen_mov_i32(dst, QREG_CC_N);
3941 } else {
3942 tcg_gen_shr_i32(dst, tmp, shift);
3944 tcg_temp_free(shift);
3945 } else {
3946 /* Immediate width. */
3947 if (ext & 0x800) {
3948 /* Variable offset */
3949 tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
3950 tcg_gen_rotl_i32(tmp, src, tmp);
3951 src = tmp;
3952 pos = 32 - len;
3953 } else {
3954 /* Immediate offset. If the field doesn't wrap around the
3955 end of the word, rely on (s)extract completely. */
3956 if (pos < 0) {
3957 tcg_gen_rotli_i32(tmp, src, ofs);
3958 src = tmp;
3959 pos = 32 - len;
3963 tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
3964 if (is_sign) {
3965 tcg_gen_mov_i32(dst, QREG_CC_N);
3966 } else {
3967 tcg_gen_extract_i32(dst, src, pos, len);
3971 tcg_temp_free(tmp);
3972 set_cc_op(s, CC_OP_LOGIC);
/* BFEXTU/BFEXTS with a memory operand: delegate to helpers.  The
 * unsigned helper returns a 64-bit pair so both the destination value
 * and the flag source (sign-extended field) can be recovered.
 */
3975 DISAS_INSN(bfext_mem)
3977 int ext = read_im16(env, s);
3978 int is_sign = insn & 0x200;
3979 TCGv dest = DREG(ext, 12);
3980 TCGv addr, len, ofs;
3982 addr = gen_lea(env, s, insn, OS_UNSIZED);
3983 if (IS_NULL_QREG(addr)) {
3984 gen_addr_fault(s);
3985 return;
/* len/ofs come from registers or 5-bit immediates in the ext word. */
3988 if (ext & 0x20) {
3989 len = DREG(ext, 0);
3990 } else {
3991 len = tcg_const_i32(extract32(ext, 0, 5));
3993 if (ext & 0x800) {
3994 ofs = DREG(ext, 6);
3995 } else {
3996 ofs = tcg_const_i32(extract32(ext, 6, 5));
3999 if (is_sign) {
4000 gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
4001 tcg_gen_mov_i32(QREG_CC_N, dest);
4002 } else {
4003 TCGv_i64 tmp = tcg_temp_new_i64();
4004 gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
4005 tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
4006 tcg_temp_free_i64(tmp);
4008 set_cc_op(s, CC_OP_LOGIC);
/* Only free len/ofs when they are consts we allocated above. */
4010 if (!(ext & 0x20)) {
4011 tcg_temp_free(len);
4013 if (!(ext & 0x800)) {
4014 tcg_temp_free(ofs);
/* BFCHG/BFCLR/BFSET/BFTST/BFFFO Dn{offset:width}: register-operand bit
 * field ops.  Computes the field rotated to the top of the word in
 * QREG_CC_N (flag source) and a mask with zeros over the field; the
 * final switch applies the operation.  tofs/tlen are only populated
 * for BFFFO, which needs the runtime offset/length.
 */
4018 DISAS_INSN(bfop_reg)
4020 int ext = read_im16(env, s);
4021 TCGv src = DREG(insn, 0);
4022 int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
4023 int ofs = extract32(ext, 6, 5); /* big bit-endian */
4024 TCGv mask, tofs, tlen;
4026 tofs = NULL;
4027 tlen = NULL;
4028 if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
4029 tofs = tcg_temp_new();
4030 tlen = tcg_temp_new();
4033 if ((ext & 0x820) == 0) {
4034 /* Immediate width and offset. */
4035 uint32_t maski = 0x7fffffffu >> (len - 1);
4036 if (ofs + len <= 32) {
4037 tcg_gen_shli_i32(QREG_CC_N, src, ofs);
4038 } else {
4039 tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
4041 tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
4042 mask = tcg_const_i32(ror32(maski, ofs));
4043 if (tofs) {
4044 tcg_gen_movi_i32(tofs, ofs);
4045 tcg_gen_movi_i32(tlen, len);
4047 } else {
4048 TCGv tmp = tcg_temp_new();
4049 if (ext & 0x20) {
4050 /* Variable width */
4051 tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
4052 tcg_gen_andi_i32(tmp, tmp, 31);
4053 mask = tcg_const_i32(0x7fffffffu);
4054 tcg_gen_shr_i32(mask, mask, tmp);
4055 if (tlen) {
4056 tcg_gen_addi_i32(tlen, tmp, 1);
4058 } else {
4059 /* Immediate width */
4060 mask = tcg_const_i32(0x7fffffffu >> (len - 1));
4061 if (tlen) {
4062 tcg_gen_movi_i32(tlen, len);
4065 if (ext & 0x800) {
4066 /* Variable offset */
4067 tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
4068 tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
4069 tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
4070 tcg_gen_rotr_i32(mask, mask, tmp);
4071 if (tofs) {
4072 tcg_gen_mov_i32(tofs, tmp);
4074 } else {
4075 /* Immediate offset (and variable width) */
4076 tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
4077 tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
4078 tcg_gen_rotri_i32(mask, mask, ofs);
4079 if (tofs) {
4080 tcg_gen_movi_i32(tofs, ofs);
4083 tcg_temp_free(tmp);
4085 set_cc_op(s, CC_OP_LOGIC);
4087 switch (insn & 0x0f00) {
4088 case 0x0a00: /* bfchg */
4089 tcg_gen_eqv_i32(src, src, mask);
4090 break;
4091 case 0x0c00: /* bfclr */
4092 tcg_gen_and_i32(src, src, mask);
4093 break;
4094 case 0x0d00: /* bfffo */
4095 gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
4096 tcg_temp_free(tlen);
4097 tcg_temp_free(tofs);
4098 break;
4099 case 0x0e00: /* bfset */
4100 tcg_gen_orc_i32(src, src, mask);
4101 break;
4102 case 0x0800: /* bftst */
4103 /* flags already set; no other work to do. */
4104 break;
4105 default:
4106 g_assert_not_reached();
4108 tcg_temp_free(mask);
4111 DISAS_INSN(bfop_mem)
4113 int ext = read_im16(env, s);
4114 TCGv addr, len, ofs;
4115 TCGv_i64 t64;
4117 addr = gen_lea(env, s, insn, OS_UNSIZED);
4118 if (IS_NULL_QREG(addr)) {
4119 gen_addr_fault(s);
4120 return;
4123 if (ext & 0x20) {
4124 len = DREG(ext, 0);
4125 } else {
4126 len = tcg_const_i32(extract32(ext, 0, 5));
4128 if (ext & 0x800) {
4129 ofs = DREG(ext, 6);
4130 } else {
4131 ofs = tcg_const_i32(extract32(ext, 6, 5));
4134 switch (insn & 0x0f00) {
4135 case 0x0a00: /* bfchg */
4136 gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4137 break;
4138 case 0x0c00: /* bfclr */
4139 gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4140 break;
4141 case 0x0d00: /* bfffo */
4142 t64 = tcg_temp_new_i64();
4143 gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
4144 tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
4145 tcg_temp_free_i64(t64);
4146 break;
4147 case 0x0e00: /* bfset */
4148 gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4149 break;
4150 case 0x0800: /* bftst */
4151 gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4152 break;
4153 default:
4154 g_assert_not_reached();
4156 set_cc_op(s, CC_OP_LOGIC);
4158 if (!(ext & 0x20)) {
4159 tcg_temp_free(len);
4161 if (!(ext & 0x800)) {
4162 tcg_temp_free(ofs);
4166 DISAS_INSN(bfins_reg)
4168 int ext = read_im16(env, s);
4169 TCGv dst = DREG(insn, 0);
4170 TCGv src = DREG(ext, 12);
4171 int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
4172 int ofs = extract32(ext, 6, 5); /* big bit-endian */
4173 int pos = 32 - ofs - len; /* little bit-endian */
4174 TCGv tmp;
4176 tmp = tcg_temp_new();
4178 if (ext & 0x20) {
4179 /* Variable width */
4180 tcg_gen_neg_i32(tmp, DREG(ext, 0));
4181 tcg_gen_andi_i32(tmp, tmp, 31);
4182 tcg_gen_shl_i32(QREG_CC_N, src, tmp);
4183 } else {
4184 /* Immediate width */
4185 tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
4187 set_cc_op(s, CC_OP_LOGIC);
4189 /* Immediate width and offset */
4190 if ((ext & 0x820) == 0) {
4191 /* Check for suitability for deposit. */
4192 if (pos >= 0) {
4193 tcg_gen_deposit_i32(dst, dst, src, pos, len);
4194 } else {
4195 uint32_t maski = -2U << (len - 1);
4196 uint32_t roti = (ofs + len) & 31;
4197 tcg_gen_andi_i32(tmp, src, ~maski);
4198 tcg_gen_rotri_i32(tmp, tmp, roti);
4199 tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
4200 tcg_gen_or_i32(dst, dst, tmp);
4202 } else {
4203 TCGv mask = tcg_temp_new();
4204 TCGv rot = tcg_temp_new();
4206 if (ext & 0x20) {
4207 /* Variable width */
4208 tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
4209 tcg_gen_andi_i32(rot, rot, 31);
4210 tcg_gen_movi_i32(mask, -2);
4211 tcg_gen_shl_i32(mask, mask, rot);
4212 tcg_gen_mov_i32(rot, DREG(ext, 0));
4213 tcg_gen_andc_i32(tmp, src, mask);
4214 } else {
4215 /* Immediate width (variable offset) */
4216 uint32_t maski = -2U << (len - 1);
4217 tcg_gen_andi_i32(tmp, src, ~maski);
4218 tcg_gen_movi_i32(mask, maski);
4219 tcg_gen_movi_i32(rot, len & 31);
4221 if (ext & 0x800) {
4222 /* Variable offset */
4223 tcg_gen_add_i32(rot, rot, DREG(ext, 6));
4224 } else {
4225 /* Immediate offset (variable width) */
4226 tcg_gen_addi_i32(rot, rot, ofs);
4228 tcg_gen_andi_i32(rot, rot, 31);
4229 tcg_gen_rotr_i32(mask, mask, rot);
4230 tcg_gen_rotr_i32(tmp, tmp, rot);
4231 tcg_gen_and_i32(dst, dst, mask);
4232 tcg_gen_or_i32(dst, dst, tmp);
4234 tcg_temp_free(rot);
4235 tcg_temp_free(mask);
4237 tcg_temp_free(tmp);
4240 DISAS_INSN(bfins_mem)
4242 int ext = read_im16(env, s);
4243 TCGv src = DREG(ext, 12);
4244 TCGv addr, len, ofs;
4246 addr = gen_lea(env, s, insn, OS_UNSIZED);
4247 if (IS_NULL_QREG(addr)) {
4248 gen_addr_fault(s);
4249 return;
4252 if (ext & 0x20) {
4253 len = DREG(ext, 0);
4254 } else {
4255 len = tcg_const_i32(extract32(ext, 0, 5));
4257 if (ext & 0x800) {
4258 ofs = DREG(ext, 6);
4259 } else {
4260 ofs = tcg_const_i32(extract32(ext, 6, 5));
4263 gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
4264 set_cc_op(s, CC_OP_LOGIC);
4266 if (!(ext & 0x20)) {
4267 tcg_temp_free(len);
4269 if (!(ext & 0x800)) {
4270 tcg_temp_free(ofs);
4274 DISAS_INSN(ff1)
4276 TCGv reg;
4277 reg = DREG(insn, 0);
4278 gen_logic_cc(s, reg, OS_LONG);
4279 gen_helper_ff1(reg, reg);
4282 DISAS_INSN(chk)
4284 TCGv src, reg;
4285 int opsize;
4287 switch ((insn >> 7) & 3) {
4288 case 3:
4289 opsize = OS_WORD;
4290 break;
4291 case 2:
4292 if (m68k_feature(env, M68K_FEATURE_CHK2)) {
4293 opsize = OS_LONG;
4294 break;
4296 /* fallthru */
4297 default:
4298 gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4299 return;
4301 SRC_EA(env, src, opsize, 1, NULL);
4302 reg = gen_extend(DREG(insn, 9), opsize, 1);
4304 gen_flush_flags(s);
4305 gen_helper_chk(cpu_env, reg, src);
4308 DISAS_INSN(chk2)
4310 uint16_t ext;
4311 TCGv addr1, addr2, bound1, bound2, reg;
4312 int opsize;
4314 switch ((insn >> 9) & 3) {
4315 case 0:
4316 opsize = OS_BYTE;
4317 break;
4318 case 1:
4319 opsize = OS_WORD;
4320 break;
4321 case 2:
4322 opsize = OS_LONG;
4323 break;
4324 default:
4325 gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4326 return;
4329 ext = read_im16(env, s);
4330 if ((ext & 0x0800) == 0) {
4331 gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4332 return;
4335 addr1 = gen_lea(env, s, insn, OS_UNSIZED);
4336 addr2 = tcg_temp_new();
4337 tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));
4339 bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
4340 tcg_temp_free(addr1);
4341 bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));
4342 tcg_temp_free(addr2);
4344 reg = tcg_temp_new();
4345 if (ext & 0x8000) {
4346 tcg_gen_mov_i32(reg, AREG(ext, 12));
4347 } else {
4348 gen_ext(reg, DREG(ext, 12), opsize, 1);
4351 gen_flush_flags(s);
4352 gen_helper_chk2(cpu_env, reg, bound1, bound2);
4353 tcg_temp_free(reg);
4356 static void m68k_copy_line(TCGv dst, TCGv src, int index)
4358 TCGv addr;
4359 TCGv_i64 t0, t1;
4361 addr = tcg_temp_new();
4363 t0 = tcg_temp_new_i64();
4364 t1 = tcg_temp_new_i64();
4366 tcg_gen_andi_i32(addr, src, ~15);
4367 tcg_gen_qemu_ld64(t0, addr, index);
4368 tcg_gen_addi_i32(addr, addr, 8);
4369 tcg_gen_qemu_ld64(t1, addr, index);
4371 tcg_gen_andi_i32(addr, dst, ~15);
4372 tcg_gen_qemu_st64(t0, addr, index);
4373 tcg_gen_addi_i32(addr, addr, 8);
4374 tcg_gen_qemu_st64(t1, addr, index);
4376 tcg_temp_free_i64(t0);
4377 tcg_temp_free_i64(t1);
4378 tcg_temp_free(addr);
4381 DISAS_INSN(move16_reg)
4383 int index = IS_USER(s);
4384 TCGv tmp;
4385 uint16_t ext;
4387 ext = read_im16(env, s);
4388 if ((ext & (1 << 15)) == 0) {
4389 gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4392 m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4394 /* Ax can be Ay, so save Ay before incrementing Ax */
4395 tmp = tcg_temp_new();
4396 tcg_gen_mov_i32(tmp, AREG(ext, 12));
4397 tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4398 tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4399 tcg_temp_free(tmp);
4402 DISAS_INSN(move16_mem)
4404 int index = IS_USER(s);
4405 TCGv reg, addr;
4407 reg = AREG(insn, 0);
4408 addr = tcg_const_i32(read_im32(env, s));
4410 if ((insn >> 3) & 1) {
4411 /* MOVE16 (xxx).L, (Ay) */
4412 m68k_copy_line(reg, addr, index);
4413 } else {
4414 /* MOVE16 (Ay), (xxx).L */
4415 m68k_copy_line(addr, reg, index);
4418 tcg_temp_free(addr);
4420 if (((insn >> 3) & 2) == 0) {
4421 /* (Ay)+ */
4422 tcg_gen_addi_i32(reg, reg, 16);
4426 DISAS_INSN(strldsr)
4428 uint16_t ext;
4429 uint32_t addr;
4431 addr = s->pc - 2;
4432 ext = read_im16(env, s);
4433 if (ext != 0x46FC) {
4434 gen_exception(s, addr, EXCP_UNSUPPORTED);
4435 return;
4437 ext = read_im16(env, s);
4438 if (IS_USER(s) || (ext & SR_S) == 0) {
4439 gen_exception(s, addr, EXCP_PRIVILEGE);
4440 return;
4442 gen_push(s, gen_get_sr(s));
4443 gen_set_sr_im(s, ext, 0);
4446 DISAS_INSN(move_from_sr)
4448 TCGv sr;
4450 if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
4451 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4452 return;
4454 sr = gen_get_sr(s);
4455 DEST_EA(env, insn, OS_WORD, sr, NULL);
4458 #if defined(CONFIG_SOFTMMU)
4459 DISAS_INSN(moves)
4461 int opsize;
4462 uint16_t ext;
4463 TCGv reg;
4464 TCGv addr;
4465 int extend;
4467 if (IS_USER(s)) {
4468 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4469 return;
4472 ext = read_im16(env, s);
4474 opsize = insn_opsize(insn);
4476 if (ext & 0x8000) {
4477 /* address register */
4478 reg = AREG(ext, 12);
4479 extend = 1;
4480 } else {
4481 /* data register */
4482 reg = DREG(ext, 12);
4483 extend = 0;
4486 addr = gen_lea(env, s, insn, opsize);
4487 if (IS_NULL_QREG(addr)) {
4488 gen_addr_fault(s);
4489 return;
4492 if (ext & 0x0800) {
4493 /* from reg to ea */
4494 gen_store(s, opsize, addr, reg, DFC_INDEX(s));
4495 } else {
4496 /* from ea to reg */
4497 TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
4498 if (extend) {
4499 gen_ext(reg, tmp, opsize, 1);
4500 } else {
4501 gen_partset_reg(opsize, reg, tmp);
4504 switch (extract32(insn, 3, 3)) {
4505 case 3: /* Indirect postincrement. */
4506 tcg_gen_addi_i32(AREG(insn, 0), addr,
4507 REG(insn, 0) == 7 && opsize == OS_BYTE
4509 : opsize_bytes(opsize));
4510 break;
4511 case 4: /* Indirect predecrememnt. */
4512 tcg_gen_mov_i32(AREG(insn, 0), addr);
4513 break;
4517 DISAS_INSN(move_to_sr)
4519 if (IS_USER(s)) {
4520 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4521 return;
4523 gen_move_to_sr(env, s, insn, false);
4524 gen_lookup_tb(s);
4527 DISAS_INSN(move_from_usp)
4529 if (IS_USER(s)) {
4530 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4531 return;
4533 tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
4534 offsetof(CPUM68KState, sp[M68K_USP]));
4537 DISAS_INSN(move_to_usp)
4539 if (IS_USER(s)) {
4540 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4541 return;
4543 tcg_gen_st_i32(AREG(insn, 0), cpu_env,
4544 offsetof(CPUM68KState, sp[M68K_USP]));
4547 DISAS_INSN(halt)
4549 if (IS_USER(s)) {
4550 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4551 return;
4554 gen_exception(s, s->pc, EXCP_HALT_INSN);
4557 DISAS_INSN(stop)
4559 uint16_t ext;
4561 if (IS_USER(s)) {
4562 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4563 return;
4566 ext = read_im16(env, s);
4568 gen_set_sr_im(s, ext, 0);
4569 tcg_gen_movi_i32(cpu_halted, 1);
4570 gen_exception(s, s->pc, EXCP_HLT);
4573 DISAS_INSN(rte)
4575 if (IS_USER(s)) {
4576 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4577 return;
4579 gen_exception(s, s->insn_pc, EXCP_RTE);
4582 DISAS_INSN(cf_movec)
4584 uint16_t ext;
4585 TCGv reg;
4587 if (IS_USER(s)) {
4588 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4589 return;
4592 ext = read_im16(env, s);
4594 if (ext & 0x8000) {
4595 reg = AREG(ext, 12);
4596 } else {
4597 reg = DREG(ext, 12);
4599 gen_helper_cf_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4600 gen_lookup_tb(s);
4603 DISAS_INSN(m68k_movec)
4605 uint16_t ext;
4606 TCGv reg;
4608 if (IS_USER(s)) {
4609 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4610 return;
4613 ext = read_im16(env, s);
4615 if (ext & 0x8000) {
4616 reg = AREG(ext, 12);
4617 } else {
4618 reg = DREG(ext, 12);
4620 if (insn & 1) {
4621 gen_helper_m68k_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4622 } else {
4623 gen_helper_m68k_movec_from(reg, cpu_env, tcg_const_i32(ext & 0xfff));
4625 gen_lookup_tb(s);
4628 DISAS_INSN(intouch)
4630 if (IS_USER(s)) {
4631 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4632 return;
4634 /* ICache fetch. Implement as no-op. */
4637 DISAS_INSN(cpushl)
4639 if (IS_USER(s)) {
4640 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4641 return;
4643 /* Cache push/invalidate. Implement as no-op. */
4646 DISAS_INSN(cpush)
4648 if (IS_USER(s)) {
4649 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4650 return;
4652 /* Cache push/invalidate. Implement as no-op. */
4655 DISAS_INSN(cinv)
4657 if (IS_USER(s)) {
4658 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4659 return;
4661 /* Invalidate cache line. Implement as no-op. */
4664 #if defined(CONFIG_SOFTMMU)
4665 DISAS_INSN(pflush)
4667 TCGv opmode;
4669 if (IS_USER(s)) {
4670 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4671 return;
4674 opmode = tcg_const_i32((insn >> 3) & 3);
4675 gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
4676 tcg_temp_free(opmode);
4679 DISAS_INSN(ptest)
4681 TCGv is_read;
4683 if (IS_USER(s)) {
4684 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4685 return;
4687 is_read = tcg_const_i32((insn >> 5) & 1);
4688 gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
4689 tcg_temp_free(is_read);
4691 #endif
4693 DISAS_INSN(wddata)
4695 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4698 DISAS_INSN(wdebug)
4700 M68kCPU *cpu = m68k_env_get_cpu(env);
4702 if (IS_USER(s)) {
4703 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4704 return;
4706 /* TODO: Implement wdebug. */
4707 cpu_abort(CPU(cpu), "WDEBUG not implemented");
4709 #endif
4711 DISAS_INSN(trap)
4713 gen_exception(s, s->insn_pc, EXCP_TRAP0 + (insn & 0xf));
4716 static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
4718 switch (reg) {
4719 case M68K_FPIAR:
4720 tcg_gen_movi_i32(res, 0);
4721 break;
4722 case M68K_FPSR:
4723 tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
4724 break;
4725 case M68K_FPCR:
4726 tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
4727 break;
4731 static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
4733 switch (reg) {
4734 case M68K_FPIAR:
4735 break;
4736 case M68K_FPSR:
4737 tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
4738 break;
4739 case M68K_FPCR:
4740 gen_helper_set_fpcr(cpu_env, val);
4741 break;
4745 static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
4747 int index = IS_USER(s);
4748 TCGv tmp;
4750 tmp = tcg_temp_new();
4751 gen_load_fcr(s, tmp, reg);
4752 tcg_gen_qemu_st32(tmp, addr, index);
4753 tcg_temp_free(tmp);
4756 static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
4758 int index = IS_USER(s);
4759 TCGv tmp;
4761 tmp = tcg_temp_new();
4762 tcg_gen_qemu_ld32u(tmp, addr, index);
4763 gen_store_fcr(s, tmp, reg);
4764 tcg_temp_free(tmp);
4768 static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
4769 uint32_t insn, uint32_t ext)
4771 int mask = (ext >> 10) & 7;
4772 int is_write = (ext >> 13) & 1;
4773 int mode = extract32(insn, 3, 3);
4774 int i;
4775 TCGv addr, tmp;
4777 switch (mode) {
4778 case 0: /* Dn */
4779 if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
4780 gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4781 return;
4783 if (is_write) {
4784 gen_load_fcr(s, DREG(insn, 0), mask);
4785 } else {
4786 gen_store_fcr(s, DREG(insn, 0), mask);
4788 return;
4789 case 1: /* An, only with FPIAR */
4790 if (mask != M68K_FPIAR) {
4791 gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4792 return;
4794 if (is_write) {
4795 gen_load_fcr(s, AREG(insn, 0), mask);
4796 } else {
4797 gen_store_fcr(s, AREG(insn, 0), mask);
4799 return;
4800 default:
4801 break;
4804 tmp = gen_lea(env, s, insn, OS_LONG);
4805 if (IS_NULL_QREG(tmp)) {
4806 gen_addr_fault(s);
4807 return;
4810 addr = tcg_temp_new();
4811 tcg_gen_mov_i32(addr, tmp);
4813 /* mask:
4815 * 0b100 Floating-Point Control Register
4816 * 0b010 Floating-Point Status Register
4817 * 0b001 Floating-Point Instruction Address Register
4821 if (is_write && mode == 4) {
4822 for (i = 2; i >= 0; i--, mask >>= 1) {
4823 if (mask & 1) {
4824 gen_qemu_store_fcr(s, addr, 1 << i);
4825 if (mask != 1) {
4826 tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
4830 tcg_gen_mov_i32(AREG(insn, 0), addr);
4831 } else {
4832 for (i = 0; i < 3; i++, mask >>= 1) {
4833 if (mask & 1) {
4834 if (is_write) {
4835 gen_qemu_store_fcr(s, addr, 1 << i);
4836 } else {
4837 gen_qemu_load_fcr(s, addr, 1 << i);
4839 if (mask != 1 || mode == 3) {
4840 tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
4844 if (mode == 3) {
4845 tcg_gen_mov_i32(AREG(insn, 0), addr);
4848 tcg_temp_free_i32(addr);
4851 static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
4852 uint32_t insn, uint32_t ext)
4854 int opsize;
4855 TCGv addr, tmp;
4856 int mode = (ext >> 11) & 0x3;
4857 int is_load = ((ext & 0x2000) == 0);
4859 if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
4860 opsize = OS_EXTENDED;
4861 } else {
4862 opsize = OS_DOUBLE; /* FIXME */
4865 addr = gen_lea(env, s, insn, opsize);
4866 if (IS_NULL_QREG(addr)) {
4867 gen_addr_fault(s);
4868 return;
4871 tmp = tcg_temp_new();
4872 if (mode & 0x1) {
4873 /* Dynamic register list */
4874 tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
4875 } else {
4876 /* Static register list */
4877 tcg_gen_movi_i32(tmp, ext & 0xff);
4880 if (!is_load && (mode & 2) == 0) {
4881 /* predecrement addressing mode
4882 * only available to store register to memory
4884 if (opsize == OS_EXTENDED) {
4885 gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
4886 } else {
4887 gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
4889 } else {
4890 /* postincrement addressing mode */
4891 if (opsize == OS_EXTENDED) {
4892 if (is_load) {
4893 gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
4894 } else {
4895 gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
4897 } else {
4898 if (is_load) {
4899 gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
4900 } else {
4901 gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
4905 if ((insn & 070) == 030 || (insn & 070) == 040) {
4906 tcg_gen_mov_i32(AREG(insn, 0), tmp);
4908 tcg_temp_free(tmp);
4911 /* ??? FP exceptions are not implemented. Most exceptions are deferred until
4912 immediately before the next FP instruction is executed. */
4913 DISAS_INSN(fpu)
4915 uint16_t ext;
4916 int opmode;
4917 int opsize;
4918 TCGv_ptr cpu_src, cpu_dest;
4920 ext = read_im16(env, s);
4921 opmode = ext & 0x7f;
4922 switch ((ext >> 13) & 7) {
4923 case 0:
4924 break;
4925 case 1:
4926 goto undef;
4927 case 2:
4928 if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
4929 /* fmovecr */
4930 TCGv rom_offset = tcg_const_i32(opmode);
4931 cpu_dest = gen_fp_ptr(REG(ext, 7));
4932 gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
4933 tcg_temp_free_ptr(cpu_dest);
4934 tcg_temp_free(rom_offset);
4935 return;
4937 break;
4938 case 3: /* fmove out */
4939 cpu_src = gen_fp_ptr(REG(ext, 7));
4940 opsize = ext_opsize(ext, 10);
4941 if (gen_ea_fp(env, s, insn, opsize, cpu_src,
4942 EA_STORE, IS_USER(s)) == -1) {
4943 gen_addr_fault(s);
4945 gen_helper_ftst(cpu_env, cpu_src);
4946 tcg_temp_free_ptr(cpu_src);
4947 return;
4948 case 4: /* fmove to control register. */
4949 case 5: /* fmove from control register. */
4950 gen_op_fmove_fcr(env, s, insn, ext);
4951 return;
4952 case 6: /* fmovem */
4953 case 7:
4954 if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
4955 goto undef;
4957 gen_op_fmovem(env, s, insn, ext);
4958 return;
4960 if (ext & (1 << 14)) {
4961 /* Source effective address. */
4962 opsize = ext_opsize(ext, 10);
4963 cpu_src = gen_fp_result_ptr();
4964 if (gen_ea_fp(env, s, insn, opsize, cpu_src,
4965 EA_LOADS, IS_USER(s)) == -1) {
4966 gen_addr_fault(s);
4967 return;
4969 } else {
4970 /* Source register. */
4971 opsize = OS_EXTENDED;
4972 cpu_src = gen_fp_ptr(REG(ext, 10));
4974 cpu_dest = gen_fp_ptr(REG(ext, 7));
4975 switch (opmode) {
4976 case 0: /* fmove */
4977 gen_fp_move(cpu_dest, cpu_src);
4978 break;
4979 case 0x40: /* fsmove */
4980 gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
4981 break;
4982 case 0x44: /* fdmove */
4983 gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
4984 break;
4985 case 1: /* fint */
4986 gen_helper_firound(cpu_env, cpu_dest, cpu_src);
4987 break;
4988 case 3: /* fintrz */
4989 gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
4990 break;
4991 case 4: /* fsqrt */
4992 gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
4993 break;
4994 case 0x41: /* fssqrt */
4995 gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
4996 break;
4997 case 0x45: /* fdsqrt */
4998 gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
4999 break;
5000 case 0x18: /* fabs */
5001 gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
5002 break;
5003 case 0x58: /* fsabs */
5004 gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
5005 break;
5006 case 0x5c: /* fdabs */
5007 gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
5008 break;
5009 case 0x1a: /* fneg */
5010 gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
5011 break;
5012 case 0x5a: /* fsneg */
5013 gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
5014 break;
5015 case 0x5e: /* fdneg */
5016 gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
5017 break;
5018 case 0x20: /* fdiv */
5019 gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5020 break;
5021 case 0x60: /* fsdiv */
5022 gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5023 break;
5024 case 0x64: /* fddiv */
5025 gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5026 break;
5027 case 0x22: /* fadd */
5028 gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5029 break;
5030 case 0x62: /* fsadd */
5031 gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5032 break;
5033 case 0x66: /* fdadd */
5034 gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5035 break;
5036 case 0x23: /* fmul */
5037 gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5038 break;
5039 case 0x63: /* fsmul */
5040 gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5041 break;
5042 case 0x67: /* fdmul */
5043 gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5044 break;
5045 case 0x24: /* fsgldiv */
5046 gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5047 break;
5048 case 0x27: /* fsglmul */
5049 gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5050 break;
5051 case 0x28: /* fsub */
5052 gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5053 break;
5054 case 0x68: /* fssub */
5055 gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5056 break;
5057 case 0x6c: /* fdsub */
5058 gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5059 break;
5060 case 0x38: /* fcmp */
5061 gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
5062 return;
5063 case 0x3a: /* ftst */
5064 gen_helper_ftst(cpu_env, cpu_src);
5065 return;
5066 default:
5067 goto undef;
5069 tcg_temp_free_ptr(cpu_src);
5070 gen_helper_ftst(cpu_env, cpu_dest);
5071 tcg_temp_free_ptr(cpu_dest);
5072 return;
5073 undef:
5074 /* FIXME: Is this right for offset addressing modes? */
5075 s->pc -= 2;
5076 disas_undef_fpu(env, s, insn);
5079 static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
5081 TCGv fpsr;
5083 c->g1 = 1;
5084 c->v2 = tcg_const_i32(0);
5085 c->g2 = 0;
5086 /* TODO: Raise BSUN exception. */
5087 fpsr = tcg_temp_new();
5088 gen_load_fcr(s, fpsr, M68K_FPSR);
5089 switch (cond) {
5090 case 0: /* False */
5091 case 16: /* Signaling False */
5092 c->v1 = c->v2;
5093 c->tcond = TCG_COND_NEVER;
5094 break;
5095 case 1: /* EQual Z */
5096 case 17: /* Signaling EQual Z */
5097 c->v1 = tcg_temp_new();
5098 c->g1 = 0;
5099 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5100 c->tcond = TCG_COND_NE;
5101 break;
5102 case 2: /* Ordered Greater Than !(A || Z || N) */
5103 case 18: /* Greater Than !(A || Z || N) */
5104 c->v1 = tcg_temp_new();
5105 c->g1 = 0;
5106 tcg_gen_andi_i32(c->v1, fpsr,
5107 FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5108 c->tcond = TCG_COND_EQ;
5109 break;
5110 case 3: /* Ordered Greater than or Equal Z || !(A || N) */
5111 case 19: /* Greater than or Equal Z || !(A || N) */
5112 c->v1 = tcg_temp_new();
5113 c->g1 = 0;
5114 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5115 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
5116 tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
5117 tcg_gen_or_i32(c->v1, c->v1, fpsr);
5118 tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5119 c->tcond = TCG_COND_NE;
5120 break;
5121 case 4: /* Ordered Less Than !(!N || A || Z); */
5122 case 20: /* Less Than !(!N || A || Z); */
5123 c->v1 = tcg_temp_new();
5124 c->g1 = 0;
5125 tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
5126 tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
5127 c->tcond = TCG_COND_EQ;
5128 break;
5129 case 5: /* Ordered Less than or Equal Z || (N && !A) */
5130 case 21: /* Less than or Equal Z || (N && !A) */
5131 c->v1 = tcg_temp_new();
5132 c->g1 = 0;
5133 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5134 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
5135 tcg_gen_andc_i32(c->v1, fpsr, c->v1);
5136 tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
5137 c->tcond = TCG_COND_NE;
5138 break;
5139 case 6: /* Ordered Greater or Less than !(A || Z) */
5140 case 22: /* Greater or Less than !(A || Z) */
5141 c->v1 = tcg_temp_new();
5142 c->g1 = 0;
5143 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
5144 c->tcond = TCG_COND_EQ;
5145 break;
5146 case 7: /* Ordered !A */
5147 case 23: /* Greater, Less or Equal !A */
5148 c->v1 = tcg_temp_new();
5149 c->g1 = 0;
5150 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5151 c->tcond = TCG_COND_EQ;
5152 break;
5153 case 8: /* Unordered A */
5154 case 24: /* Not Greater, Less or Equal A */
5155 c->v1 = tcg_temp_new();
5156 c->g1 = 0;
5157 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5158 c->tcond = TCG_COND_NE;
5159 break;
5160 case 9: /* Unordered or Equal A || Z */
5161 case 25: /* Not Greater or Less then A || Z */
5162 c->v1 = tcg_temp_new();
5163 c->g1 = 0;
5164 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
5165 c->tcond = TCG_COND_NE;
5166 break;
5167 case 10: /* Unordered or Greater Than A || !(N || Z)) */
5168 case 26: /* Not Less or Equal A || !(N || Z)) */
5169 c->v1 = tcg_temp_new();
5170 c->g1 = 0;
5171 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5172 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
5173 tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
5174 tcg_gen_or_i32(c->v1, c->v1, fpsr);
5175 tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5176 c->tcond = TCG_COND_NE;
5177 break;
5178 case 11: /* Unordered or Greater or Equal A || Z || !N */
5179 case 27: /* Not Less Than A || Z || !N */
5180 c->v1 = tcg_temp_new();
5181 c->g1 = 0;
5182 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5183 tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5184 c->tcond = TCG_COND_NE;
5185 break;
5186 case 12: /* Unordered or Less Than A || (N && !Z) */
5187 case 28: /* Not Greater than or Equal A || (N && !Z) */
5188 c->v1 = tcg_temp_new();
5189 c->g1 = 0;
5190 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5191 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
5192 tcg_gen_andc_i32(c->v1, fpsr, c->v1);
5193 tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
5194 c->tcond = TCG_COND_NE;
5195 break;
5196 case 13: /* Unordered or Less or Equal A || Z || N */
5197 case 29: /* Not Greater Than A || Z || N */
5198 c->v1 = tcg_temp_new();
5199 c->g1 = 0;
5200 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5201 c->tcond = TCG_COND_NE;
5202 break;
5203 case 14: /* Not Equal !Z */
5204 case 30: /* Signaling Not Equal !Z */
5205 c->v1 = tcg_temp_new();
5206 c->g1 = 0;
5207 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5208 c->tcond = TCG_COND_EQ;
5209 break;
5210 case 15: /* True */
5211 case 31: /* Signaling True */
5212 c->v1 = c->v2;
5213 c->tcond = TCG_COND_ALWAYS;
5214 break;
5216 tcg_temp_free(fpsr);
5219 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
5221 DisasCompare c;
5223 gen_fcc_cond(&c, s, cond);
5224 update_cc_op(s);
5225 tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
5226 free_cond(&c);
5229 DISAS_INSN(fbcc)
5231 uint32_t offset;
5232 uint32_t base;
5233 TCGLabel *l1;
5235 base = s->pc;
5236 offset = (int16_t)read_im16(env, s);
5237 if (insn & (1 << 6)) {
5238 offset = (offset << 16) | read_im16(env, s);
5241 l1 = gen_new_label();
5242 update_cc_op(s);
5243 gen_fjmpcc(s, insn & 0x3f, l1);
5244 gen_jmp_tb(s, 0, s->pc);
5245 gen_set_label(l1);
5246 gen_jmp_tb(s, 1, base + offset);
5249 DISAS_INSN(fscc)
5251 DisasCompare c;
5252 int cond;
5253 TCGv tmp;
5254 uint16_t ext;
5256 ext = read_im16(env, s);
5257 cond = ext & 0x3f;
5258 gen_fcc_cond(&c, s, cond);
5260 tmp = tcg_temp_new();
5261 tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
5262 free_cond(&c);
5264 tcg_gen_neg_i32(tmp, tmp);
5265 DEST_EA(env, insn, OS_BYTE, tmp, NULL);
5266 tcg_temp_free(tmp);
5269 #if defined(CONFIG_SOFTMMU)
5270 DISAS_INSN(frestore)
5272 TCGv addr;
5274 if (IS_USER(s)) {
5275 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
5276 return;
5278 if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5279 SRC_EA(env, addr, OS_LONG, 0, NULL);
5280 /* FIXME: check the state frame */
5281 } else {
5282 disas_undef(env, s, insn);
5286 DISAS_INSN(fsave)
5288 if (IS_USER(s)) {
5289 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
5290 return;
5293 if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5294 /* always write IDLE */
5295 TCGv idle = tcg_const_i32(0x41000000);
5296 DEST_EA(env, insn, OS_LONG, idle, NULL);
5297 tcg_temp_free(idle);
5298 } else {
5299 disas_undef(env, s, insn);
5302 #endif
5304 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
5306 TCGv tmp = tcg_temp_new();
5307 if (s->env->macsr & MACSR_FI) {
5308 if (upper)
5309 tcg_gen_andi_i32(tmp, val, 0xffff0000);
5310 else
5311 tcg_gen_shli_i32(tmp, val, 16);
5312 } else if (s->env->macsr & MACSR_SU) {
5313 if (upper)
5314 tcg_gen_sari_i32(tmp, val, 16);
5315 else
5316 tcg_gen_ext16s_i32(tmp, val);
5317 } else {
5318 if (upper)
5319 tcg_gen_shri_i32(tmp, val, 16);
5320 else
5321 tcg_gen_ext16u_i32(tmp, val);
5323 return tmp;
5326 static void gen_mac_clear_flags(void)
5328 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
5329 ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
/*
 * ColdFire EMAC MAC/MSAC instruction: multiply two (half-)register
 * operands, accumulate into one of the four MAC accumulators, and
 * optionally perform a parallel memory load (the "MAC with load" form,
 * selected by insn bits 5:4).  Operand size, scaling and the dual
 * accumulate variant come from the 16-bit extension word.
 */
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    /* Lazily allocate the 64-bit scratch used for the product; it is
       shared by all MAC insns in this translation block.  */
    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    /* Accumulator index: insn bit 7 is the low bit, ext bit 4 the high bit. */
    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    /* The dual-accumulate form only exists on EMAC_B cores.  */
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load. */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /* Load the value now to ensure correct exception behavior.
           Perform writeback after reading the MAC inputs. */
        loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));

        acc ^= 1;
        /* In the load form the multiplier operands come from the
           extension word / insn fields below.  */
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars. */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it. */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word. */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        /* Scale-factor field (ext bits 10:9): 1 = product << 1,
           3 = product >> 1; integer modes only.  */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply. */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars. */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated. */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    /* insn bit 8 selects MSAC (subtract) vs MAC (add).  */
    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    /* Saturate the accumulator per the current operating mode.  */
    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars. */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant. */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier. */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars. */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated. */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars. */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));

    if (insn & 0x30) {
        /* Complete the parallel load: write the loaded value to Rw and
           perform the deferred address-register writeback.  */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /* FIXME: Should address writeback happen with the masked or
           unmasked value? */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment. */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement. */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
/*
 * MOVE ACCx,Rx: read a MAC accumulator into a data/address register,
 * converting per the MACSR operating mode.  If insn bit 6 is set the
 * accumulator and its sticky overflow (PAVx) flag are cleared afterwards
 * (the MOVCLR form).
 */
DISAS_INSN(from_mac)
{
    TCGv rx;
    TCGv_i64 acc;
    int accnum;

    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    if (s->env->macsr & MACSR_FI) {
        gen_helper_get_macf(rx, cpu_env, acc);
    } else if ((s->env->macsr & MACSR_OMC) == 0) {
        /* No saturation: just the low 32 bits of the accumulator.  */
        tcg_gen_extrl_i64_i32(rx, acc);
    } else if (s->env->macsr & MACSR_SU) {
        gen_helper_get_macs(rx, acc);
    } else {
        gen_helper_get_macu(rx, acc);
    }
    if (insn & 0x40) {
        /* MOVCLR: clear the accumulator and its PAVx flag.  */
        tcg_gen_movi_i64(acc, 0);
        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    }
}
5520 DISAS_INSN(move_mac)
5522 /* FIXME: This can be done without a helper. */
5523 int src;
5524 TCGv dest;
5525 src = insn & 3;
5526 dest = tcg_const_i32((insn >> 9) & 3);
5527 gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
5528 gen_mac_clear_flags();
5529 gen_helper_mac_set_flags(cpu_env, dest);
5532 DISAS_INSN(from_macsr)
5534 TCGv reg;
5536 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5537 tcg_gen_mov_i32(reg, QREG_MACSR);
5540 DISAS_INSN(from_mask)
5542 TCGv reg;
5543 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5544 tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5547 DISAS_INSN(from_mext)
5549 TCGv reg;
5550 TCGv acc;
5551 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5552 acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5553 if (s->env->macsr & MACSR_FI)
5554 gen_helper_get_mac_extf(reg, cpu_env, acc);
5555 else
5556 gen_helper_get_mac_exti(reg, cpu_env, acc);
5559 DISAS_INSN(macsr_to_ccr)
5561 TCGv tmp = tcg_temp_new();
5562 tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
5563 gen_helper_set_sr(cpu_env, tmp);
5564 tcg_temp_free(tmp);
5565 set_cc_op(s, CC_OP_FLAGS);
5568 DISAS_INSN(to_mac)
5570 TCGv_i64 acc;
5571 TCGv val;
5572 int accnum;
5573 accnum = (insn >> 9) & 3;
5574 acc = MACREG(accnum);
5575 SRC_EA(env, val, OS_LONG, 0, NULL);
5576 if (s->env->macsr & MACSR_FI) {
5577 tcg_gen_ext_i32_i64(acc, val);
5578 tcg_gen_shli_i64(acc, acc, 8);
5579 } else if (s->env->macsr & MACSR_SU) {
5580 tcg_gen_ext_i32_i64(acc, val);
5581 } else {
5582 tcg_gen_extu_i32_i64(acc, val);
5584 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5585 gen_mac_clear_flags();
5586 gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
5589 DISAS_INSN(to_macsr)
5591 TCGv val;
5592 SRC_EA(env, val, OS_LONG, 0, NULL);
5593 gen_helper_set_macsr(cpu_env, val);
5594 gen_lookup_tb(s);
5597 DISAS_INSN(to_mask)
5599 TCGv val;
5600 SRC_EA(env, val, OS_LONG, 0, NULL);
5601 tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
5604 DISAS_INSN(to_mext)
5606 TCGv val;
5607 TCGv acc;
5608 SRC_EA(env, val, OS_LONG, 0, NULL);
5609 acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5610 if (s->env->macsr & MACSR_FI)
5611 gen_helper_set_mac_extf(cpu_env, val, acc);
5612 else if (s->env->macsr & MACSR_SU)
5613 gen_helper_set_mac_exts(cpu_env, val, acc);
5614 else
5615 gen_helper_set_mac_extu(cpu_env, val, acc);
5618 static disas_proc opcode_table[65536];
5620 static void
5621 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5623 int i;
5624 int from;
5625 int to;
5627 /* Sanity check. All set bits must be included in the mask. */
5628 if (opcode & ~mask) {
5629 fprintf(stderr,
5630 "qemu internal error: bogus opcode definition %04x/%04x\n",
5631 opcode, mask);
5632 abort();
5634 /* This could probably be cleverer. For now just optimize the case where
5635 the top bits are known. */
5636 /* Find the first zero bit in the mask. */
5637 i = 0x8000;
5638 while ((i & mask) != 0)
5639 i >>= 1;
5640 /* Iterate over all combinations of this and lower bits. */
5641 if (i == 0)
5642 i = 1;
5643 else
5644 i <<= 1;
5645 from = opcode & ~(i - 1);
5646 to = from + i;
5647 for (i = from; i < to; i++) {
5648 if ((i & mask) == opcode)
5649 opcode_table[i] = proc;
/* Register m68k opcode handlers.  Order is important.
   Later insn override earlier ones. */
void register_m68k_insns (CPUM68KState *env)
{
    /* Build the opcode table only once to avoid
       multithreading issues. */
    if (opcode_table[0] != NULL) {
        return;
    }

    /* use BASE() for instruction available
     * for CF_ISA_A and M68000.
     */
    /* INSN() registers the handler only when ENV advertises the named
       CPU feature; both expand to register_opcode() calls.  */
#define BASE(name, opcode, mask) \
    register_opcode(disas_##name, 0x##opcode, 0x##mask)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        BASE(name, opcode, mask); \
    } while(0)
    BASE(undef, 0000, 0000);
    INSN(arith_im, 0080, fff8, CF_ISA_A);
    INSN(arith_im, 0000, ff00, M68000);
    INSN(chk2, 00c0, f9c0, CHK2);
    INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
    BASE(bitop_reg, 0100, f1c0);
    BASE(bitop_reg, 0140, f1c0);
    BASE(bitop_reg, 0180, f1c0);
    BASE(bitop_reg, 01c0, f1c0);
    INSN(arith_im, 0280, fff8, CF_ISA_A);
    INSN(arith_im, 0200, ff00, M68000);
    INSN(undef, 02c0, ffc0, M68000);
    INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im, 0480, fff8, CF_ISA_A);
    INSN(arith_im, 0400, ff00, M68000);
    INSN(undef, 04c0, ffc0, M68000);
    INSN(arith_im, 0600, ff00, M68000);
    INSN(undef, 06c0, ffc0, M68000);
    INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im, 0680, fff8, CF_ISA_A);
    INSN(arith_im, 0c00, ff38, CF_ISA_A);
    INSN(arith_im, 0c00, ff00, M68000);
    BASE(bitop_im, 0800, ffc0);
    BASE(bitop_im, 0840, ffc0);
    BASE(bitop_im, 0880, ffc0);
    BASE(bitop_im, 08c0, ffc0);
    INSN(arith_im, 0a80, fff8, CF_ISA_A);
    INSN(arith_im, 0a00, ff00, M68000);
#if defined(CONFIG_SOFTMMU)
    INSN(moves, 0e00, ff00, M68000);
#endif
    INSN(cas, 0ac0, ffc0, CAS);
    INSN(cas, 0cc0, ffc0, CAS);
    INSN(cas, 0ec0, ffc0, CAS);
    INSN(cas2w, 0cfc, ffff, CAS);
    INSN(cas2l, 0efc, ffff, CAS);
    BASE(move, 1000, f000);
    BASE(move, 2000, f000);
    BASE(move, 3000, f000);
    INSN(chk, 4000, f040, M68000);
    INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
    INSN(negx, 4080, fff8, CF_ISA_A);
    INSN(negx, 4000, ff00, M68000);
    INSN(undef, 40c0, ffc0, M68000);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, ffc0, M68000);
    BASE(lea, 41c0, f1c0);
    BASE(clr, 4200, ff00);
    BASE(undef, 42c0, ffc0);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(move_from_ccr, 42c0, ffc0, M68000);
    INSN(neg, 4480, fff8, CF_ISA_A);
    INSN(neg, 4400, ff00, M68000);
    INSN(undef, 44c0, ffc0, M68000);
    BASE(move_to_ccr, 44c0, ffc0);
    INSN(not, 4680, fff8, CF_ISA_A);
    INSN(not, 4600, ff00, M68000);
#if defined(CONFIG_SOFTMMU)
    BASE(move_to_sr, 46c0, ffc0);
#endif
    INSN(nbcd, 4800, ffc0, M68000);
    INSN(linkl, 4808, fff8, M68000);
    BASE(pea, 4840, ffc0);
    BASE(swap, 4840, fff8);
    INSN(bkpt, 4848, fff8, BKPT);
    INSN(movem, 48d0, fbf8, CF_ISA_A);
    INSN(movem, 48e8, fbf8, CF_ISA_A);
    INSN(movem, 4880, fb80, M68000);
    BASE(ext, 4880, fff8);
    BASE(ext, 48c0, fff8);
    BASE(ext, 49c0, fff8);
    BASE(tst, 4a00, ff00);
    INSN(tas, 4ac0, ffc0, CF_ISA_B);
    INSN(tas, 4ac0, ffc0, M68000);
#if defined(CONFIG_SOFTMMU)
    INSN(halt, 4ac8, ffff, CF_ISA_A);
#endif
    INSN(pulse, 4acc, ffff, CF_ISA_A);
    BASE(illegal, 4afc, ffff);
    INSN(mull, 4c00, ffc0, CF_ISA_A);
    INSN(mull, 4c00, ffc0, LONG_MULDIV);
    INSN(divl, 4c40, ffc0, CF_ISA_A);
    INSN(divl, 4c40, ffc0, LONG_MULDIV);
    INSN(sats, 4c80, fff8, CF_ISA_B);
    BASE(trap, 4e40, fff0);
    BASE(link, 4e50, fff8);
    BASE(unlk, 4e58, fff8);
#if defined(CONFIG_SOFTMMU)
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(reset, 4e70, ffff, M68000);
    BASE(stop, 4e72, ffff);
    BASE(rte, 4e73, ffff);
    INSN(cf_movec, 4e7b, ffff, CF_ISA_A);
    INSN(m68k_movec, 4e7a, fffe, M68000);
#endif
    BASE(nop, 4e71, ffff);
    INSN(rtd, 4e74, ffff, RTD);
    BASE(rts, 4e75, ffff);
    BASE(jump, 4e80, ffc0);
    BASE(jump, 4ec0, ffc0);
    INSN(addsubq, 5000, f080, M68000);
    BASE(addsubq, 5080, f0c0);
    INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx */
    INSN(scc, 50c0, f0c0, M68000); /* Scc.B <EA> */
    INSN(dbcc, 50c8, f0f8, M68000);
    INSN(tpf, 51f8, fff8, CF_ISA_A);

    /* Branch instructions. */
    BASE(branch, 6000, f000);
    /* Disable long branch instructions, then add back the ones we want. */
    BASE(undef, 60ff, f0ff); /* All long branches. */
    INSN(branch, 60ff, f0ff, CF_ISA_B);
    INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch, 60ff, ffff, BRAL);
    INSN(branch, 60ff, f0ff, BCCL);

    BASE(moveq, 7000, f100);
    INSN(mvzs, 7100, f100, CF_ISA_B);
    BASE(or, 8000, f000);
    BASE(divw, 80c0, f0c0);
    INSN(sbcd_reg, 8100, f1f8, M68000);
    INSN(sbcd_mem, 8108, f1f8, M68000);
    BASE(addsub, 9000, f000);
    INSN(undef, 90c0, f0c0, CF_ISA_A);
    INSN(subx_reg, 9180, f1f8, CF_ISA_A);
    INSN(subx_reg, 9100, f138, M68000);
    INSN(subx_mem, 9108, f138, M68000);
    INSN(suba, 91c0, f1c0, CF_ISA_A);
    INSN(suba, 90c0, f0c0, M68000);

    /* ColdFire EMAC instructions (opcodes Axxx).  */
    BASE(undef_mac, a000, f000);
    INSN(mac, a000, f100, CF_EMAC);
    INSN(from_mac, a180, f9b0, CF_EMAC);
    INSN(move_mac, a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac, a100, f9c0, CF_EMAC);
    INSN(to_macsr, a900, ffc0, CF_EMAC);
    INSN(to_mext, ab00, fbc0, CF_EMAC);
    INSN(to_mask, ad00, ffc0, CF_EMAC);

    INSN(mov3q, a140, f1c0, CF_ISA_B);
    INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp, b080, f1c0, CF_ISA_A);
    INSN(cmpa, b1c0, f1c0, CF_ISA_A);
    INSN(cmp, b000, f100, M68000);
    INSN(eor, b100, f100, M68000);
    INSN(cmpm, b108, f138, M68000);
    INSN(cmpa, b0c0, f0c0, M68000);
    INSN(eor, b180, f1c0, CF_ISA_A);
    BASE(and, c000, f000);
    INSN(exg_dd, c140, f1f8, M68000);
    INSN(exg_aa, c148, f1f8, M68000);
    INSN(exg_da, c188, f1f8, M68000);
    BASE(mulw, c0c0, f0c0);
    INSN(abcd_reg, c100, f1f8, M68000);
    INSN(abcd_mem, c108, f1f8, M68000);
    BASE(addsub, d000, f000);
    INSN(undef, d0c0, f0c0, CF_ISA_A);
    INSN(addx_reg, d180, f1f8, CF_ISA_A);
    INSN(addx_reg, d100, f138, M68000);
    INSN(addx_mem, d108, f138, M68000);
    INSN(adda, d1c0, f1c0, CF_ISA_A);
    INSN(adda, d0c0, f0c0, M68000);
    INSN(shift_im, e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(shift8_im, e000, f0f0, M68000);
    INSN(shift16_im, e040, f0f0, M68000);
    INSN(shift_im, e080, f0f0, M68000);
    INSN(shift8_reg, e020, f0f0, M68000);
    INSN(shift16_reg, e060, f0f0, M68000);
    INSN(shift_reg, e0a0, f0f0, M68000);
    INSN(shift_mem, e0c0, fcc0, M68000);
    INSN(rotate_im, e090, f0f0, M68000);
    INSN(rotate8_im, e010, f0f0, M68000);
    INSN(rotate16_im, e050, f0f0, M68000);
    INSN(rotate_reg, e0b0, f0f0, M68000);
    INSN(rotate8_reg, e030, f0f0, M68000);
    INSN(rotate16_reg, e070, f0f0, M68000);
    INSN(rotate_mem, e4c0, fcc0, M68000);
    INSN(bfext_mem, e9c0, fdc0, BITFIELD); /* bfextu & bfexts */
    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
    INSN(bfins_mem, efc0, ffc0, BITFIELD);
    INSN(bfins_reg, efc0, fff8, BITFIELD);
    INSN(bfop_mem, eac0, ffc0, BITFIELD); /* bfchg */
    INSN(bfop_reg, eac0, fff8, BITFIELD); /* bfchg */
    INSN(bfop_mem, ecc0, ffc0, BITFIELD); /* bfclr */
    INSN(bfop_reg, ecc0, fff8, BITFIELD); /* bfclr */
    INSN(bfop_mem, edc0, ffc0, BITFIELD); /* bfffo */
    INSN(bfop_reg, edc0, fff8, BITFIELD); /* bfffo */
    INSN(bfop_mem, eec0, ffc0, BITFIELD); /* bfset */
    INSN(bfop_reg, eec0, fff8, BITFIELD); /* bfset */
    INSN(bfop_mem, e8c0, ffc0, BITFIELD); /* bftst */
    INSN(bfop_reg, e8c0, fff8, BITFIELD); /* bftst */
    BASE(undef_fpu, f000, f000);
    INSN(fpu, f200, ffc0, CF_FPU);
    INSN(fbcc, f280, ffc0, CF_FPU);
    INSN(fpu, f200, ffc0, FPU);
    INSN(fscc, f240, ffc0, FPU);
    INSN(fbcc, f280, ff80, FPU);
#if defined(CONFIG_SOFTMMU)
    INSN(frestore, f340, ffc0, CF_FPU);
    INSN(fsave, f300, ffc0, CF_FPU);
    INSN(frestore, f340, ffc0, FPU);
    INSN(fsave, f300, ffc0, FPU);
    INSN(intouch, f340, ffc0, CF_ISA_A);
    INSN(cpushl, f428, ff38, CF_ISA_A);
    INSN(cpush, f420, ff20, M68040);
    INSN(cinv, f400, ff20, M68040);
    INSN(pflush, f500, ffe0, M68040);
    INSN(ptest, f548, ffd8, M68040);
    INSN(wddata, fb00, ff00, CF_ISA_A);
    INSN(wdebug, fbc0, ffc0, CF_ISA_A);
#endif
    INSN(move16_mem, f600, ffe0, M68040);
    INSN(move16_reg, f620, fff8, M68040);
#undef INSN
}
/* ??? Some of this implementation is not exception safe. We should always
   write back the result to memory before setting the condition codes. */
/* Translate one instruction: fetch the 16-bit opcode word, dispatch
   through the table built by register_m68k_insns(), then flush any
   effective-address writebacks queued during translation.  */
static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
{
    uint16_t insn = read_im16(env, s);
    opcode_table[insn](env, s, insn);
    do_writebacks(s);
}
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    CPUM68KState *env = cs->env_ptr;
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    int pc_offset;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    /* Initialise the per-TB translation state.  */
    dc->env = env;
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->cc_op = CC_OP_DYNAMIC;      /* condition codes evaluated lazily */
    dc->cc_op_synced = 1;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->done_mac = 0;
    dc->writeback_mask = 0;
    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    /* Main translation loop: one iteration per guest instruction.  */
    do {
        pc_offset = dc->pc - pc_start;
        tcg_gen_insn_start(dc->pc, dc->cc_op);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            gen_exception(dc, dc->pc, EXCP_DEBUG);
            dc->is_jmp = DISAS_JUMP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            dc->pc += 2;
            break;
        }

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->insn_pc = dc->pc;
        disas_m68k_insn(env, dc);
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

    if (tb_cflags(tb) & CF_LAST_IO)
        gen_io_end();
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        if (!dc->is_jmp) {
            update_cc_op(dc);
            tcg_gen_movi_i32(QREG_PC, dc->pc);
        }
        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
    } else {
        /* Emit the appropriate TB epilogue for how translation stopped. */
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            update_cc_op(dc);
            gen_jmp_tb(dc, 0, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            update_cc_op(dc);
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
/* Convert an 80-bit FP register image (16-bit sign/exponent word HIGH
   plus 64-bit mantissa LOW) to a host double, for register dumps only.
   NOTE(review): the conversion goes through float64, so values outside
   double range/precision will be rounded — display-only accuracy.  */
static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
{
    floatx80 a = { .high = high, .low = low };
    union {
        float64 f64;
        double d;
    } u;

    /* Type-pun the float64 bit pattern into a host double via a union.  */
    u.f64 = floatx80_to_float64(a, &env->fp_status);
    return u.d;
}
/* Dump the CPU register state (data/address/FP registers, PC, SR/CCR,
   FPSR/FPCR, and on softmmu builds the stack pointers and MMU state)
   to F via CPU_FPRINTF, for the monitor/-d cpu logging.  */
void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;
    int i;
    uint16_t sr;
    /* FP registers are shown both raw and converted to a host double.  */
    for (i = 0; i < 8; i++) {
        cpu_fprintf(f, "D%d = %08x A%d = %08x "
                    "F%d = %04x %016"PRIx64" (%12g)\n",
                    i, env->dregs[i], i, env->aregs[i],
                    i, env->fregs[i].l.upper, env->fregs[i].l.lower,
                    floatx80_to_double(env, env->fregs[i].l.upper,
                                       env->fregs[i].l.lower));
    }
    cpu_fprintf (f, "PC = %08x ", env->pc);
    /* Merge the lazily-computed condition codes into the stored SR.  */
    sr = env->sr | cpu_m68k_get_ccr(env);
    cpu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
                sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
                (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
                (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
                (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
                (sr & CCF_C) ? 'C' : '-');
    cpu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
                (env->fpsr & FPSR_CC_A) ? 'A' : '-',
                (env->fpsr & FPSR_CC_I) ? 'I' : '-',
                (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
                (env->fpsr & FPSR_CC_N) ? 'N' : '-');
    cpu_fprintf(f, "\n "
                "FPCR = %04x ", env->fpcr);
    /* Decode the FPCR rounding-precision field.  */
    switch (env->fpcr & FPCR_PREC_MASK) {
    case FPCR_PREC_X:
        cpu_fprintf(f, "X ");
        break;
    case FPCR_PREC_S:
        cpu_fprintf(f, "S ");
        break;
    case FPCR_PREC_D:
        cpu_fprintf(f, "D ");
        break;
    }
    /* Decode the FPCR rounding-mode field.  */
    switch (env->fpcr & FPCR_RND_MASK) {
    case FPCR_RND_N:
        cpu_fprintf(f, "RN ");
        break;
    case FPCR_RND_Z:
        cpu_fprintf(f, "RZ ");
        break;
    case FPCR_RND_M:
        cpu_fprintf(f, "RM ");
        break;
    case FPCR_RND_P:
        cpu_fprintf(f, "RP ");
        break;
    }
    cpu_fprintf(f, "\n");
#ifdef CONFIG_SOFTMMU
    /* "->" marks which of the banked A7 stack pointers is current.  */
    cpu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
                env->current_sp == M68K_SSP ? "->" : " ", env->sp[M68K_SSP],
                env->current_sp == M68K_USP ? "->" : " ", env->sp[M68K_USP],
                env->current_sp == M68K_ISP ? "->" : " ", env->sp[M68K_ISP]);
    cpu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
    cpu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
    cpu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
                env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
    cpu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
                env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
                env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
    cpu_fprintf(f, "MMUSR %08x, fault at %08x\n",
                env->mmu.mmusr, env->mmu.ar);
#endif
}
6095 void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
6096 target_ulong *data)
6098 int cc_op = data[1];
6099 env->pc = data[0];
6100 if (cc_op != CC_OP_DYNAMIC) {
6101 env->cc_op = cc_op;