/*
 * Source: qemu.git, target/m68k/translate.c
 * (gitweb blob 70c7583621dd53c39d95c19198922e9a854ad1ff)
 */
/*
 * m68k translation
 *
 * Copyright (c) 2005-2007 CodeSourcery
 * Written by Paul Brook
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "qemu/log.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/translator.h"
30 #include "exec/helper-proto.h"
31 #include "exec/helper-gen.h"
33 #include "trace-tcg.h"
34 #include "exec/log.h"
36 //#define DEBUG_DISPATCH 1
38 #define DEFO32(name, offset) static TCGv QREG_##name;
39 #define DEFO64(name, offset) static TCGv_i64 QREG_##name;
40 #include "qregs.def"
41 #undef DEFO32
42 #undef DEFO64
44 static TCGv_i32 cpu_halted;
45 static TCGv_i32 cpu_exception_index;
47 static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
48 static TCGv cpu_dregs[8];
49 static TCGv cpu_aregs[8];
50 static TCGv_i64 cpu_macc[4];
52 #define REG(insn, pos) (((insn) >> (pos)) & 7)
53 #define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
54 #define AREG(insn, pos) get_areg(s, REG(insn, pos))
55 #define MACREG(acc) cpu_macc[acc]
56 #define QREG_SP get_areg(s, 7)
58 static TCGv NULL_QREG;
59 #define IS_NULL_QREG(t) (t == NULL_QREG)
60 /* Used to distinguish stores from bad addressing modes. */
61 static TCGv store_dummy;
63 #include "exec/gen-icount.h"
65 void m68k_tcg_init(void)
67 char *p;
68 int i;
70 #define DEFO32(name, offset) \
71 QREG_##name = tcg_global_mem_new_i32(cpu_env, \
72 offsetof(CPUM68KState, offset), #name);
73 #define DEFO64(name, offset) \
74 QREG_##name = tcg_global_mem_new_i64(cpu_env, \
75 offsetof(CPUM68KState, offset), #name);
76 #include "qregs.def"
77 #undef DEFO32
78 #undef DEFO64
80 cpu_halted = tcg_global_mem_new_i32(cpu_env,
81 -offsetof(M68kCPU, env) +
82 offsetof(CPUState, halted), "HALTED");
83 cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
84 -offsetof(M68kCPU, env) +
85 offsetof(CPUState, exception_index),
86 "EXCEPTION");
88 p = cpu_reg_names;
89 for (i = 0; i < 8; i++) {
90 sprintf(p, "D%d", i);
91 cpu_dregs[i] = tcg_global_mem_new(cpu_env,
92 offsetof(CPUM68KState, dregs[i]), p);
93 p += 3;
94 sprintf(p, "A%d", i);
95 cpu_aregs[i] = tcg_global_mem_new(cpu_env,
96 offsetof(CPUM68KState, aregs[i]), p);
97 p += 3;
99 for (i = 0; i < 4; i++) {
100 sprintf(p, "ACC%d", i);
101 cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
102 offsetof(CPUM68KState, macc[i]), p);
103 p += 5;
106 NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
107 store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
110 /* internal defines */
111 typedef struct DisasContext {
112 CPUM68KState *env;
113 target_ulong insn_pc; /* Start of the current instruction. */
114 target_ulong pc;
115 int is_jmp;
116 CCOp cc_op; /* Current CC operation */
117 int cc_op_synced;
118 struct TranslationBlock *tb;
119 int singlestep_enabled;
120 TCGv_i64 mactmp;
121 int done_mac;
122 int writeback_mask;
123 TCGv writeback[8];
124 } DisasContext;
126 static TCGv get_areg(DisasContext *s, unsigned regno)
128 if (s->writeback_mask & (1 << regno)) {
129 return s->writeback[regno];
130 } else {
131 return cpu_aregs[regno];
135 static void delay_set_areg(DisasContext *s, unsigned regno,
136 TCGv val, bool give_temp)
138 if (s->writeback_mask & (1 << regno)) {
139 if (give_temp) {
140 tcg_temp_free(s->writeback[regno]);
141 s->writeback[regno] = val;
142 } else {
143 tcg_gen_mov_i32(s->writeback[regno], val);
145 } else {
146 s->writeback_mask |= 1 << regno;
147 if (give_temp) {
148 s->writeback[regno] = val;
149 } else {
150 TCGv tmp = tcg_temp_new();
151 s->writeback[regno] = tmp;
152 tcg_gen_mov_i32(tmp, val);
157 static void do_writebacks(DisasContext *s)
159 unsigned mask = s->writeback_mask;
160 if (mask) {
161 s->writeback_mask = 0;
162 do {
163 unsigned regno = ctz32(mask);
164 tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
165 tcg_temp_free(s->writeback[regno]);
166 mask &= mask - 1;
167 } while (mask);
171 /* is_jmp field values */
172 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
173 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
174 #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
175 #define DISAS_JUMP_NEXT DISAS_TARGET_3
177 #if defined(CONFIG_USER_ONLY)
178 #define IS_USER(s) 1
179 #else
180 #define IS_USER(s) (!(s->tb->flags & TB_FLAGS_MSR_S))
181 #define SFC_INDEX(s) ((s->tb->flags & TB_FLAGS_SFC_S) ? \
182 MMU_KERNEL_IDX : MMU_USER_IDX)
183 #define DFC_INDEX(s) ((s->tb->flags & TB_FLAGS_DFC_S) ? \
184 MMU_KERNEL_IDX : MMU_USER_IDX)
185 #endif
187 typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
189 #ifdef DEBUG_DISPATCH
190 #define DISAS_INSN(name) \
191 static void real_disas_##name(CPUM68KState *env, DisasContext *s, \
192 uint16_t insn); \
193 static void disas_##name(CPUM68KState *env, DisasContext *s, \
194 uint16_t insn) \
196 qemu_log("Dispatch " #name "\n"); \
197 real_disas_##name(env, s, insn); \
199 static void real_disas_##name(CPUM68KState *env, DisasContext *s, \
200 uint16_t insn)
201 #else
202 #define DISAS_INSN(name) \
203 static void disas_##name(CPUM68KState *env, DisasContext *s, \
204 uint16_t insn)
205 #endif
207 static const uint8_t cc_op_live[CC_OP_NB] = {
208 [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
209 [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
210 [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
211 [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
212 [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
213 [CC_OP_LOGIC] = CCF_X | CCF_N
216 static void set_cc_op(DisasContext *s, CCOp op)
218 CCOp old_op = s->cc_op;
219 int dead;
221 if (old_op == op) {
222 return;
224 s->cc_op = op;
225 s->cc_op_synced = 0;
227 /* Discard CC computation that will no longer be used.
228 Note that X and N are never dead. */
229 dead = cc_op_live[old_op] & ~cc_op_live[op];
230 if (dead & CCF_C) {
231 tcg_gen_discard_i32(QREG_CC_C);
233 if (dead & CCF_Z) {
234 tcg_gen_discard_i32(QREG_CC_Z);
236 if (dead & CCF_V) {
237 tcg_gen_discard_i32(QREG_CC_V);
241 /* Update the CPU env CC_OP state. */
242 static void update_cc_op(DisasContext *s)
244 if (!s->cc_op_synced) {
245 s->cc_op_synced = 1;
246 tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
250 /* Generate a jump to an immediate address. */
251 static void gen_jmp_im(DisasContext *s, uint32_t dest)
253 update_cc_op(s);
254 tcg_gen_movi_i32(QREG_PC, dest);
255 s->is_jmp = DISAS_JUMP;
258 /* Generate a jump to the address in qreg DEST. */
259 static void gen_jmp(DisasContext *s, TCGv dest)
261 update_cc_op(s);
262 tcg_gen_mov_i32(QREG_PC, dest);
263 s->is_jmp = DISAS_JUMP;
266 static void gen_raise_exception(int nr)
268 TCGv_i32 tmp = tcg_const_i32(nr);
270 gen_helper_raise_exception(cpu_env, tmp);
271 tcg_temp_free_i32(tmp);
274 static void gen_exception(DisasContext *s, uint32_t where, int nr)
276 gen_jmp_im(s, where);
277 gen_raise_exception(nr);
280 static inline void gen_addr_fault(DisasContext *s)
282 gen_exception(s, s->insn_pc, EXCP_ADDRESS);
285 /* Generate a load from the specified address. Narrow values are
286 sign extended to full register width. */
287 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
288 int sign, int index)
290 TCGv tmp;
291 tmp = tcg_temp_new_i32();
292 switch(opsize) {
293 case OS_BYTE:
294 if (sign)
295 tcg_gen_qemu_ld8s(tmp, addr, index);
296 else
297 tcg_gen_qemu_ld8u(tmp, addr, index);
298 break;
299 case OS_WORD:
300 if (sign)
301 tcg_gen_qemu_ld16s(tmp, addr, index);
302 else
303 tcg_gen_qemu_ld16u(tmp, addr, index);
304 break;
305 case OS_LONG:
306 tcg_gen_qemu_ld32u(tmp, addr, index);
307 break;
308 default:
309 g_assert_not_reached();
311 return tmp;
314 /* Generate a store. */
315 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
316 int index)
318 switch(opsize) {
319 case OS_BYTE:
320 tcg_gen_qemu_st8(val, addr, index);
321 break;
322 case OS_WORD:
323 tcg_gen_qemu_st16(val, addr, index);
324 break;
325 case OS_LONG:
326 tcg_gen_qemu_st32(val, addr, index);
327 break;
328 default:
329 g_assert_not_reached();
/* Direction of an effective-address access.  */
typedef enum {
    EA_STORE,
    EA_LOADU,   /* load, zero-extended */
    EA_LOADS    /* load, sign-extended */
} ea_what;
339 /* Generate an unsigned load if VAL is 0 a signed load if val is -1,
340 otherwise generate a store. */
341 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
342 ea_what what, int index)
344 if (what == EA_STORE) {
345 gen_store(s, opsize, addr, val, index);
346 return store_dummy;
347 } else {
348 return gen_load(s, opsize, addr, what == EA_LOADS, index);
352 /* Read a 16-bit immediate constant */
353 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
355 uint16_t im;
356 im = cpu_lduw_code(env, s->pc);
357 s->pc += 2;
358 return im;
361 /* Read an 8-bit immediate constant */
362 static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
364 return read_im16(env, s);
367 /* Read a 32-bit immediate constant. */
368 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
370 uint32_t im;
371 im = read_im16(env, s) << 16;
372 im |= 0xffff & read_im16(env, s);
373 return im;
376 /* Read a 64-bit immediate constant. */
377 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
379 uint64_t im;
380 im = (uint64_t)read_im32(env, s) << 32;
381 im |= (uint64_t)read_im32(env, s);
382 return im;
385 /* Calculate and address index. */
386 static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
388 TCGv add;
389 int scale;
391 add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
392 if ((ext & 0x800) == 0) {
393 tcg_gen_ext16s_i32(tmp, add);
394 add = tmp;
396 scale = (ext >> 9) & 3;
397 if (scale != 0) {
398 tcg_gen_shli_i32(tmp, add, scale);
399 add = tmp;
401 return add;
404 /* Handle a base + index + displacement effective addresss.
405 A NULL_QREG base means pc-relative. */
406 static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
408 uint32_t offset;
409 uint16_t ext;
410 TCGv add;
411 TCGv tmp;
412 uint32_t bd, od;
414 offset = s->pc;
415 ext = read_im16(env, s);
417 if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
418 return NULL_QREG;
420 if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
421 !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
422 ext &= ~(3 << 9);
425 if (ext & 0x100) {
426 /* full extension word format */
427 if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
428 return NULL_QREG;
430 if ((ext & 0x30) > 0x10) {
431 /* base displacement */
432 if ((ext & 0x30) == 0x20) {
433 bd = (int16_t)read_im16(env, s);
434 } else {
435 bd = read_im32(env, s);
437 } else {
438 bd = 0;
440 tmp = tcg_temp_new();
441 if ((ext & 0x44) == 0) {
442 /* pre-index */
443 add = gen_addr_index(s, ext, tmp);
444 } else {
445 add = NULL_QREG;
447 if ((ext & 0x80) == 0) {
448 /* base not suppressed */
449 if (IS_NULL_QREG(base)) {
450 base = tcg_const_i32(offset + bd);
451 bd = 0;
453 if (!IS_NULL_QREG(add)) {
454 tcg_gen_add_i32(tmp, add, base);
455 add = tmp;
456 } else {
457 add = base;
460 if (!IS_NULL_QREG(add)) {
461 if (bd != 0) {
462 tcg_gen_addi_i32(tmp, add, bd);
463 add = tmp;
465 } else {
466 add = tcg_const_i32(bd);
468 if ((ext & 3) != 0) {
469 /* memory indirect */
470 base = gen_load(s, OS_LONG, add, 0, IS_USER(s));
471 if ((ext & 0x44) == 4) {
472 add = gen_addr_index(s, ext, tmp);
473 tcg_gen_add_i32(tmp, add, base);
474 add = tmp;
475 } else {
476 add = base;
478 if ((ext & 3) > 1) {
479 /* outer displacement */
480 if ((ext & 3) == 2) {
481 od = (int16_t)read_im16(env, s);
482 } else {
483 od = read_im32(env, s);
485 } else {
486 od = 0;
488 if (od != 0) {
489 tcg_gen_addi_i32(tmp, add, od);
490 add = tmp;
493 } else {
494 /* brief extension word format */
495 tmp = tcg_temp_new();
496 add = gen_addr_index(s, ext, tmp);
497 if (!IS_NULL_QREG(base)) {
498 tcg_gen_add_i32(tmp, add, base);
499 if ((int8_t)ext)
500 tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
501 } else {
502 tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
504 add = tmp;
506 return add;
509 /* Sign or zero extend a value. */
511 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
513 switch (opsize) {
514 case OS_BYTE:
515 if (sign) {
516 tcg_gen_ext8s_i32(res, val);
517 } else {
518 tcg_gen_ext8u_i32(res, val);
520 break;
521 case OS_WORD:
522 if (sign) {
523 tcg_gen_ext16s_i32(res, val);
524 } else {
525 tcg_gen_ext16u_i32(res, val);
527 break;
528 case OS_LONG:
529 tcg_gen_mov_i32(res, val);
530 break;
531 default:
532 g_assert_not_reached();
536 /* Evaluate all the CC flags. */
538 static void gen_flush_flags(DisasContext *s)
540 TCGv t0, t1;
542 switch (s->cc_op) {
543 case CC_OP_FLAGS:
544 return;
546 case CC_OP_ADDB:
547 case CC_OP_ADDW:
548 case CC_OP_ADDL:
549 tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
550 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
551 /* Compute signed overflow for addition. */
552 t0 = tcg_temp_new();
553 t1 = tcg_temp_new();
554 tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
555 gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
556 tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
557 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
558 tcg_temp_free(t0);
559 tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
560 tcg_temp_free(t1);
561 break;
563 case CC_OP_SUBB:
564 case CC_OP_SUBW:
565 case CC_OP_SUBL:
566 tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
567 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
568 /* Compute signed overflow for subtraction. */
569 t0 = tcg_temp_new();
570 t1 = tcg_temp_new();
571 tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
572 gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
573 tcg_gen_xor_i32(t1, QREG_CC_N, t0);
574 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
575 tcg_temp_free(t0);
576 tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
577 tcg_temp_free(t1);
578 break;
580 case CC_OP_CMPB:
581 case CC_OP_CMPW:
582 case CC_OP_CMPL:
583 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
584 tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
585 gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
586 /* Compute signed overflow for subtraction. */
587 t0 = tcg_temp_new();
588 tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
589 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
590 tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
591 tcg_temp_free(t0);
592 tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
593 break;
595 case CC_OP_LOGIC:
596 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
597 tcg_gen_movi_i32(QREG_CC_C, 0);
598 tcg_gen_movi_i32(QREG_CC_V, 0);
599 break;
601 case CC_OP_DYNAMIC:
602 gen_helper_flush_flags(cpu_env, QREG_CC_OP);
603 s->cc_op_synced = 1;
604 break;
606 default:
607 t0 = tcg_const_i32(s->cc_op);
608 gen_helper_flush_flags(cpu_env, t0);
609 tcg_temp_free(t0);
610 s->cc_op_synced = 1;
611 break;
614 /* Note that flush_flags also assigned to env->cc_op. */
615 s->cc_op = CC_OP_FLAGS;
618 static inline TCGv gen_extend(TCGv val, int opsize, int sign)
620 TCGv tmp;
622 if (opsize == OS_LONG) {
623 tmp = val;
624 } else {
625 tmp = tcg_temp_new();
626 gen_ext(tmp, val, opsize, sign);
629 return tmp;
632 static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
634 gen_ext(QREG_CC_N, val, opsize, 1);
635 set_cc_op(s, CC_OP_LOGIC);
638 static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
640 tcg_gen_mov_i32(QREG_CC_N, dest);
641 tcg_gen_mov_i32(QREG_CC_V, src);
642 set_cc_op(s, CC_OP_CMPB + opsize);
645 static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
647 gen_ext(QREG_CC_N, dest, opsize, 1);
648 tcg_gen_mov_i32(QREG_CC_V, src);
651 static inline int opsize_bytes(int opsize)
653 switch (opsize) {
654 case OS_BYTE: return 1;
655 case OS_WORD: return 2;
656 case OS_LONG: return 4;
657 case OS_SINGLE: return 4;
658 case OS_DOUBLE: return 8;
659 case OS_EXTENDED: return 12;
660 case OS_PACKED: return 12;
661 default:
662 g_assert_not_reached();
666 static inline int insn_opsize(int insn)
668 switch ((insn >> 6) & 3) {
669 case 0: return OS_BYTE;
670 case 1: return OS_WORD;
671 case 2: return OS_LONG;
672 default:
673 g_assert_not_reached();
677 static inline int ext_opsize(int ext, int pos)
679 switch ((ext >> pos) & 7) {
680 case 0: return OS_LONG;
681 case 1: return OS_SINGLE;
682 case 2: return OS_EXTENDED;
683 case 3: return OS_PACKED;
684 case 4: return OS_WORD;
685 case 5: return OS_DOUBLE;
686 case 6: return OS_BYTE;
687 default:
688 g_assert_not_reached();
692 /* Assign value to a register. If the width is less than the register width
693 only the low part of the register is set. */
694 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
696 TCGv tmp;
697 switch (opsize) {
698 case OS_BYTE:
699 tcg_gen_andi_i32(reg, reg, 0xffffff00);
700 tmp = tcg_temp_new();
701 tcg_gen_ext8u_i32(tmp, val);
702 tcg_gen_or_i32(reg, reg, tmp);
703 tcg_temp_free(tmp);
704 break;
705 case OS_WORD:
706 tcg_gen_andi_i32(reg, reg, 0xffff0000);
707 tmp = tcg_temp_new();
708 tcg_gen_ext16u_i32(tmp, val);
709 tcg_gen_or_i32(reg, reg, tmp);
710 tcg_temp_free(tmp);
711 break;
712 case OS_LONG:
713 case OS_SINGLE:
714 tcg_gen_mov_i32(reg, val);
715 break;
716 default:
717 g_assert_not_reached();
721 /* Generate code for an "effective address". Does not adjust the base
722 register for autoincrement addressing modes. */
723 static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
724 int mode, int reg0, int opsize)
726 TCGv reg;
727 TCGv tmp;
728 uint16_t ext;
729 uint32_t offset;
731 switch (mode) {
732 case 0: /* Data register direct. */
733 case 1: /* Address register direct. */
734 return NULL_QREG;
735 case 3: /* Indirect postincrement. */
736 if (opsize == OS_UNSIZED) {
737 return NULL_QREG;
739 /* fallthru */
740 case 2: /* Indirect register */
741 return get_areg(s, reg0);
742 case 4: /* Indirect predecrememnt. */
743 if (opsize == OS_UNSIZED) {
744 return NULL_QREG;
746 reg = get_areg(s, reg0);
747 tmp = tcg_temp_new();
748 if (reg0 == 7 && opsize == OS_BYTE &&
749 m68k_feature(s->env, M68K_FEATURE_M68000)) {
750 tcg_gen_subi_i32(tmp, reg, 2);
751 } else {
752 tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
754 return tmp;
755 case 5: /* Indirect displacement. */
756 reg = get_areg(s, reg0);
757 tmp = tcg_temp_new();
758 ext = read_im16(env, s);
759 tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
760 return tmp;
761 case 6: /* Indirect index + displacement. */
762 reg = get_areg(s, reg0);
763 return gen_lea_indexed(env, s, reg);
764 case 7: /* Other */
765 switch (reg0) {
766 case 0: /* Absolute short. */
767 offset = (int16_t)read_im16(env, s);
768 return tcg_const_i32(offset);
769 case 1: /* Absolute long. */
770 offset = read_im32(env, s);
771 return tcg_const_i32(offset);
772 case 2: /* pc displacement */
773 offset = s->pc;
774 offset += (int16_t)read_im16(env, s);
775 return tcg_const_i32(offset);
776 case 3: /* pc index+displacement. */
777 return gen_lea_indexed(env, s, NULL_QREG);
778 case 4: /* Immediate. */
779 default:
780 return NULL_QREG;
783 /* Should never happen. */
784 return NULL_QREG;
787 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
788 int opsize)
790 int mode = extract32(insn, 3, 3);
791 int reg0 = REG(insn, 0);
792 return gen_lea_mode(env, s, mode, reg0, opsize);
795 /* Generate code to load/store a value from/into an EA. If WHAT > 0 this is
796 a write otherwise it is a read (0 == sign extend, -1 == zero extend).
797 ADDRP is non-null for readwrite operands. */
798 static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
799 int opsize, TCGv val, TCGv *addrp, ea_what what,
800 int index)
802 TCGv reg, tmp, result;
803 int32_t offset;
805 switch (mode) {
806 case 0: /* Data register direct. */
807 reg = cpu_dregs[reg0];
808 if (what == EA_STORE) {
809 gen_partset_reg(opsize, reg, val);
810 return store_dummy;
811 } else {
812 return gen_extend(reg, opsize, what == EA_LOADS);
814 case 1: /* Address register direct. */
815 reg = get_areg(s, reg0);
816 if (what == EA_STORE) {
817 tcg_gen_mov_i32(reg, val);
818 return store_dummy;
819 } else {
820 return gen_extend(reg, opsize, what == EA_LOADS);
822 case 2: /* Indirect register */
823 reg = get_areg(s, reg0);
824 return gen_ldst(s, opsize, reg, val, what, index);
825 case 3: /* Indirect postincrement. */
826 reg = get_areg(s, reg0);
827 result = gen_ldst(s, opsize, reg, val, what, index);
828 if (what == EA_STORE || !addrp) {
829 TCGv tmp = tcg_temp_new();
830 if (reg0 == 7 && opsize == OS_BYTE &&
831 m68k_feature(s->env, M68K_FEATURE_M68000)) {
832 tcg_gen_addi_i32(tmp, reg, 2);
833 } else {
834 tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
836 delay_set_areg(s, reg0, tmp, true);
838 return result;
839 case 4: /* Indirect predecrememnt. */
840 if (addrp && what == EA_STORE) {
841 tmp = *addrp;
842 } else {
843 tmp = gen_lea_mode(env, s, mode, reg0, opsize);
844 if (IS_NULL_QREG(tmp)) {
845 return tmp;
847 if (addrp) {
848 *addrp = tmp;
851 result = gen_ldst(s, opsize, tmp, val, what, index);
852 if (what == EA_STORE || !addrp) {
853 delay_set_areg(s, reg0, tmp, false);
855 return result;
856 case 5: /* Indirect displacement. */
857 case 6: /* Indirect index + displacement. */
858 do_indirect:
859 if (addrp && what == EA_STORE) {
860 tmp = *addrp;
861 } else {
862 tmp = gen_lea_mode(env, s, mode, reg0, opsize);
863 if (IS_NULL_QREG(tmp)) {
864 return tmp;
866 if (addrp) {
867 *addrp = tmp;
870 return gen_ldst(s, opsize, tmp, val, what, index);
871 case 7: /* Other */
872 switch (reg0) {
873 case 0: /* Absolute short. */
874 case 1: /* Absolute long. */
875 case 2: /* pc displacement */
876 case 3: /* pc index+displacement. */
877 goto do_indirect;
878 case 4: /* Immediate. */
879 /* Sign extend values for consistency. */
880 switch (opsize) {
881 case OS_BYTE:
882 if (what == EA_LOADS) {
883 offset = (int8_t)read_im8(env, s);
884 } else {
885 offset = read_im8(env, s);
887 break;
888 case OS_WORD:
889 if (what == EA_LOADS) {
890 offset = (int16_t)read_im16(env, s);
891 } else {
892 offset = read_im16(env, s);
894 break;
895 case OS_LONG:
896 offset = read_im32(env, s);
897 break;
898 default:
899 g_assert_not_reached();
901 return tcg_const_i32(offset);
902 default:
903 return NULL_QREG;
906 /* Should never happen. */
907 return NULL_QREG;
910 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
911 int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
913 int mode = extract32(insn, 3, 3);
914 int reg0 = REG(insn, 0);
915 return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
918 static TCGv_ptr gen_fp_ptr(int freg)
920 TCGv_ptr fp = tcg_temp_new_ptr();
921 tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
922 return fp;
925 static TCGv_ptr gen_fp_result_ptr(void)
927 TCGv_ptr fp = tcg_temp_new_ptr();
928 tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
929 return fp;
932 static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
934 TCGv t32;
935 TCGv_i64 t64;
937 t32 = tcg_temp_new();
938 tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
939 tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
940 tcg_temp_free(t32);
942 t64 = tcg_temp_new_i64();
943 tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
944 tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
945 tcg_temp_free_i64(t64);
948 static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
949 int index)
951 TCGv tmp;
952 TCGv_i64 t64;
954 t64 = tcg_temp_new_i64();
955 tmp = tcg_temp_new();
956 switch (opsize) {
957 case OS_BYTE:
958 tcg_gen_qemu_ld8s(tmp, addr, index);
959 gen_helper_exts32(cpu_env, fp, tmp);
960 break;
961 case OS_WORD:
962 tcg_gen_qemu_ld16s(tmp, addr, index);
963 gen_helper_exts32(cpu_env, fp, tmp);
964 break;
965 case OS_LONG:
966 tcg_gen_qemu_ld32u(tmp, addr, index);
967 gen_helper_exts32(cpu_env, fp, tmp);
968 break;
969 case OS_SINGLE:
970 tcg_gen_qemu_ld32u(tmp, addr, index);
971 gen_helper_extf32(cpu_env, fp, tmp);
972 break;
973 case OS_DOUBLE:
974 tcg_gen_qemu_ld64(t64, addr, index);
975 gen_helper_extf64(cpu_env, fp, t64);
976 break;
977 case OS_EXTENDED:
978 if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
979 gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
980 break;
982 tcg_gen_qemu_ld32u(tmp, addr, index);
983 tcg_gen_shri_i32(tmp, tmp, 16);
984 tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
985 tcg_gen_addi_i32(tmp, addr, 4);
986 tcg_gen_qemu_ld64(t64, tmp, index);
987 tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
988 break;
989 case OS_PACKED:
990 /* unimplemented data type on 68040/ColdFire
991 * FIXME if needed for another FPU
993 gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
994 break;
995 default:
996 g_assert_not_reached();
998 tcg_temp_free(tmp);
999 tcg_temp_free_i64(t64);
1002 static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
1003 int index)
1005 TCGv tmp;
1006 TCGv_i64 t64;
1008 t64 = tcg_temp_new_i64();
1009 tmp = tcg_temp_new();
1010 switch (opsize) {
1011 case OS_BYTE:
1012 gen_helper_reds32(tmp, cpu_env, fp);
1013 tcg_gen_qemu_st8(tmp, addr, index);
1014 break;
1015 case OS_WORD:
1016 gen_helper_reds32(tmp, cpu_env, fp);
1017 tcg_gen_qemu_st16(tmp, addr, index);
1018 break;
1019 case OS_LONG:
1020 gen_helper_reds32(tmp, cpu_env, fp);
1021 tcg_gen_qemu_st32(tmp, addr, index);
1022 break;
1023 case OS_SINGLE:
1024 gen_helper_redf32(tmp, cpu_env, fp);
1025 tcg_gen_qemu_st32(tmp, addr, index);
1026 break;
1027 case OS_DOUBLE:
1028 gen_helper_redf64(t64, cpu_env, fp);
1029 tcg_gen_qemu_st64(t64, addr, index);
1030 break;
1031 case OS_EXTENDED:
1032 if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
1033 gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1034 break;
1036 tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
1037 tcg_gen_shli_i32(tmp, tmp, 16);
1038 tcg_gen_qemu_st32(tmp, addr, index);
1039 tcg_gen_addi_i32(tmp, addr, 4);
1040 tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
1041 tcg_gen_qemu_st64(t64, tmp, index);
1042 break;
1043 case OS_PACKED:
1044 /* unimplemented data type on 68040/ColdFire
1045 * FIXME if needed for another FPU
1047 gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1048 break;
1049 default:
1050 g_assert_not_reached();
1052 tcg_temp_free(tmp);
1053 tcg_temp_free_i64(t64);
1056 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1057 TCGv_ptr fp, ea_what what, int index)
1059 if (what == EA_STORE) {
1060 gen_store_fp(s, opsize, addr, fp, index);
1061 } else {
1062 gen_load_fp(s, opsize, addr, fp, index);
1066 static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
1067 int reg0, int opsize, TCGv_ptr fp, ea_what what,
1068 int index)
1070 TCGv reg, addr, tmp;
1071 TCGv_i64 t64;
1073 switch (mode) {
1074 case 0: /* Data register direct. */
1075 reg = cpu_dregs[reg0];
1076 if (what == EA_STORE) {
1077 switch (opsize) {
1078 case OS_BYTE:
1079 case OS_WORD:
1080 case OS_LONG:
1081 gen_helper_reds32(reg, cpu_env, fp);
1082 break;
1083 case OS_SINGLE:
1084 gen_helper_redf32(reg, cpu_env, fp);
1085 break;
1086 default:
1087 g_assert_not_reached();
1089 } else {
1090 tmp = tcg_temp_new();
1091 switch (opsize) {
1092 case OS_BYTE:
1093 tcg_gen_ext8s_i32(tmp, reg);
1094 gen_helper_exts32(cpu_env, fp, tmp);
1095 break;
1096 case OS_WORD:
1097 tcg_gen_ext16s_i32(tmp, reg);
1098 gen_helper_exts32(cpu_env, fp, tmp);
1099 break;
1100 case OS_LONG:
1101 gen_helper_exts32(cpu_env, fp, reg);
1102 break;
1103 case OS_SINGLE:
1104 gen_helper_extf32(cpu_env, fp, reg);
1105 break;
1106 default:
1107 g_assert_not_reached();
1109 tcg_temp_free(tmp);
1111 return 0;
1112 case 1: /* Address register direct. */
1113 return -1;
1114 case 2: /* Indirect register */
1115 addr = get_areg(s, reg0);
1116 gen_ldst_fp(s, opsize, addr, fp, what, index);
1117 return 0;
1118 case 3: /* Indirect postincrement. */
1119 addr = cpu_aregs[reg0];
1120 gen_ldst_fp(s, opsize, addr, fp, what, index);
1121 tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
1122 return 0;
1123 case 4: /* Indirect predecrememnt. */
1124 addr = gen_lea_mode(env, s, mode, reg0, opsize);
1125 if (IS_NULL_QREG(addr)) {
1126 return -1;
1128 gen_ldst_fp(s, opsize, addr, fp, what, index);
1129 tcg_gen_mov_i32(cpu_aregs[reg0], addr);
1130 return 0;
1131 case 5: /* Indirect displacement. */
1132 case 6: /* Indirect index + displacement. */
1133 do_indirect:
1134 addr = gen_lea_mode(env, s, mode, reg0, opsize);
1135 if (IS_NULL_QREG(addr)) {
1136 return -1;
1138 gen_ldst_fp(s, opsize, addr, fp, what, index);
1139 return 0;
1140 case 7: /* Other */
1141 switch (reg0) {
1142 case 0: /* Absolute short. */
1143 case 1: /* Absolute long. */
1144 case 2: /* pc displacement */
1145 case 3: /* pc index+displacement. */
1146 goto do_indirect;
1147 case 4: /* Immediate. */
1148 if (what == EA_STORE) {
1149 return -1;
1151 switch (opsize) {
1152 case OS_BYTE:
1153 tmp = tcg_const_i32((int8_t)read_im8(env, s));
1154 gen_helper_exts32(cpu_env, fp, tmp);
1155 tcg_temp_free(tmp);
1156 break;
1157 case OS_WORD:
1158 tmp = tcg_const_i32((int16_t)read_im16(env, s));
1159 gen_helper_exts32(cpu_env, fp, tmp);
1160 tcg_temp_free(tmp);
1161 break;
1162 case OS_LONG:
1163 tmp = tcg_const_i32(read_im32(env, s));
1164 gen_helper_exts32(cpu_env, fp, tmp);
1165 tcg_temp_free(tmp);
1166 break;
1167 case OS_SINGLE:
1168 tmp = tcg_const_i32(read_im32(env, s));
1169 gen_helper_extf32(cpu_env, fp, tmp);
1170 tcg_temp_free(tmp);
1171 break;
1172 case OS_DOUBLE:
1173 t64 = tcg_const_i64(read_im64(env, s));
1174 gen_helper_extf64(cpu_env, fp, t64);
1175 tcg_temp_free_i64(t64);
1176 break;
1177 case OS_EXTENDED:
1178 if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
1179 gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1180 break;
1182 tmp = tcg_const_i32(read_im32(env, s) >> 16);
1183 tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
1184 tcg_temp_free(tmp);
1185 t64 = tcg_const_i64(read_im64(env, s));
1186 tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
1187 tcg_temp_free_i64(t64);
1188 break;
1189 case OS_PACKED:
1190 /* unimplemented data type on 68040/ColdFire
1191 * FIXME if needed for another FPU
1193 gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1194 break;
1195 default:
1196 g_assert_not_reached();
1198 return 0;
1199 default:
1200 return -1;
1203 return -1;
1206 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1207 int opsize, TCGv_ptr fp, ea_what what, int index)
1209 int mode = extract32(insn, 3, 3);
1210 int reg0 = REG(insn, 0);
1211 return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
1214 typedef struct {
1215 TCGCond tcond;
1216 bool g1;
1217 bool g2;
1218 TCGv v1;
1219 TCGv v2;
1220 } DisasCompare;
/* Lower the m68k condition code COND (0..15) into a TCG comparison,
 * without forcing a full flag flush when the current cc_op allows a
 * cheaper form.  Fills in *C; the caller must call free_cond() on it.
 */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly.  */
    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
        c->g1 = c->g2 = 1;
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            /* PL/MI test the sign of the subtraction result, so recompute
               it (N - V holds dest - src for the CMP cc_ops) and re-extend
               to the width recorded in the cc_op.  */
            c->g1 = c->g2 = 0;
            c->v2 = tcg_const_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    c->g1 = 1;
    c->g2 = 0;
    c->v2 = tcg_const_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /* Logic operations clear V, which simplifies LE to (Z || N),
           and since Z and N are co-located, this becomes a normal
           comparison vs N.  */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N.  */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C.  */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above.  */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* V and N are stored as full-width sign values, so test the sign.  */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /* tmp = (Z ? -1 : 0) | (N ^ V); sign bit set iff LE.  */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_neg_i32(tmp, tmp);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcg_temp_free(tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* Even-numbered conditions are the negation of the following odd one.  */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
/* Release the temporaries held by *C, skipping global TCG values.  */
static void free_cond(DisasCompare *c)
{
    if (!c->g1) {
        tcg_temp_free(c->v1);
    }
    if (!c->g2) {
        tcg_temp_free(c->v2);
    }
}
/* Emit a conditional branch to L1 taken when m68k condition COND holds.  */
static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
{
    DisasCompare c;

    gen_cc_cond(&c, s, cond);
    update_cc_op(s);
    tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
    free_cond(&c);
}
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_lookup_tb(DisasContext *s)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->is_jmp = DISAS_UPDATE;  /* ends the TB so the new state takes effect */
}
/* Load the EA operand of the current insn into RESULT (sign-extending
 * when OP_SIGN).  Uses `s' and `insn' from the enclosing scope; bails
 * out of the caller with an address fault on a bad addressing mode.  */
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU, IS_USER(s));     \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
/* Store VAL to the EA described by INSN.  Uses `s' from the enclosing
 * scope; bails out of the caller with an address fault on a bad mode.  */
#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp,       \
                                EA_STORE, IS_USER(s));                  \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
/* Direct TB chaining is only safe when the destination stays on the
 * same guest page as the TB start or the current insn (system mode);
 * user mode has no page-protection concerns, so always allow it.  */
static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
/* Generate a jump to an immediate address.  N selects which of the TB's
 * two chain slots to use; under single-stepping raise EXCP_DEBUG instead.  */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        gen_exception(s, dest, EXCP_DEBUG);
    } else if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb + n);  /* chainable exit */
    } else {
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(0);                     /* unchained exit */
    }
    s->is_jmp = DISAS_TB_JUMP;
}
/* Scc <ea>: set the destination byte to 0xff if condition holds, else 0.  */
DISAS_INSN(scc)
{
    DisasCompare c;
    int cond;
    TCGv tmp;

    cond = (insn >> 8) & 0xf;
    gen_cc_cond(&c, s, cond);

    tmp = tcg_temp_new();
    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
    free_cond(&c);

    /* setcond yields 0/1; negate to get the architectural 0/-1 byte.  */
    tcg_gen_neg_i32(tmp, tmp);
    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
    tcg_temp_free(tmp);
}
/* DBcc Dn,<label>: if cc false, decrement Dn.w and branch unless it
 * reached -1.  The displacement is relative to the word after the opcode.  */
DISAS_INSN(dbcc)
{
    TCGLabel *l1;
    TCGv reg;
    TCGv tmp;
    int16_t offset;
    uint32_t base;

    reg = DREG(insn, 0);
    base = s->pc;       /* pc of the extension word = branch base */
    offset = (int16_t)read_im16(env, s);
    l1 = gen_new_label();
    gen_jmpcc(s, (insn >> 8) & 0xf, l1);    /* cc true: fall through insn */

    tmp = tcg_temp_new();
    tcg_gen_ext16s_i32(tmp, reg);
    tcg_gen_addi_i32(tmp, tmp, -1);
    gen_partset_reg(OS_WORD, reg, tmp);     /* only the low word is written */
    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
    gen_jmp_tb(s, 1, base + offset);        /* loop back */
    gen_set_label(l1);
    gen_jmp_tb(s, 0, s->pc);                /* exit the loop */
}
/* Line-A opcode with no MAC unit: raise the line-A emulator trap.  */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->insn_pc, EXCP_LINEA);
}
/* Line-F opcode with no FPU: raise the line-F emulator trap.  */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->insn_pc, EXCP_LINEF);
}
1525 DISAS_INSN(undef)
1527 /* ??? This is both instructions that are as yet unimplemented
1528 for the 680x0 series, as well as those that are implemented
1529 but actually illegal for CPU32 or pre-68020. */
1530 qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x",
1531 insn, s->insn_pc);
1532 gen_exception(s, s->insn_pc, EXCP_UNSUPPORTED);
/* muls.w / mulu.w <ea>,Dn: 16x16 -> 32 multiply; bit 8 of the opcode
 * selects signed.  Result replaces all 32 bits of Dn; logic flags set.  */
DISAS_INSN(mulw)
{
    TCGv reg;
    TCGv tmp;
    TCGv src;
    int sign;

    sign = (insn & 0x100) != 0;
    reg = DREG(insn, 9);
    tmp = tcg_temp_new();
    if (sign)
        tcg_gen_ext16s_i32(tmp, reg);
    else
        tcg_gen_ext16u_i32(tmp, reg);
    SRC_EA(env, src, OS_WORD, sign, NULL);  /* operand extended to match */
    tcg_gen_mul_i32(tmp, tmp, src);
    tcg_gen_mov_i32(reg, tmp);
    gen_logic_cc(s, tmp, OS_LONG);
    tcg_temp_free(tmp);
}
/* divs.w / divu.w <ea>,Dn: 32/16 -> 16-bit remainder : 16-bit quotient.
 * The helpers handle divide-by-zero and overflow, and set the flags.  */
DISAS_INSN(divw)
{
    int sign;
    TCGv src;
    TCGv destr;

    /* divX.w <EA>,Dn    32/16 -> 16r:16q */

    sign = (insn & 0x100) != 0;

    /* dest.l / src.w */

    SRC_EA(env, src, OS_WORD, sign, NULL);
    destr = tcg_const_i32(REG(insn, 9));    /* register number, not value */
    if (sign) {
        gen_helper_divsw(cpu_env, destr, src);
    } else {
        gen_helper_divuw(cpu_env, destr, src);
    }
    tcg_temp_free(destr);

    set_cc_op(s, CC_OP_FLAGS);
}
/* divs.l / divu.l and the 64/32 quad forms; selected by the extension
 * word: bit 11 = signed, bit 10 = 64-bit dividend (requires
 * M68K_FEATURE_QUAD_MULDIV).  Helpers set flags and handle traps.  */
DISAS_INSN(divl)
{
    TCGv num, reg, den;
    int sign;
    uint16_t ext;

    ext = read_im16(env, s);

    sign = (ext & 0x0800) != 0;

    if (ext & 0x400) {
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
            return;
        }

        /* divX.l <EA>, Dr:Dq       64/32 -> 32r:32q */

        SRC_EA(env, den, OS_LONG, 0, NULL);
        num = tcg_const_i32(REG(ext, 12));  /* quotient register number */
        reg = tcg_const_i32(REG(ext, 0));   /* remainder register number */
        if (sign) {
            gen_helper_divsll(cpu_env, num, reg, den);
        } else {
            gen_helper_divull(cpu_env, num, reg, den);
        }
        tcg_temp_free(reg);
        tcg_temp_free(num);
        set_cc_op(s, CC_OP_FLAGS);
        return;
    }

    /* divX.l <EA>, Dq        32/32 -> 32q     */
    /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */

    SRC_EA(env, den, OS_LONG, 0, NULL);
    num = tcg_const_i32(REG(ext, 12));
    reg = tcg_const_i32(REG(ext, 0));
    if (sign) {
        gen_helper_divsl(cpu_env, num, reg, den);
    } else {
        gen_helper_divul(cpu_env, num, reg, den);
    }
    tcg_temp_free(reg);
    tcg_temp_free(num);

    set_cc_op(s, CC_OP_FLAGS);
}
/* Two-digit packed-BCD add: dest10 = dest10 + src10 + X.
 * DEST and SRC are zero-extended bytes; the decimal carry ends up in
 * bit 8 of DEST for bcd_flags() to pick up.  */
static void bcd_add(TCGv dest, TCGv src)
{
    TCGv t0, t1;

    /* dest10 = dest10 + src10 + X
     *
     *        t1 = src
     *        t2 = t1 + 0x066
     *        t3 = t2 + dest + X
     *        t4 = t2 ^ dest
     *        t5 = t3 ^ t4
     *        t6 = ~t5 & 0x110
     *        t7 = (t6 >> 2) | (t6 >> 3)
     *        return t3 - t7
     */

    /* t1 = (src + 0x066) + dest + X
     *    = result with some possible exceeding 0x6
     */

    t0 = tcg_const_i32(0x066);
    tcg_gen_add_i32(t0, t0, src);

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_add_i32(t1, t1, QREG_CC_X);

    /* we will remove exceeding 0x6 where there is no carry */

    /* t0 = (src + 0x0066) ^ dest
     *    = t1 without carries
     */

    tcg_gen_xor_i32(t0, t0, dest);

    /* extract the carries
     * t0 = t0 ^ t1
     *    = only the carries
     */

    tcg_gen_xor_i32(t0, t0, t1);

    /* generate 0x1 where there is no carry
     * and for each 0x10, generate a 0x6
     */

    tcg_gen_shri_i32(t0, t0, 3);
    tcg_gen_not_i32(t0, t0);
    tcg_gen_andi_i32(t0, t0, 0x22);
    tcg_gen_add_i32(dest, t0, t0);      /* dest = 2*t0 */
    tcg_gen_add_i32(dest, dest, t0);    /* dest = 3*t0 = 0x6 per no-carry */
    tcg_temp_free(t0);

    /* remove the exceeding 0x6
     * for digits that have not generated a carry
     */

    tcg_gen_sub_i32(dest, t1, dest);
    tcg_temp_free(t1);
}
/* Two-digit packed-BCD subtract: dest10 = dest10 - src10 - X,
 * implemented as a BCD add of the ten's complement of SRC.  */
static void bcd_sub(TCGv dest, TCGv src)
{
    TCGv t0, t1, t2;

    /* dest10 = dest10 - src10 - X
     *        = bcd_add(dest + 1 - X, 0x199 - src)
     */

    /* t0 = 0x066 + (0x199 - src) */

    t0 = tcg_temp_new();
    tcg_gen_subfi_i32(t0, 0x1ff, src);

    /* t1 = t0 + dest + 1 - X*/

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_addi_i32(t1, t1, 1);
    tcg_gen_sub_i32(t1, t1, QREG_CC_X);

    /* t2 = t0 ^ dest */

    t2 = tcg_temp_new();
    tcg_gen_xor_i32(t2, t0, dest);

    /* t0 = t1 ^ t2 */

    tcg_gen_xor_i32(t0, t1, t2);

    /* t2 = ~t0 & 0x110
     * t0 = (t2 >> 2) | (t2 >> 3)
     *
     * to fit on 8bit operands, changed in:
     *
     * t2 = ~(t0 >> 3) & 0x22
     * t0 = t2 + t2
     * t0 = t0 + t2
     */

    tcg_gen_shri_i32(t2, t0, 3);
    tcg_gen_not_i32(t2, t2);
    tcg_gen_andi_i32(t2, t2, 0x22);
    tcg_gen_add_i32(t0, t2, t2);
    tcg_gen_add_i32(t0, t0, t2);
    tcg_temp_free(t2);

    /* return t1 - t0 */

    tcg_gen_sub_i32(dest, t1, t0);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
/* Set flags from a BCD result: Z is sticky (only cleared by a nonzero
 * result byte), and the decimal carry in bit 8 of VAL goes to C and X.  */
static void bcd_flags(TCGv val)
{
    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);            /* CC_C as scratch */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);    /* !Z is sticky */

    tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);          /* carry out */

    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
}
/* abcd Dx,Dy: BCD add of the low bytes of two data registers.  */
DISAS_INSN(abcd_reg)
{
    TCGv src;
    TCGv dest;

    gen_flush_flags(s); /* !Z is sticky */

    src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);
    bcd_add(dest, src);
    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);  /* write low byte only */

    bcd_flags(dest);
}
/* abcd -(Ax),-(Ay): BCD add of two memory bytes with pre-decrement.  */
DISAS_INSN(abcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_add(dest, src);

    /* Store back to the destination address captured above.  */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
/* sbcd Dx,Dy: BCD subtract of the low bytes of two data registers.  */
DISAS_INSN(sbcd_reg)
{
    TCGv src, dest;

    gen_flush_flags(s); /* !Z is sticky */

    src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);

    bcd_sub(dest, src);

    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);  /* write low byte only */

    bcd_flags(dest);
}
/* sbcd -(Ax),-(Ay): BCD subtract of two memory bytes with pre-decrement.  */
DISAS_INSN(sbcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_sub(dest, src);

    /* Store back to the destination address captured above.  */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
/* nbcd <ea>: BCD negate, i.e. 0 - operand - X.  */
DISAS_INSN(nbcd)
{
    TCGv src, dest;
    TCGv addr;

    gen_flush_flags(s); /* !Z is sticky */

    SRC_EA(env, src, OS_BYTE, 0, &addr);

    dest = tcg_const_i32(0);
    bcd_sub(dest, src);

    DEST_EA(env, insn, OS_BYTE, dest, &addr);

    bcd_flags(dest);

    tcg_temp_free(dest);
}
/* add/sub (bit 14 of the opcode selects add); bit 8 selects the
 * direction: Dn op <ea> -> <ea>, or <ea> op Dn -> Dn.  X is computed
 * from the unsigned carry/borrow; N/V/Z come from gen_update_cc_add.  */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;
    int opsize;

    add = (insn & 0x4000) != 0;
    opsize = insn_opsize(insn);
    reg = gen_extend(DREG(insn, 9), opsize, 1);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* Dn op <ea> -> <ea>: keep the address for the store-back.  */
        SRC_EA(env, tmp, opsize, 1, &addr);
        src = reg;
    } else {
        tmp = reg;
        SRC_EA(env, src, opsize, 1, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* carry out iff the sum wrapped below either operand */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADDB + opsize);
    } else {
        /* borrow iff minuend < subtrahend (unsigned) */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUBB + opsize);
    }
    gen_update_cc_add(dest, src, opsize);
    if (insn & 0x100) {
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
    tcg_temp_free(dest);
}
/* Reverse the order of the bits in REG. */
DISAS_INSN(bitrev)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_helper_bitrev(reg, reg);    /* flags unchanged; helper does the work */
}
/* btst/bchg/bclr/bset with the bit number in a data register.  Operates
 * on a long when the EA is a data register (bit modulo 32), otherwise on
 * a memory byte (bit modulo 8).  Z reflects the tested bit before change.  */
DISAS_INSN(bitop_reg)
{
    int opsize;
    int op;
    TCGv src1;
    TCGv src2;
    TCGv tmp;
    TCGv addr;
    TCGv dest;

    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;       /* memory operand */
    else
        opsize = OS_LONG;       /* data register operand */
    op = (insn >> 6) & 3;       /* 0=btst 1=bchg 2=bclr 3=bset */
    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);

    gen_flush_flags(s);
    src2 = tcg_temp_new();
    if (opsize == OS_BYTE)
        tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
    else
        tcg_gen_andi_i32(src2, DREG(insn, 9), 31);

    tmp = tcg_const_i32(1);
    tcg_gen_shl_i32(tmp, tmp, src2);    /* tmp = bit mask */
    tcg_temp_free(src2);

    tcg_gen_and_i32(QREG_CC_Z, src1, tmp);  /* Z tests the old bit */

    dest = tcg_temp_new();
    switch (op) {
    case 1: /* bchg */
        tcg_gen_xor_i32(dest, src1, tmp);
        break;
    case 2: /* bclr */
        tcg_gen_andc_i32(dest, src1, tmp);
        break;
    case 3: /* bset */
        tcg_gen_or_i32(dest, src1, tmp);
        break;
    default: /* btst */
        break;
    }
    tcg_temp_free(tmp);
    if (op) {
        DEST_EA(env, insn, opsize, dest, &addr);
    }
    tcg_temp_free(dest);
}
/* sats Dn (ColdFire): saturate Dn to 0x7fffffff/0x80000000 if the last
 * operation overflowed (V set); then set logic flags on the result.  */
DISAS_INSN(sats)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_flush_flags(s);
    gen_helper_sats(reg, reg, QREG_CC_V);
    gen_logic_cc(s, reg, OS_LONG);
}
/* Push the 32-bit VAL onto the stack: SP -= 4, then store at SP.  */
static void gen_push(DisasContext *s, TCGv val)
{
    TCGv tmp;

    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, val, IS_USER(s));
    /* Only commit SP after the store, so a fault leaves SP untouched.  */
    tcg_gen_mov_i32(QREG_SP, tmp);
    tcg_temp_free(tmp);
}
1963 static TCGv mreg(int reg)
1965 if (reg < 8) {
1966 /* Dx */
1967 return cpu_dregs[reg];
1969 /* Ax */
1970 return cpu_aregs[reg & 7];
/* movem: move multiple registers to/from memory.  MASK (extension word)
 * selects D0-D7 (bits 0-7) and A0-A7 (bits 8-15).  Post-increment is
 * only valid for loads, pre-decrement only for stores.  */
DISAS_INSN(movem)
{
    TCGv addr, incr, tmp, r[16];
    int is_load = (insn & 0x0400) != 0;
    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
    uint16_t mask = read_im16(env, s);
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    int i;

    tmp = cpu_aregs[reg0];

    switch (mode) {
    case 0: /* data register direct */
    case 1: /* addr register direct */
    do_addr_fault:
        gen_addr_fault(s);
        return;

    case 2: /* indirect */
        break;

    case 3: /* indirect post-increment */
        if (!is_load) {
            /* post-increment is not allowed */
            goto do_addr_fault;
        }
        break;

    case 4: /* indirect pre-decrement */
        if (is_load) {
            /* pre-decrement is not allowed */
            goto do_addr_fault;
        }
        /* We want a bare copy of the address reg, without any pre-decrement
           adjustment, as gen_lea would provide. */
        break;

    default:
        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(tmp)) {
            goto do_addr_fault;
        }
        break;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    incr = tcg_const_i32(opsize_bytes(opsize));

    if (is_load) {
        /* memory to register: load everything first, then commit, so a
           fault mid-sequence does not leave registers half-updated.  */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
                tcg_gen_add_i32(addr, addr, incr);
            }
        }
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                tcg_gen_mov_i32(mreg(i), r[i]);
                tcg_temp_free(r[i]);
            }
        }
        if (mode == 3) {
            /* post-increment: movem (An)+,X */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        }
    } else {
        /* register to memory */
        if (mode == 4) {
            /* pre-decrement: movem X,-(An); mask bit 15 = D0 here, so
               iterate the registers in reverse.  */
            for (i = 15; i >= 0; i--) {
                if ((mask << i) & 0x8000) {
                    tcg_gen_sub_i32(addr, addr, incr);
                    if (reg0 + 8 == i &&
                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
                        /* M68020+: if the addressing register is the
                         * register moved to memory, the value written
                         * is the initial value decremented by the size of
                         * the operation, regardless of how many actual
                         * stores have been performed until this point.
                         * M68000/M68010: the value is the initial value.
                         */
                        tmp = tcg_temp_new();
                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
                        gen_store(s, opsize, addr, tmp, IS_USER(s));
                        tcg_temp_free(tmp);
                    } else {
                        gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    }
                }
            }
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        } else {
            for (i = 0; i < 16; i++) {
                if (mask & (1 << i)) {
                    gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    tcg_gen_add_i32(addr, addr, incr);
                }
            }
        }
    }

    tcg_temp_free(incr);
    tcg_temp_free(addr);
}
/* movep: move a word or long between Dn and alternate bytes of memory
 * at (d16,An), most significant byte first, stepping the address by 2.
 * Bit 6 selects long, bit 7 selects register-to-memory direction.  */
DISAS_INSN(movep)
{
    uint8_t i;
    int16_t displ;
    TCGv reg;
    TCGv addr;
    TCGv abuf;
    TCGv dbuf;

    displ = read_im16(env, s);

    addr = AREG(insn, 0);
    reg = DREG(insn, 9);

    abuf = tcg_temp_new();
    tcg_gen_addi_i32(abuf, addr, displ);
    dbuf = tcg_temp_new();

    if (insn & 0x40) {
        i = 4;      /* movep.l */
    } else {
        i = 2;      /* movep.w */
    }

    if (insn & 0x80) {
        /* register to memory: store bytes high-to-low */
        for ( ; i > 0 ; i--) {
            tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
            tcg_gen_qemu_st8(dbuf, abuf, IS_USER(s));
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    } else {
        /* memory to register: deposit bytes high-to-low into reg */
        for ( ; i > 0 ; i--) {
            tcg_gen_qemu_ld8u(dbuf, abuf, IS_USER(s));
            tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    }
    tcg_temp_free(abuf);
    tcg_temp_free(dbuf);
}
/* btst/bchg/bclr/bset with an immediate bit number (extension word).
 * Long for a data-register EA, byte for memory; Z tests the old bit.
 * 680x0 reserves more high bits of the extension word than ColdFire.  */
DISAS_INSN(bitop_im)
{
    int opsize;
    int op;
    TCGv src1;
    uint32_t mask;
    int bitnum;
    TCGv tmp;
    TCGv addr;

    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;       /* memory operand */
    else
        opsize = OS_LONG;       /* data register operand */
    op = (insn >> 6) & 3;       /* 0=btst 1=bchg 2=bclr 3=bset */

    bitnum = read_im16(env, s);
    if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
        if (bitnum & 0xfe00) {
            disas_undef(env, s, insn);
            return;
        }
    } else {
        if (bitnum & 0xff00) {
            disas_undef(env, s, insn);
            return;
        }
    }

    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);

    gen_flush_flags(s);
    if (opsize == OS_BYTE)
        bitnum &= 7;
    else
        bitnum &= 31;
    mask = 1 << bitnum;

    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);    /* Z tests the old bit */

    if (op) {
        tmp = tcg_temp_new();
        switch (op) {
        case 1: /* bchg */
            tcg_gen_xori_i32(tmp, src1, mask);
            break;
        case 2: /* bclr */
            tcg_gen_andi_i32(tmp, src1, ~mask);
            break;
        case 3: /* bset */
            tcg_gen_ori_i32(tmp, src1, mask);
            break;
        default: /* btst */
            break;
        }
        DEST_EA(env, insn, opsize, tmp, &addr);
        tcg_temp_free(tmp);
    }
}
/* Materialize the CCR into a fresh temp (caller frees).  Flags are
 * flushed via update_cc_op so the helper sees consistent state.  */
static TCGv gen_get_ccr(DisasContext *s)
{
    TCGv dest;

    update_cc_op(s);
    dest = tcg_temp_new();
    gen_helper_get_ccr(dest, cpu_env);
    return dest;
}
/* Materialize the full SR: supervisor bits from QREG_SR (upper bits)
 * combined with the freshly computed CCR.  Caller frees the result.  */
static TCGv gen_get_sr(DisasContext *s)
{
    TCGv ccr;
    TCGv sr;

    ccr = gen_get_ccr(s);
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);  /* keep system byte, drop CCR bits */
    tcg_gen_or_i32(sr, sr, ccr);
    return sr;
}
/* Load SR (or just CCR) from an immediate VAL.  The CCR-only path writes
 * the flag registers directly in their internal encodings: C/X as 0/1,
 * N/V as sign values (-1 when set), Z inverted (0 means Z set).  */
static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
{
    if (ccr_only) {
        tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
        tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
        tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
    } else {
        TCGv sr = tcg_const_i32(val);
        gen_helper_set_sr(cpu_env, sr);
        tcg_temp_free(sr);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
/* Load SR (or just CCR) from the runtime value VAL via helpers.  */
static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
{
    if (ccr_only) {
        gen_helper_set_ccr(cpu_env, val);
    } else {
        gen_helper_set_sr(cpu_env, val);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
/* Common body of "move <ea>,SR" and "move <ea>,CCR": EA mode 0x3c is
 * the immediate form, otherwise read a word operand.  */
static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
                           bool ccr_only)
{
    if ((insn & 0x3f) == 0x3c) {
        uint16_t val;
        val = read_im16(env, s);
        gen_set_sr_im(s, val, ccr_only);
    } else {
        TCGv src;
        SRC_EA(env, src, OS_WORD, 0, NULL);
        gen_set_sr(s, src, ccr_only);
    }
}
/* Immediate arithmetic/logic group: ori/andi/subi/addi/eori/cmpi
 * (op field, bits 11:9).  EA mode 0x3c targets CCR (byte) or SR (word,
 * privileged) for the logic ops only.  */
DISAS_INSN(arith_im)
{
    int op;
    TCGv im;
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;
    bool with_SR = ((insn & 0x3f) == 0x3c);

    op = (insn >> 9) & 7;
    opsize = insn_opsize(insn);
    switch (opsize) {
    case OS_BYTE:
        im = tcg_const_i32((int8_t)read_im8(env, s));
        break;
    case OS_WORD:
        im = tcg_const_i32((int16_t)read_im16(env, s));
        break;
    case OS_LONG:
        im = tcg_const_i32(read_im32(env, s));
        break;
    default:
        abort();
    }

    if (with_SR) {
        /* SR/CCR can only be used with andi/eori/ori */
        if (op == 2 || op == 3 || op == 6) {
            disas_undef(env, s, insn);
            return;
        }
        switch (opsize) {
        case OS_BYTE:
            src1 = gen_get_ccr(s);
            break;
        case OS_WORD:
            if (IS_USER(s)) {
                gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
                return;
            }
            src1 = gen_get_sr(s);
            break;
        case OS_LONG:
            disas_undef(env, s, insn);
            return;
        }
    } else {
        /* cmpi does not write back, so no address is needed.  */
        SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
    }
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_or_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 1: /* andi */
        tcg_gen_and_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 2: /* subi */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
        tcg_gen_sub_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        set_cc_op(s, CC_OP_SUBB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 3: /* addi */
        tcg_gen_add_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
        set_cc_op(s, CC_OP_ADDB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 5: /* eori */
        tcg_gen_xor_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 6: /* cmpi */
        gen_update_cc_cmp(s, src1, im, opsize);
        break;
    default:
        abort();
    }
    tcg_temp_free(im);
    tcg_temp_free(dest);
}
/* cas Dc,Du,<ea>: atomic compare-and-swap via tcg_gen_atomic_cmpxchg.
 * On mismatch, Dc receives the memory value.  Post-increment and
 * pre-decrement EA writeback is applied after the operation.  */
DISAS_INSN(cas)
{
    int opsize;
    TCGv addr;
    uint16_t ext;
    TCGv load;
    TCGv cmp;
    TCGMemOp opc;

    switch ((insn >> 9) & 3) {
    case 1:
        opsize = OS_BYTE;
        opc = MO_SB;
        break;
    case 2:
        opsize = OS_WORD;
        opc = MO_TESW;
        break;
    case 3:
        opsize = OS_LONG;
        opc = MO_TESL;
        break;
    default:
        g_assert_not_reached();
    }

    ext = read_im16(env, s);

    /* cas Dc,Du,<EA> */

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    cmp = gen_extend(DREG(ext, 0), opsize, 1);

    /* if  <EA> == Dc then
     *     <EA> = Du
     *     Dc = <EA> (because <EA> == Dc)
     * else
     *     Dc = <EA>
     */

    load = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
                               IS_USER(s), opc);
    /* update flags before setting cmp to load */
    gen_update_cc_cmp(s, load, cmp, opsize);
    gen_partset_reg(opsize, DREG(ext, 0), load);

    tcg_temp_free(load);

    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrememnt.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
/* cas2.w Dc1:Dc2,Du1:Du2,(Rn1):(Rn2): dual word compare-and-swap.
 * Register numbers are packed into one constant for the helper.  */
DISAS_INSN(cas2w)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2;
    TCGv regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /* if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    regs = tcg_const_i32(REG(ext2, 6) |
                         (REG(ext1, 6) << 3) |
                         (REG(ext2, 0) << 6) |
                         (REG(ext1, 0) << 9));
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        /* No parallel-safe word helper here (unlike cas2l); punt to
           exclusive execution.  */
        gen_helper_exit_atomic(cpu_env);
    } else {
        gen_helper_cas2w(cpu_env, regs, addr1, addr2);
    }
    tcg_temp_free(regs);

    /* Note that cas2w also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPW;
    s->cc_op_synced = 1;
}
/* cas2.l Dc1:Dc2,Du1:Du2,(Rn1):(Rn2): dual long compare-and-swap; has a
 * dedicated parallel helper for MTTCG.  */
DISAS_INSN(cas2l)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2, regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /* if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    regs = tcg_const_i32(REG(ext2, 6) |
                         (REG(ext1, 6) << 3) |
                         (REG(ext2, 0) << 6) |
                         (REG(ext1, 0) << 9));
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
    } else {
        gen_helper_cas2l(cpu_env, regs, addr1, addr2);
    }
    tcg_temp_free(regs);

    /* Note that cas2l also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPL;
    s->cc_op_synced = 1;
}
/* byterev Dn (ColdFire): byte-swap the 32-bit register; flags unchanged.  */
DISAS_INSN(byterev)
{
    TCGv reg;

    reg = DREG(insn, 0);
    tcg_gen_bswap32_i32(reg, reg);
}
/* move.b/w/l and movea: top opcode bits give the size; an op field of 1
 * means movea (no flags).  Plain moves set the logic flags.  */
DISAS_INSN(move)
{
    TCGv src;
    TCGv dest;
    int op;
    int opsize;

    switch (insn >> 12) {
    case 1: /* move.b */
        opsize = OS_BYTE;
        break;
    case 2: /* move.l */
        opsize = OS_LONG;
        break;
    case 3: /* move.w */
        opsize = OS_WORD;
        break;
    default:
        abort();
    }
    SRC_EA(env, src, opsize, 1, NULL);
    op = (insn >> 6) & 7;
    if (op == 1) {
        /* movea */
        /* The value will already have been sign extended.  */
        dest = AREG(insn, 9);
        tcg_gen_mov_i32(dest, src);
    } else {
        /* normal move */
        uint16_t dest_ea;
        /* Destination EA is encoded reversed: reg in 11:9, mode in 8:6.  */
        dest_ea = ((insn >> 9) & 7) | (op << 3);
        DEST_EA(env, dest_ea, opsize, src, NULL);
        /* This will be correct because loads sign extend.  */
        gen_logic_cc(s, src, opsize);
    }
}
/* negx <ea>: negate with extend, result = -(operand + X).  Z is sticky.  */
DISAS_INSN(negx)
{
    TCGv z;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, &addr);

    gen_flush_flags(s); /* compute old Z */

    /* Perform subtract with borrow.
     * (X, N) = -(src + X);
     */

    z = tcg_const_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
    tcg_temp_free(z);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);  /* keep only the borrow bit */

    /* Compute signed-overflow for negation.  The normal formula for
     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
     * this simplies to res & src.
     */

    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */

    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
}
/* lea <ea>,An: load the effective address itself; flags unchanged.  */
DISAS_INSN(lea)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 9);
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    tcg_gen_mov_i32(reg, tmp);
}
/* clr <ea>: store zero and set logic flags (Z set, N/V/C clear).  */
DISAS_INSN(clr)
{
    int opsize;
    TCGv zero;

    zero = tcg_const_i32(0);

    opsize = insn_opsize(insn);
    DEST_EA(env, insn, opsize, zero, NULL);
    gen_logic_cc(s, zero, opsize);
    tcg_temp_free(zero);
}
/* move CCR,<ea>: store the condition code register as a word.  */
DISAS_INSN(move_from_ccr)
{
    TCGv ccr;

    ccr = gen_get_ccr(s);
    DEST_EA(env, insn, OS_WORD, ccr, NULL);
}
/* neg <ea>: two's complement negate; flags as for 0 - operand, with
 * X = C = (result != 0).  */
DISAS_INSN(neg)
{
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src1, opsize, 1, &addr);
    dest = tcg_temp_new();
    tcg_gen_neg_i32(dest, src1);
    set_cc_op(s, CC_OP_SUBB + opsize);
    gen_update_cc_add(dest, src1, opsize);
    tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
    DEST_EA(env, insn, opsize, dest, &addr);
    tcg_temp_free(dest);
}
/* move <ea>,CCR: delegate to the shared SR/CCR mover (ccr_only=true).  */
DISAS_INSN(move_to_ccr)
{
    gen_move_to_sr(env, s, insn, true);
}
/* not <ea>: one's complement; sets logic flags on the result.  */
DISAS_INSN(not)
{
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src1, opsize, 1, &addr);
    dest = tcg_temp_new();
    tcg_gen_not_i32(dest, src1);
    DEST_EA(env, insn, opsize, dest, &addr);
    gen_logic_cc(s, dest, opsize);
}
/* swap Dn: exchange the high and low 16-bit halves; logic flags set.  */
DISAS_INSN(swap)
{
    TCGv src1;
    TCGv src2;
    TCGv reg;

    src1 = tcg_temp_new();
    src2 = tcg_temp_new();
    reg = DREG(insn, 0);
    tcg_gen_shli_i32(src1, reg, 16);
    tcg_gen_shri_i32(src2, reg, 16);
    tcg_gen_or_i32(reg, src1, src2);
    tcg_temp_free(src2);
    tcg_temp_free(src1);
    gen_logic_cc(s, reg, OS_LONG);
}
/* bkpt: raise a debug exception at the current instruction.  */
DISAS_INSN(bkpt)
{
    gen_exception(s, s->insn_pc, EXCP_DEBUG);
}
/* pea <ea>: push the effective address onto the stack.  */
DISAS_INSN(pea)
{
    TCGv tmp;

    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    gen_push(s, tmp);
}
/* ext/extb Dn: sign-extend within the register.  op field: 2 = byte->word
 * (partial register write), 3 = word->long, other = byte->long (extb).  */
DISAS_INSN(ext)
{
    int op;
    TCGv reg;
    TCGv tmp;

    reg = DREG(insn, 0);
    op = (insn >> 6) & 7;
    tmp = tcg_temp_new();
    if (op == 3)
        tcg_gen_ext16s_i32(tmp, reg);
    else
        tcg_gen_ext8s_i32(tmp, reg);
    if (op == 2)
        gen_partset_reg(OS_WORD, reg, tmp);  /* keep the high word */
    else
        tcg_gen_mov_i32(reg, tmp);
    gen_logic_cc(s, tmp, OS_LONG);
    tcg_temp_free(tmp);
}
DISAS_INSN(tst)
{
    /* TST <ea>: set N/Z from the operand, clear V and C; no write-back. */
    int opsize;
    TCGv tmp;

    opsize = insn_opsize(insn);
    SRC_EA(env, tmp, opsize, 1, NULL);
    gen_logic_cc(s, tmp, opsize);
}
DISAS_INSN(pulse)
{
    /* PULSE (ColdFire): drives an external debug signal on hardware.
       Implemented as a NOP. */
}
DISAS_INSN(illegal)
{
    /* ILLEGAL: raise the illegal-instruction exception. */
    gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
}
/* ??? This should be atomic.  */
DISAS_INSN(tas)
{
    /* TAS <ea>: test the byte (set N/Z, clear V/C from the old value),
       then write it back with bit 7 set.  NOTE(review): hardware performs
       this as one indivisible read-modify-write cycle; here the load and
       store are separate, so a racing guest CPU could observe the window. */
    TCGv dest;
    TCGv src1;
    TCGv addr;

    dest = tcg_temp_new();
    SRC_EA(env, src1, OS_BYTE, 1, &addr);
    gen_logic_cc(s, src1, OS_BYTE);
    tcg_gen_ori_i32(dest, src1, 0x80);
    DEST_EA(env, insn, OS_BYTE, dest, &addr);
    tcg_temp_free(dest);
}
DISAS_INSN(mull)
{
    /* MULS.L / MULU.L: 32x32 multiply.  The extension word selects
       signedness (bit 11) and a 64-bit result pair Dh:Dl (bit 10,
       requires the QUAD_MULDIV feature).  Flags are computed eagerly
       into the CC registers (CC_OP_FLAGS). */
    uint16_t ext;
    TCGv src1;
    int sign;

    ext = read_im16(env, s);

    sign = ext & 0x800;

    if (ext & 0x400) {
        /* 64-bit result form: Dh = high word (ext[0..2]), Dl = low. */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->insn_pc, EXCP_UNSUPPORTED);
            return;
        }

        SRC_EA(env, src1, OS_LONG, 0, NULL);

        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        } else {
            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        }
        /* if Dl == Dh, 68040 returns low word */
        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
        /* Z reflects the whole 64-bit product being zero. */
        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);

        tcg_gen_movi_i32(QREG_CC_V, 0);
        tcg_gen_movi_i32(QREG_CC_C, 0);

        set_cc_op(s, CC_OP_FLAGS);
        return;
    }
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
        /* 680x0: V is set when the product does not fit in 32 bits. */
        tcg_gen_movi_i32(QREG_CC_C, 0);
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
        } else {
            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
        }
        tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);

        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

        set_cc_op(s, CC_OP_FLAGS);
    } else {
        /* The upper 32 bits of the product are discarded, so
           muls.l and mulu.l are functionally equivalent.  */
        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
    }
}
/* Common LINK implementation: push An, load An with the new frame
 * pointer (old SP - 4), then add the signed displacement to SP.
 * When An is A7 itself, the pushed value wins and An is not reloaded.
 */
static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 0);
    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
    if ((insn & 7) != 7) {
        /* An != A7: An becomes the new frame pointer. */
        tcg_gen_mov_i32(reg, tmp);
    }
    tcg_gen_addi_i32(QREG_SP, tmp, offset);
    tcg_temp_free(tmp);
}
2843 DISAS_INSN(link)
2845 int16_t offset;
2847 offset = read_im16(env, s);
2848 gen_link(s, insn, offset);
2851 DISAS_INSN(linkl)
2853 int32_t offset;
2855 offset = read_im32(env, s);
2856 gen_link(s, insn, offset);
DISAS_INSN(unlk)
{
    /* UNLK An: SP = An, then pop the saved frame pointer into An.
       The old An value is copied first so this also works when An is A7. */
    TCGv src;
    TCGv reg;
    TCGv tmp;

    src = tcg_temp_new();
    reg = AREG(insn, 0);
    tcg_gen_mov_i32(src, reg);
    tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
    tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, src, 4);
    tcg_temp_free(src);
}
#if defined(CONFIG_SOFTMMU)
DISAS_INSN(reset)
{
    /* RESET: privileged; asserts the external reset line via a helper. */
    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }

    gen_helper_reset(cpu_env);
}
#endif
/* NOP: no operation. */
DISAS_INSN(nop)
{
}
DISAS_INSN(rtd)
{
    /* RTD #disp: pop the return address, then additionally release
       'disp' bytes of stack (plus the 4 bytes of the address itself). */
    TCGv tmp;
    int16_t offset = read_im16(env, s);

    tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
    tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
    gen_jmp(s, tmp);
}
DISAS_INSN(rts)
{
    /* RTS: pop the return address from the stack and jump to it. */
    TCGv tmp;

    tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
    tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
    gen_jmp(s, tmp);
}
DISAS_INSN(jump)
{
    /* JMP/JSR <ea>: bit 6 clear means JSR (push return address first). */
    TCGv tmp;

    /* Load the target address first to ensure correct exception
       behavior.  */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    if ((insn & 0x40) == 0) {
        /* jsr */
        gen_push(s, tcg_const_i32(s->pc));
    }
    gen_jmp(s, tmp);
}
DISAS_INSN(addsubq)
{
    /* ADDQ/SUBQ #imm,<ea>: quick add/subtract of 1..8 (encoded 1..7,0).
       Bit 8 selects subtract.  Address-register destinations are always
       long-sized and leave the condition codes untouched. */
    TCGv src;
    TCGv dest;
    TCGv val;
    int imm;
    TCGv addr;
    int opsize;

    if ((insn & 070) == 010) {
        /* Operation on address register is always long.  */
        opsize = OS_LONG;
    } else {
        opsize = insn_opsize(insn);
    }
    SRC_EA(env, src, opsize, 1, &addr);
    imm = (insn >> 9) & 7;
    if (imm == 0) {
        imm = 8;  /* encoding 0 means 8 */
    }
    val = tcg_const_i32(imm);
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src);
    if ((insn & 0x38) == 0x08) {
        /* Don't update condition codes if the destination is an
           address register.  */
        if (insn & 0x0100) {
            tcg_gen_sub_i32(dest, dest, val);
        } else {
            tcg_gen_add_i32(dest, dest, val);
        }
    } else {
        if (insn & 0x0100) {
            /* X = borrow-out: dest < val before the subtract. */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            tcg_gen_sub_i32(dest, dest, val);
            set_cc_op(s, CC_OP_SUBB + opsize);
        } else {
            tcg_gen_add_i32(dest, dest, val);
            /* X = carry-out: result wrapped below val. */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            set_cc_op(s, CC_OP_ADDB + opsize);
        }
        gen_update_cc_add(dest, val, opsize);
    }
    tcg_temp_free(val);
    DEST_EA(env, insn, opsize, dest, &addr);
    tcg_temp_free(dest);
}
DISAS_INSN(tpf)
{
    /* TPF (ColdFire "trap false"): a NOP that swallows 0, 1 or 2
       extension words depending on the low opcode bits. */
    switch (insn & 7) {
    case 2: /* One extension word.  */
        s->pc += 2;
        break;
    case 3: /* Two extension words.  */
        s->pc += 4;
        break;
    case 4: /* No extension words.  */
        break;
    default:
        disas_undef(env, s, insn);
    }
}
DISAS_INSN(branch)
{
    /* BRA/BSR/Bcc: 8-bit displacement in the opcode; 0 selects a 16-bit
       extension word, -1 (0xff) a 32-bit one.  Displacements are relative
       to the address following the opcode word (base). */
    int32_t offset;
    uint32_t base;
    int op;
    TCGLabel *l1;

    base = s->pc;
    op = (insn >> 8) & 0xf;
    offset = (int8_t)insn;
    if (offset == 0) {
        offset = (int16_t)read_im16(env, s);
    } else if (offset == -1) {
        offset = read_im32(env, s);
    }
    if (op == 1) {
        /* bsr */
        gen_push(s, tcg_const_i32(s->pc));
    }
    if (op > 1) {
        /* Bcc */
        l1 = gen_new_label();
        /* Branch over the taken path when the inverted condition holds. */
        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
        gen_jmp_tb(s, 1, base + offset);
        gen_set_label(l1);
        gen_jmp_tb(s, 0, s->pc);
    } else {
        /* Unconditional branch.  */
        update_cc_op(s);
        gen_jmp_tb(s, 0, base + offset);
    }
}
3024 DISAS_INSN(moveq)
3026 tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
3027 gen_logic_cc(s, DREG(insn, 9), OS_LONG);
DISAS_INSN(mvzs)
{
    /* MVZ/MVS (ColdFire): move byte/word to a data register with zero
       (bit 7 set) or sign (bit 7 clear) extension; N/Z from the source. */
    int opsize;
    TCGv src;
    TCGv reg;

    if (insn & 0x40)
        opsize = OS_WORD;
    else
        opsize = OS_BYTE;
    /* sign-extend when bit 7 is clear (MVS), zero-extend otherwise */
    SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
    reg = DREG(insn, 9);
    tcg_gen_mov_i32(reg, src);
    gen_logic_cc(s, src, opsize);
}
DISAS_INSN(or)
{
    /* OR Dn,<ea> (bit 8 set) or OR <ea>,Dn (bit 8 clear).
       N/Z from the result, V and C cleared. */
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    reg = gen_extend(DREG(insn, 9), opsize, 0);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* <ea> is the destination: read-modify-write. */
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_or_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        /* Dn is the destination: only the opsize part is written. */
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_or_i32(dest, src, reg);
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
    gen_logic_cc(s, dest, opsize);
    tcg_temp_free(dest);
}
3070 DISAS_INSN(suba)
3072 TCGv src;
3073 TCGv reg;
3075 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3076 reg = AREG(insn, 9);
3077 tcg_gen_sub_i32(reg, reg, src);
/* Emit SUBX: N = dest - src - X, updating X/N/Z/V/C eagerly.
 * Z is "sticky": it is only cleared by a non-zero result, never set,
 * as the m68k SUBX semantics require.  Result is left in QREG_CC_N.
 */
static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp;

    gen_flush_flags(s); /* compute old Z */

    /* Perform substract with borrow.
     * (X, N) = dest - (src + X);
     */

    tmp = tcg_const_i32(0);
    /* Widen src + X into a (value, carry) pair, then subtract it from
       dest with borrow tracked in QREG_CC_X. */
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for substract.  */

    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
    tcg_temp_free(tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
DISAS_INSN(subx_reg)
{
    /* SUBX Dy,Dx: register form.  Operands are sign-extended to the
       operation size; only the opsize part of Dx is written back. */
    TCGv dest;
    TCGv src;
    int opsize;

    opsize = insn_opsize(insn);

    src = gen_extend(DREG(insn, 0), opsize, 1);
    dest = gen_extend(DREG(insn, 9), opsize, 1);

    gen_subx(s, src, dest, opsize);

    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
}
3128 DISAS_INSN(subx_mem)
3130 TCGv src;
3131 TCGv addr_src;
3132 TCGv dest;
3133 TCGv addr_dest;
3134 int opsize;
3136 opsize = insn_opsize(insn);
3138 addr_src = AREG(insn, 0);
3139 tcg_gen_subi_i32(addr_src, addr_src, opsize);
3140 src = gen_load(s, opsize, addr_src, 1, IS_USER(s));
3142 addr_dest = AREG(insn, 9);
3143 tcg_gen_subi_i32(addr_dest, addr_dest, opsize);
3144 dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));
3146 gen_subx(s, src, dest, opsize);
3148 gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
3151 DISAS_INSN(mov3q)
3153 TCGv src;
3154 int val;
3156 val = (insn >> 9) & 7;
3157 if (val == 0)
3158 val = -1;
3159 src = tcg_const_i32(val);
3160 gen_logic_cc(s, src, OS_LONG);
3161 DEST_EA(env, insn, OS_LONG, src, NULL);
3162 tcg_temp_free(src);
DISAS_INSN(cmp)
{
    /* CMP <ea>,Dn: compare (Dn - src) and set flags; no write-back. */
    TCGv src;
    TCGv reg;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(DREG(insn, 9), opsize, 1);
    gen_update_cc_cmp(s, reg, src, opsize);
}
DISAS_INSN(cmpa)
{
    /* CMPA <ea>,An: word sources are sign-extended by SRC_EA, then the
       comparison itself is always performed at long size. */
    int opsize;
    TCGv src;
    TCGv reg;

    if (insn & 0x100) {
        opsize = OS_LONG;
    } else {
        opsize = OS_WORD;
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = AREG(insn, 9);
    gen_update_cc_cmp(s, reg, src, OS_LONG);
}
DISAS_INSN(cmpm)
{
    /* CMPM (Ay)+,(Ax)+: compare two memory operands, both read with
       post-increment; only the flags are updated. */
    int opsize = insn_opsize(insn);
    TCGv src, dst;

    /* Post-increment load (mode 3) from Ay.  */
    src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));
    /* Post-increment load (mode 3) from Ax.  */
    dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));

    gen_update_cc_cmp(s, dst, src, opsize);
}
DISAS_INSN(eor)
{
    /* EOR Dn,<ea>: exclusive-or a data register into the EA operand;
       read-modify-write, N/Z from the result, V and C cleared. */
    TCGv src;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);

    SRC_EA(env, src, opsize, 0, &addr);
    dest = tcg_temp_new();
    tcg_gen_xor_i32(dest, src, DREG(insn, 9));
    gen_logic_cc(s, dest, opsize);
    DEST_EA(env, insn, opsize, dest, &addr);
    tcg_temp_free(dest);
}
3225 static void do_exg(TCGv reg1, TCGv reg2)
3227 TCGv temp = tcg_temp_new();
3228 tcg_gen_mov_i32(temp, reg1);
3229 tcg_gen_mov_i32(reg1, reg2);
3230 tcg_gen_mov_i32(reg2, temp);
3231 tcg_temp_free(temp);
DISAS_INSN(exg_dd)
{
    /* exchange Dx and Dy */
    do_exg(DREG(insn, 9), DREG(insn, 0));
}
DISAS_INSN(exg_aa)
{
    /* exchange Ax and Ay */
    do_exg(AREG(insn, 9), AREG(insn, 0));
}
DISAS_INSN(exg_da)
{
    /* exchange Dx and Ay */
    do_exg(DREG(insn, 9), AREG(insn, 0));
}
DISAS_INSN(and)
{
    /* AND Dn,<ea> (bit 8 set) or AND <ea>,Dn (bit 8 clear).
       N/Z from the result, V and C cleared. */
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;
    int opsize;

    dest = tcg_temp_new();

    opsize = insn_opsize(insn);
    reg = DREG(insn, 9);
    if (insn & 0x100) {
        /* <ea> is the destination: read-modify-write. */
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        /* Dn is the destination: only the opsize part is written. */
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        gen_partset_reg(opsize, reg, dest);
    }
    gen_logic_cc(s, dest, opsize);
    tcg_temp_free(dest);
}
3277 DISAS_INSN(adda)
3279 TCGv src;
3280 TCGv reg;
3282 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3283 reg = AREG(insn, 9);
3284 tcg_gen_add_i32(reg, reg, src);
/* Emit ADDX: N = src + dest + X, updating X/N/Z/V/C eagerly.
 * Z is "sticky": it is only cleared by a non-zero result, never set,
 * as the m68k ADDX semantics require.  Result is left in QREG_CC_N.
 */
static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp;

    gen_flush_flags(s); /* compute old Z */

    /* Perform addition with carry.
     * (X, N) = src + dest + X;
     */

    tmp = tcg_const_i32(0);
    /* Two widening adds accumulate the carry-out into QREG_CC_X. */
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Compute signed-overflow for addition.  */

    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
    tcg_temp_free(tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
DISAS_INSN(addx_reg)
{
    /* ADDX Dy,Dx: register form.  Operands are sign-extended to the
       operation size; only the opsize part of Dx is written back. */
    TCGv dest;
    TCGv src;
    int opsize;

    opsize = insn_opsize(insn);

    dest = gen_extend(DREG(insn, 9), opsize, 1);
    src = gen_extend(DREG(insn, 0), opsize, 1);

    gen_addx(s, src, dest, opsize);

    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
}
DISAS_INSN(addx_mem)
{
    /* ADDX -(Ay),-(Ax): memory form with predecrement addressing;
       the result (in QREG_CC_N) is stored to the destination address. */
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_addx(s, src, dest, opsize);

    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
/* Immediate-count shift (ASL/ASR/LSL/LSR #imm,Dx): count 1..8
 * (0 encodes 8).  Bit 3 selects logical, bit 8 selects left.
 * Flags are computed eagerly (CC_OP_FLAGS): C/X get the last bit
 * shifted out, N/Z the result; V is only set by M68000 ASL.
 */
static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
{
    int count = (insn >> 9) & 7;
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);

    if (count == 0) {
        count = 8;
    }

    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
        tcg_gen_shli_i32(QREG_CC_N, reg, count);

        /* Note that ColdFire always clears V (done above),
           while M68000 sets if the most significant bit is changed at
           any time during the shift operation */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            /* if shift count >= bits, V is (reg != 0) */
            if (count >= bits) {
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
            } else {
                TCGv t0 = tcg_temp_new();
                /* Compare the sign bit against every bit shifted
                   through the sign position. */
                tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
                tcg_gen_sari_i32(t0, reg, bits - count - 1);
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
                tcg_temp_free(t0);
            }
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, reg, count);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, reg, count);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
/* Register-count shift (ASL/ASR/LSL/LSR Dy,Dx).  The count is taken
 * modulo 64, so the operand is widened to 64 bits so that "the last
 * bit shifted out" (for C) is still recoverable even for counts >= 32.
 * X is only updated when the count is non-zero.
 */
static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);
    TCGv s32;
    TCGv_i64 t64, s64;

    t64 = tcg_temp_new_i64();
    s64 = tcg_temp_new_i64();
    s32 = tcg_temp_new();

    /* Note that m68k truncates the shift count modulo 64, not 32.
       In addition, a 64-bit shift makes it easy to find "the last
       bit shifted out", for the carry flag.  */
    tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
    tcg_gen_extu_i32_i64(s64, s32);
    tcg_gen_extu_i32_i64(t64, reg);

    /* Optimistically set V=0.  Also used as a zero source below.  */
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shl_i64(t64, t64, s64);

        if (opsize == OS_LONG) {
            tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
            /* Note that C=0 if shift count is 0, and we get that for free.  */
        } else {
            /* Sub-long: the carry bit sits just above the result. */
            TCGv zero = tcg_const_i32(0);
            tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
            tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
            tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                                s32, zero, zero, QREG_CC_C);
            tcg_temp_free(zero);
        }
        tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);

        /* M68000 sets V if the most significant bit is changed at
         * any time during the shift operation.  Do this via creating
         * an extension of the sign bit, comparing, and discarding
         * the bits below the sign bit.  I.e.
         *     int64_t s = (intN_t)reg;
         *     int64_t t = (int64_t)(intN_t)reg << count;
         *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            TCGv_i64 tt = tcg_const_i64(32);
            /* if shift is greater than 32, use 32 */
            tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
            tcg_temp_free_i64(tt);
            /* Sign extend the input to 64 bits; re-do the shift.  */
            tcg_gen_ext_i32_i64(t64, reg);
            tcg_gen_shl_i64(s64, t64, s64);
            /* Clear all bits that are unchanged.  */
            tcg_gen_xor_i64(t64, t64, s64);
            /* Ignore the bits below the sign bit.  */
            tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
            /* If any bits remain set, we have overflow.  */
            tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
            tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* Right shift: pre-shift the value into the high half so the
           last bit shifted out lands in bit 31 of the low half. */
        tcg_gen_shli_i64(t64, t64, 32);
        if (logical) {
            tcg_gen_shr_i64(t64, t64, s64);
        } else {
            tcg_gen_sar_i64(t64, t64, s64);
        }
        tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);

        /* Note that C=0 if shift count is 0, and we get that for free.  */
        tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);
    }
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

    tcg_temp_free(s32);
    tcg_temp_free_i64(s64);
    tcg_temp_free_i64(t64);

    /* Write back the result.  */
    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
/* Byte-sized immediate shift. */
DISAS_INSN(shift8_im)
{
    shift_im(s, insn, OS_BYTE);
}
/* Word-sized immediate shift. */
DISAS_INSN(shift16_im)
{
    shift_im(s, insn, OS_WORD);
}
/* Long-sized immediate shift. */
DISAS_INSN(shift_im)
{
    shift_im(s, insn, OS_LONG);
}
/* Byte-sized register-count shift. */
DISAS_INSN(shift8_reg)
{
    shift_reg(s, insn, OS_BYTE);
}
/* Word-sized register-count shift. */
DISAS_INSN(shift16_reg)
{
    shift_reg(s, insn, OS_WORD);
}
/* Long-sized register-count shift. */
DISAS_INSN(shift_reg)
{
    shift_reg(s, insn, OS_LONG);
}
DISAS_INSN(shift_mem)
{
    /* Memory shift: always word-sized with an implicit count of 1.
       Bit 3 selects logical, bit 8 selects left. */
    int logical = insn & 8;
    int left = insn & 0x100;
    TCGv src;
    TCGv addr;

    SRC_EA(env, src, OS_WORD, !logical, &addr);
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shri_i32(QREG_CC_C, src, 15);
        tcg_gen_shli_i32(QREG_CC_N, src, 1);

        /* Note that ColdFire always clears V,
           while M68000 sets if the most significant bit is changed at
           any time during the shift operation */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            src = gen_extend(src, OS_WORD, 1);
            /* V = sign changed, i.e. bit 15 of (src ^ (src << 1)). */
            tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
        }
    } else {
        tcg_gen_mov_i32(QREG_CC_C, src);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, src, 1);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, src, 1);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
/* Emit ROL/ROR (rotate without extend) of 'reg' by a variable count.
 * Sub-long sizes are replicated across 32 bits so a single 32-bit
 * rotate produces the right result in the low part.  Updates N/Z/C/V;
 * X is not affected.  'reg' ends up sign-extended to 'size'.
 */
static void rotate(TCGv reg, TCGv shift, int left, int size)
{
    switch (size) {
    case 8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_i32(reg, reg);
        tcg_gen_muli_i32(reg, reg, 0x01010101);
        goto do_long;
    case 16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
        goto do_long;
    do_long:
    default:
        if (left) {
            tcg_gen_rotl_i32(reg, reg, shift);
        } else {
            tcg_gen_rotr_i32(reg, reg, shift);
        }
    }

    /* compute flags */

    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }

    /* QREG_CC_X is not affected */

    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);

    /* C is the bit rotated around the end: bit 0 for left, MSB for right. */
    if (left) {
        tcg_gen_andi_i32(QREG_CC_C, reg, 1);
    } else {
        tcg_gen_shri_i32(QREG_CC_C, reg, 31);
    }

    tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
}
/* Set flags after a rotate-through-X (ROXL/ROXR): sign-extend the
 * result to 'size', set N/Z from it, copy the new extend bit into
 * both X and C, and clear V.
 */
static void rotate_x_flags(TCGv reg, TCGv X, int size)
{
    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }
    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);
    tcg_gen_mov_i32(QREG_CC_X, X);
    tcg_gen_mov_i32(QREG_CC_C, X);
    tcg_gen_movi_i32(QREG_CC_V, 0);
}
/* Rotate 'reg' (of 'size' bits) through the X bit by a variable count.
 * Builds the (size+1)-bit rotate out of three shifts OR'd together.
 * Returns a new temp holding the resulting X bit; the caller frees it.
 * Result of rotate_x() is valid if 0 <= shift <= size.
 */
static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
{
    TCGv X, shl, shr, shx, sz, zero;

    sz = tcg_const_i32(size);

    shr = tcg_temp_new();
    shl = tcg_temp_new();
    shx = tcg_temp_new();
    if (left) {
        tcg_gen_mov_i32(shl, shift);      /* shl = shift */
        tcg_gen_movi_i32(shr, size + 1);
        tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
        tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
        /* shx = shx < 0 ? size : shx; */
        zero = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
        tcg_temp_free(zero);
    } else {
        tcg_gen_mov_i32(shr, shift);      /* shr = shift */
        tcg_gen_movi_i32(shl, size + 1);
        tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
        tcg_gen_sub_i32(shx, sz, shift);  /* shx = size - shift */
    }

    /* reg = (reg << shl) | (reg >> shr) | (x << shx); */

    tcg_gen_shl_i32(shl, reg, shl);
    tcg_gen_shr_i32(shr, reg, shr);
    tcg_gen_or_i32(reg, shl, shr);
    tcg_temp_free(shl);
    tcg_temp_free(shr);
    tcg_gen_shl_i32(shx, QREG_CC_X, shx);
    tcg_gen_or_i32(reg, reg, shx);
    tcg_temp_free(shx);

    /* X = (reg >> size) & 1 */

    X = tcg_temp_new();
    tcg_gen_shr_i32(X, reg, sz);
    tcg_gen_andi_i32(X, X, 1);
    tcg_temp_free(sz);

    return X;
}
/* Rotate a 32-bit register through the X bit using a 64-bit rotate of
 * the concatenated [reg:X] (left) or [X:reg] (right) value.  If the
 * count is zero, neither the register nor X is modified.  Returns a
 * new temp holding the resulting X bit; the caller frees it.
 * Result of rotate32_x() is valid if 0 <= shift < 33.
 */
static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
{
    TCGv_i64 t0, shift64;
    TCGv X, lo, hi, zero;

    shift64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(shift64, shift);

    t0 = tcg_temp_new_i64();

    X = tcg_temp_new();
    lo = tcg_temp_new();
    hi = tcg_temp_new();

    if (left) {
        /* create [reg:X:..] */

        tcg_gen_shli_i32(lo, QREG_CC_X, 31);
        tcg_gen_concat_i32_i64(t0, lo, reg);

        /* rotate */

        tcg_gen_rotl_i64(t0, t0, shift64);
        tcg_temp_free_i64(shift64);

        /* result is [reg:..:reg:X] */

        tcg_gen_extr_i64_i32(lo, hi, t0);
        tcg_gen_andi_i32(X, lo, 1);

        tcg_gen_shri_i32(lo, lo, 1);
    } else {
        /* create [..:X:reg] */

        tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);

        tcg_gen_rotr_i64(t0, t0, shift64);
        tcg_temp_free_i64(shift64);

        /* result is value: [X:reg:..:reg] */

        tcg_gen_extr_i64_i32(lo, hi, t0);

        /* extract X */

        tcg_gen_shri_i32(X, hi, 31);

        /* extract result */

        tcg_gen_shli_i32(hi, hi, 1);
    }
    tcg_temp_free_i64(t0);
    tcg_gen_or_i32(lo, lo, hi);
    tcg_temp_free(hi);

    /* if shift == 0, register and X are not affected */

    zero = tcg_const_i32(0);
    tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
    tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
    tcg_temp_free(zero);
    tcg_temp_free(lo);

    return X;
}
DISAS_INSN(rotate_im)
{
    /* Long-sized immediate rotate: bit 3 selects ROL/ROR (plain)
       vs ROXL/ROXR (through X); count 1..8, 0 encoding 8. */
    TCGv shift;
    int tmp;
    int left = (insn & 0x100);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_const_i32(tmp);
    if (insn & 8) {
        rotate(DREG(insn, 0), shift, left, 32);
    } else {
        TCGv X = rotate32_x(DREG(insn, 0), shift, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);

    set_cc_op(s, CC_OP_FLAGS);
}
DISAS_INSN(rotate8_im)
{
    /* Byte-sized immediate rotate; only the low byte of Dx is written. */
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_const_i32(tmp);
    if (insn & 8) {
        rotate(reg, shift, left, 8);
    } else {
        TCGv X = rotate_x(reg, shift, left, 8);
        rotate_x_flags(reg, X, 8);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
DISAS_INSN(rotate16_im)
{
    /* Word-sized immediate rotate; only the low word of Dx is written. */
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_const_i32(tmp);
    if (insn & 8) {
        rotate(reg, shift, left, 16);
    } else {
        TCGv X = rotate_x(reg, shift, left, 16);
        rotate_x_flags(reg, X, 16);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
DISAS_INSN(rotate_reg)
{
    /* Long-sized register-count rotate.  The raw count is taken modulo
       64; plain rotates then use count & 31, rotates through X reduce
       modulo 33 (the 33-bit [X:reg] rotation period). */
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = DREG(insn, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 31);
        rotate(reg, t1, left, 32);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 33 */
        tcg_gen_movi_i32(t1, 33);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate32_x(DREG(insn, 0), t1, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    set_cc_op(s, CC_OP_FLAGS);
}
DISAS_INSN(rotate8_reg)
{
    /* Byte-sized register-count rotate; rotate-through-X reduces the
       count modulo 9 (the 9-bit [X:byte] rotation period). */
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 7);
        rotate(reg, t1, left, 8);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 9 */
        tcg_gen_movi_i32(t1, 9);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 8);
        rotate_x_flags(reg, X, 8);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
DISAS_INSN(rotate16_reg)
{
    /* Word-sized register-count rotate; rotate-through-X reduces the
       count modulo 17 (the 17-bit [X:word] rotation period). */
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 15);
        rotate(reg, t1, left, 16);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 17 */
        tcg_gen_movi_i32(t1, 17);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 16);
        rotate_x_flags(reg, X, 16);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
DISAS_INSN(rotate_mem)
{
    /* Memory rotate: always word-sized with an implicit count of 1.
       Bit 9 selects plain rotate vs rotate-through-X. */
    TCGv src;
    TCGv addr;
    TCGv shift;
    int left = (insn & 0x100);

    SRC_EA(env, src, OS_WORD, 0, &addr);

    shift = tcg_const_i32(1);
    if (insn & 0x0200) {
        rotate(src, shift, left, 16);
    } else {
        TCGv X = rotate_x(src, shift, left, 16);
        rotate_x_flags(src, X, 16);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);
    DEST_EA(env, insn, OS_WORD, src, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
DISAS_INSN(bfext_reg)
{
    /* BFEXTU/BFEXTS Dx{ofs:len},Dy: extract a bit-field from a data
       register.  Offset and width may each be immediate or come from a
       register (ext bits 11 and 5).  N/Z are set from the sign-extended
       field (CC_OP_LOGIC). */
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv src = DREG(insn, 0);
    TCGv dst = DREG(ext, 12);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;  /* 0 encodes 32 */
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp = tcg_temp_new();
    TCGv shift;

    /* In general, we're going to rotate the field so that it's at the
       top of the word and then right-shift by the compliment of the
       width to extend the field.  */
    if (ext & 0x20) {
        /* Variable width.  */
        if (ext & 0x800) {
            /* Variable offset.  */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
        } else {
            tcg_gen_rotli_i32(tmp, src, ofs);
        }

        shift = tcg_temp_new();
        /* shift = 32 - width (mod 32): the arithmetic shift extends. */
        tcg_gen_neg_i32(shift, DREG(ext, 0));
        tcg_gen_andi_i32(shift, shift, 31);
        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_shr_i32(dst, tmp, shift);
        }
        tcg_temp_free(shift);
    } else {
        /* Immediate width.  */
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
            src = tmp;
            pos = 32 - len;
        } else {
            /* Immediate offset.  If the field doesn't wrap around the
               end of the word, rely on (s)extract completely.  */
            if (pos < 0) {
                tcg_gen_rotli_i32(tmp, src, ofs);
                src = tmp;
                pos = 32 - len;
            }
        }

        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_extract_i32(dst, src, pos, len);
        }
    }

    tcg_temp_free(tmp);
    set_cc_op(s, CC_OP_LOGIC);
}
DISAS_INSN(bfext_mem)
{
    /* BFEXTU/BFEXTS <mem>{ofs:len},Dy: memory bit-field extract via
       helpers.  The unsigned helper also returns the sign-extended
       field (in the high half of its 64-bit result) for the flags. */
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv dest = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_const_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_const_i32(extract32(ext, 6, 5));
    }

    if (is_sign) {
        gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
        tcg_gen_mov_i32(QREG_CC_N, dest);
    } else {
        TCGv_i64 tmp = tcg_temp_new_i64();
        gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
        tcg_temp_free_i64(tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Only free the temps we allocated, not the live registers. */
    if (!(ext & 0x20)) {
        tcg_temp_free(len);
    }
    if (!(ext & 0x800)) {
        tcg_temp_free(ofs);
    }
}
/*
 * BFCHG/BFCLR/BFSET/BFTST/BFFFO with a data-register operand.
 * Strategy: rotate the source so the field is left-aligned in QREG_CC_N
 * (so N and Z come for free with CC_OP_LOGIC), and build "mask" as the
 * register-aligned complement of the field (field bits clear, others
 * set) for the read-modify-write ops selected by insn bits 11:8.
 * bfffo additionally materialises the offset/length in tofs/tlen for
 * its helper.
 */
4063 DISAS_INSN(bfop_reg)
4065 int ext = read_im16(env, s);
4066 TCGv src = DREG(insn, 0);
4067 int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
4068 int ofs = extract32(ext, 6, 5); /* big bit-endian */
4069 TCGv mask, tofs, tlen;
4071 tofs = NULL;
4072 tlen = NULL;
4073 if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
4074 tofs = tcg_temp_new();
4075 tlen = tcg_temp_new();
4078 if ((ext & 0x820) == 0) {
4079 /* Immediate width and offset. */
/* maski has the top 'len' bits clear when rotated into place. */
4080 uint32_t maski = 0x7fffffffu >> (len - 1);
4081 if (ofs + len <= 32) {
/* Field does not wrap: a plain shift left-aligns it. */
4082 tcg_gen_shli_i32(QREG_CC_N, src, ofs);
4083 } else {
4084 tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
4086 tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
4087 mask = tcg_const_i32(ror32(maski, ofs));
4088 if (tofs) {
4089 tcg_gen_movi_i32(tofs, ofs);
4090 tcg_gen_movi_i32(tlen, len);
4092 } else {
4093 TCGv tmp = tcg_temp_new();
4094 if (ext & 0x20) {
4095 /* Variable width */
/* tmp = (width - 1) & 31, so width 0 means 32 bits. */
4096 tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
4097 tcg_gen_andi_i32(tmp, tmp, 31);
4098 mask = tcg_const_i32(0x7fffffffu);
4099 tcg_gen_shr_i32(mask, mask, tmp);
4100 if (tlen) {
4101 tcg_gen_addi_i32(tlen, tmp, 1);
4103 } else {
4104 /* Immediate width */
4105 mask = tcg_const_i32(0x7fffffffu >> (len - 1));
4106 if (tlen) {
4107 tcg_gen_movi_i32(tlen, len);
4110 if (ext & 0x800) {
4111 /* Variable offset */
4112 tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
4113 tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
4114 tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
/* Rotate the mask back so it lines up with the source register. */
4115 tcg_gen_rotr_i32(mask, mask, tmp);
4116 if (tofs) {
4117 tcg_gen_mov_i32(tofs, tmp);
4119 } else {
4120 /* Immediate offset (and variable width) */
4121 tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
4122 tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
4123 tcg_gen_rotri_i32(mask, mask, ofs);
4124 if (tofs) {
4125 tcg_gen_movi_i32(tofs, ofs);
4128 tcg_temp_free(tmp);
4130 set_cc_op(s, CC_OP_LOGIC);
/* Apply the actual operation; mask has field bits CLEAR. */
4132 switch (insn & 0x0f00) {
4133 case 0x0a00: /* bfchg */
4134 tcg_gen_eqv_i32(src, src, mask);
4135 break;
4136 case 0x0c00: /* bfclr */
4137 tcg_gen_and_i32(src, src, mask);
4138 break;
4139 case 0x0d00: /* bfffo */
4140 gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
4141 tcg_temp_free(tlen);
4142 tcg_temp_free(tofs);
4143 break;
4144 case 0x0e00: /* bfset */
4145 tcg_gen_orc_i32(src, src, mask);
4146 break;
4147 case 0x0800: /* bftst */
4148 /* flags already set; no other work to do. */
4149 break;
4150 default:
4151 g_assert_not_reached();
4153 tcg_temp_free(mask);
/*
 * BFCHG/BFCLR/BFSET/BFTST/BFFFO with a memory operand: delegate the
 * read-modify-write to per-op helpers, which also return the
 * left-aligned field in QREG_CC_N for flag computation.
 */
4156 DISAS_INSN(bfop_mem)
4158 int ext = read_im16(env, s);
4159 TCGv addr, len, ofs;
4160 TCGv_i64 t64;
4162 addr = gen_lea(env, s, insn, OS_UNSIZED);
4163 if (IS_NULL_QREG(addr)) {
4164 gen_addr_fault(s);
4165 return;
/* Width/offset: register or 5-bit immediate, as in bfext_mem. */
4168 if (ext & 0x20) {
4169 len = DREG(ext, 0);
4170 } else {
4171 len = tcg_const_i32(extract32(ext, 0, 5));
4173 if (ext & 0x800) {
4174 ofs = DREG(ext, 6);
4175 } else {
4176 ofs = tcg_const_i32(extract32(ext, 6, 5));
4179 switch (insn & 0x0f00) {
4180 case 0x0a00: /* bfchg */
4181 gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4182 break;
4183 case 0x0c00: /* bfclr */
4184 gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4185 break;
4186 case 0x0d00: /* bfffo */
/* Helper returns a pair: low = found offset, high = CC_N. */
4187 t64 = tcg_temp_new_i64();
4188 gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
4189 tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
4190 tcg_temp_free_i64(t64);
4191 break;
4192 case 0x0e00: /* bfset */
4193 gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4194 break;
4195 case 0x0800: /* bftst */
/* bftst only needs the flags, so a signed extract suffices. */
4196 gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4197 break;
4198 default:
4199 g_assert_not_reached();
4201 set_cc_op(s, CC_OP_LOGIC);
4203 if (!(ext & 0x20)) {
4204 tcg_temp_free(len);
4206 if (!(ext & 0x800)) {
4207 tcg_temp_free(ofs);
/*
 * BFINS Dm,Dn{offset:width} -- insert a bit field into a data register.
 * Flags (N/Z) are computed from the source value left-aligned into
 * QREG_CC_N.  The insertion itself uses deposit when the all-immediate
 * field does not wrap, otherwise a rotated-mask merge.
 */
4211 DISAS_INSN(bfins_reg)
4213 int ext = read_im16(env, s);
4214 TCGv dst = DREG(insn, 0);
4215 TCGv src = DREG(ext, 12);
4216 int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
4217 int ofs = extract32(ext, 6, 5); /* big bit-endian */
4218 int pos = 32 - ofs - len; /* little bit-endian */
4219 TCGv tmp;
4221 tmp = tcg_temp_new();
/* Set CC_N to the source shifted so the field occupies the top bits. */
4223 if (ext & 0x20) {
4224 /* Variable width */
4225 tcg_gen_neg_i32(tmp, DREG(ext, 0));
4226 tcg_gen_andi_i32(tmp, tmp, 31);
4227 tcg_gen_shl_i32(QREG_CC_N, src, tmp);
4228 } else {
4229 /* Immediate width */
4230 tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
4232 set_cc_op(s, CC_OP_LOGIC);
4234 /* Immediate width and offset */
4235 if ((ext & 0x820) == 0) {
4236 /* Check for suitability for deposit. */
4237 if (pos >= 0) {
4238 tcg_gen_deposit_i32(dst, dst, src, pos, len);
4239 } else {
/* Field wraps around bit 0: merge via a rotated mask. */
4240 uint32_t maski = -2U << (len - 1);
4241 uint32_t roti = (ofs + len) & 31;
4242 tcg_gen_andi_i32(tmp, src, ~maski);
4243 tcg_gen_rotri_i32(tmp, tmp, roti);
4244 tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
4245 tcg_gen_or_i32(dst, dst, tmp);
4247 } else {
/* Variable width and/or offset: build mask and rotation at runtime. */
4248 TCGv mask = tcg_temp_new();
4249 TCGv rot = tcg_temp_new();
4251 if (ext & 0x20) {
4252 /* Variable width */
4253 tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
4254 tcg_gen_andi_i32(rot, rot, 31);
4255 tcg_gen_movi_i32(mask, -2);
4256 tcg_gen_shl_i32(mask, mask, rot);
4257 tcg_gen_mov_i32(rot, DREG(ext, 0));
4258 tcg_gen_andc_i32(tmp, src, mask);
4259 } else {
4260 /* Immediate width (variable offset) */
4261 uint32_t maski = -2U << (len - 1);
4262 tcg_gen_andi_i32(tmp, src, ~maski);
4263 tcg_gen_movi_i32(mask, maski);
4264 tcg_gen_movi_i32(rot, len & 31);
4266 if (ext & 0x800) {
4267 /* Variable offset */
4268 tcg_gen_add_i32(rot, rot, DREG(ext, 6));
4269 } else {
4270 /* Immediate offset (variable width) */
4271 tcg_gen_addi_i32(rot, rot, ofs);
/* rot = (ofs + len) & 31: rotate mask and value into position. */
4273 tcg_gen_andi_i32(rot, rot, 31);
4274 tcg_gen_rotr_i32(mask, mask, rot);
4275 tcg_gen_rotr_i32(tmp, tmp, rot);
4276 tcg_gen_and_i32(dst, dst, mask);
4277 tcg_gen_or_i32(dst, dst, tmp);
4279 tcg_temp_free(rot);
4280 tcg_temp_free(mask);
4282 tcg_temp_free(tmp);
/*
 * BFINS Dm,<ea>{offset:width} -- insert a bit field into memory,
 * delegated entirely to the bfins_mem helper, which also returns the
 * flag value in QREG_CC_N.
 */
4285 DISAS_INSN(bfins_mem)
4287 int ext = read_im16(env, s);
4288 TCGv src = DREG(ext, 12);
4289 TCGv addr, len, ofs;
4291 addr = gen_lea(env, s, insn, OS_UNSIZED);
4292 if (IS_NULL_QREG(addr)) {
4293 gen_addr_fault(s);
4294 return;
/* Width/offset: register or 5-bit immediate, as in bfext_mem. */
4297 if (ext & 0x20) {
4298 len = DREG(ext, 0);
4299 } else {
4300 len = tcg_const_i32(extract32(ext, 0, 5));
4302 if (ext & 0x800) {
4303 ofs = DREG(ext, 6);
4304 } else {
4305 ofs = tcg_const_i32(extract32(ext, 6, 5));
4308 gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
4309 set_cc_op(s, CC_OP_LOGIC);
4311 if (!(ext & 0x20)) {
4312 tcg_temp_free(len);
4314 if (!(ext & 0x800)) {
4315 tcg_temp_free(ofs);
/*
 * FF1 Dn (ColdFire): flags are set from the ORIGINAL register value,
 * then the helper replaces the register with the find-first-one result.
 */
4319 DISAS_INSN(ff1)
4321 TCGv reg;
4322 reg = DREG(insn, 0);
4323 gen_logic_cc(s, reg, OS_LONG);
4324 gen_helper_ff1(reg, reg);
/*
 * CHK <ea>,Dn -- bounds-check a data register against an operand and
 * raise an exception from the helper when out of range.  insn bits 8:7
 * select the size: 3 = word, 2 = long (long form gated on the CHK2
 * feature); anything else is illegal.
 */
4327 DISAS_INSN(chk)
4329 TCGv src, reg;
4330 int opsize;
4332 switch ((insn >> 7) & 3) {
4333 case 3:
4334 opsize = OS_WORD;
4335 break;
4336 case 2:
4337 if (m68k_feature(env, M68K_FEATURE_CHK2)) {
4338 opsize = OS_LONG;
4339 break;
4341 /* fallthru */
4342 default:
4343 gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4344 return;
/* Sign-extended comparison operands (last SRC_EA arg = 1). */
4346 SRC_EA(env, src, opsize, 1, NULL);
4347 reg = gen_extend(DREG(insn, 9), opsize, 1);
/* Flags must be up to date before the helper can raise CHK. */
4349 gen_flush_flags(s);
4350 gen_helper_chk(cpu_env, reg, src);
/*
 * CHK2/CMP2 <ea>,Rn -- compare a register against a pair of bounds
 * loaded from consecutive memory locations at <ea>.  insn bits 10:9
 * select the operand size; the exception itself is raised (or not) by
 * the chk2 helper.  ext bit 11 must be set for the CHK2 form handled
 * here, otherwise the instruction is illegal.
 */
4353 DISAS_INSN(chk2)
4355 uint16_t ext;
4356 TCGv addr1, addr2, bound1, bound2, reg;
4357 int opsize;
4359 switch ((insn >> 9) & 3) {
4360 case 0:
4361 opsize = OS_BYTE;
4362 break;
4363 case 1:
4364 opsize = OS_WORD;
4365 break;
4366 case 2:
4367 opsize = OS_LONG;
4368 break;
4369 default:
4370 gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4371 return;
4374 ext = read_im16(env, s);
4375 if ((ext & 0x0800) == 0) {
4376 gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4377 return;
/* Lower bound at <ea>, upper bound immediately after it. */
4380 addr1 = gen_lea(env, s, insn, OS_UNSIZED);
4381 addr2 = tcg_temp_new();
4382 tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));
4384 bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
4385 tcg_temp_free(addr1);
4386 bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));
4387 tcg_temp_free(addr2);
/* ext bit 15: address register (used as-is) vs sign-extended data reg. */
4389 reg = tcg_temp_new();
4390 if (ext & 0x8000) {
4391 tcg_gen_mov_i32(reg, AREG(ext, 12));
4392 } else {
4393 gen_ext(reg, DREG(ext, 12), opsize, 1);
4396 gen_flush_flags(s);
4397 gen_helper_chk2(cpu_env, reg, bound1, bound2);
4398 tcg_temp_free(reg);
/*
 * Copy one 16-byte line from *src to *dst for MOVE16.  Both addresses
 * are masked to 16-byte alignment (andi ~15) and the copy is done as
 * two 64-bit load/store pairs.  'index' is the MMU index for the
 * memory accesses.
 */
4401 static void m68k_copy_line(TCGv dst, TCGv src, int index)
4403 TCGv addr;
4404 TCGv_i64 t0, t1;
4406 addr = tcg_temp_new();
4408 t0 = tcg_temp_new_i64();
4409 t1 = tcg_temp_new_i64();
/* Both loads are performed before either store. */
4411 tcg_gen_andi_i32(addr, src, ~15);
4412 tcg_gen_qemu_ld64(t0, addr, index);
4413 tcg_gen_addi_i32(addr, addr, 8);
4414 tcg_gen_qemu_ld64(t1, addr, index);
4416 tcg_gen_andi_i32(addr, dst, ~15);
4417 tcg_gen_qemu_st64(t0, addr, index);
4418 tcg_gen_addi_i32(addr, addr, 8);
4419 tcg_gen_qemu_st64(t1, addr, index);
4421 tcg_temp_free_i64(t0);
4422 tcg_temp_free_i64(t1);
4423 tcg_temp_free(addr);
/*
 * MOVE16 (Ay)+,(Ax)+ -- copy a 16-byte line between two postincrement
 * address registers.  ext bit 15 must be set or the insn is illegal.
 */
4426 DISAS_INSN(move16_reg)
4428 int index = IS_USER(s);
4429 TCGv tmp;
4430 uint16_t ext;
4432 ext = read_im16(env, s);
4433 if ((ext & (1 << 15)) == 0) {
4434 gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4437 m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4439 /* Ax can be Ay, so save Ay before incrementing Ax */
4440 tmp = tcg_temp_new();
4441 tcg_gen_mov_i32(tmp, AREG(ext, 12));
4442 tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4443 tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4444 tcg_temp_free(tmp);
/*
 * MOVE16 between an address register and an absolute long address.
 * insn bit 3 selects the direction; insn bit 4 clear means the
 * register form is postincrement, so Ay is bumped by 16 afterwards.
 */
4447 DISAS_INSN(move16_mem)
4449 int index = IS_USER(s);
4450 TCGv reg, addr;
4452 reg = AREG(insn, 0);
4453 addr = tcg_const_i32(read_im32(env, s));
4455 if ((insn >> 3) & 1) {
4456 /* MOVE16 (xxx).L, (Ay) */
4457 m68k_copy_line(reg, addr, index);
4458 } else {
4459 /* MOVE16 (Ay), (xxx).L */
4460 m68k_copy_line(addr, reg, index);
4463 tcg_temp_free(addr);
4465 if (((insn >> 3) & 2) == 0) {
4466 /* (Ay)+ */
4467 tcg_gen_addi_i32(reg, reg, 16);
/*
 * STRLDSR (ColdFire): push the current SR, then load a new SR from the
 * immediate.  Encoded as a 0x40e7 prefix followed by a literal 0x46FC
 * (MOVE #imm,SR); any other second word is unsupported.  Privileged:
 * the new SR must have the S bit set and we must not be in user mode.
 */
4471 DISAS_INSN(strldsr)
4473 uint16_t ext;
4474 uint32_t addr;
/* Exception PC points back at the start of the two-word insn. */
4476 addr = s->pc - 2;
4477 ext = read_im16(env, s);
4478 if (ext != 0x46FC) {
4479 gen_exception(s, addr, EXCP_UNSUPPORTED);
4480 return;
4482 ext = read_im16(env, s);
4483 if (IS_USER(s) || (ext & SR_S) == 0) {
4484 gen_exception(s, addr, EXCP_PRIVILEGE);
4485 return;
4487 gen_push(s, gen_get_sr(s));
4488 gen_set_sr_im(s, ext, 0);
/*
 * MOVE SR,<ea>.  Privileged except on the original 68000, where user
 * code may read SR (hence the M68K_FEATURE_M68000 exemption).
 */
4491 DISAS_INSN(move_from_sr)
4493 TCGv sr;
4495 if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
4496 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4497 return;
4499 sr = gen_get_sr(s);
4500 DEST_EA(env, insn, OS_WORD, sr, NULL);
4503 #if defined(CONFIG_SOFTMMU)
/*
 * MOVES <ea>,Rn / Rn,<ea> -- privileged move using the alternate
 * address spaces selected by SFC (loads) and DFC (stores).  ext bit 11
 * gives the direction, ext bit 15 selects address vs data register.
 * Post-increment/pre-decrement writeback of An is done manually since
 * gen_lea does not commit it.
 */
4504 DISAS_INSN(moves)
4506 int opsize;
4507 uint16_t ext;
4508 TCGv reg;
4509 TCGv addr;
4510 int extend;
4512 if (IS_USER(s)) {
4513 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4514 return;
4517 ext = read_im16(env, s);
4519 opsize = insn_opsize(insn);
4521 if (ext & 0x8000) {
4522 /* address register */
4523 reg = AREG(ext, 12);
4524 extend = 1;
4525 } else {
4526 /* data register */
4527 reg = DREG(ext, 12);
4528 extend = 0;
4531 addr = gen_lea(env, s, insn, opsize);
4532 if (IS_NULL_QREG(addr)) {
4533 gen_addr_fault(s);
4534 return;
4537 if (ext & 0x0800) {
4538 /* from reg to ea */
4539 gen_store(s, opsize, addr, reg, DFC_INDEX(s));
4540 } else {
4541 /* from ea to reg */
4542 TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
4543 if (extend) {
4544 gen_ext(reg, tmp, opsize, 1);
4545 } else {
4546 gen_partset_reg(opsize, reg, tmp);
/* Commit An writeback for (An)+ / -(An) addressing modes. */
4549 switch (extract32(insn, 3, 3)) {
4550 case 3: /* Indirect postincrement. */
/* Byte accesses through A7 keep the stack pointer word-aligned. */
4551 tcg_gen_addi_i32(AREG(insn, 0), addr,
4552 REG(insn, 0) == 7 && opsize == OS_BYTE
4554 : opsize_bytes(opsize));
4555 break;
4556 case 4: /* Indirect predecrememnt. */
4557 tcg_gen_mov_i32(AREG(insn, 0), addr);
4558 break;
/*
 * MOVE <ea>,SR -- privileged; SR changes can affect translation state,
 * so end the TB with gen_lookup_tb().
 */
4562 DISAS_INSN(move_to_sr)
4564 if (IS_USER(s)) {
4565 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4566 return;
4568 gen_move_to_sr(env, s, insn, false);
4569 gen_lookup_tb(s);
/* MOVE USP,An -- privileged read of the saved user stack pointer. */
4572 DISAS_INSN(move_from_usp)
4574 if (IS_USER(s)) {
4575 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4576 return;
4578 tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
4579 offsetof(CPUM68KState, sp[M68K_USP]));
/* MOVE An,USP -- privileged write of the saved user stack pointer. */
4582 DISAS_INSN(move_to_usp)
4584 if (IS_USER(s)) {
4585 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4586 return;
4588 tcg_gen_st_i32(AREG(insn, 0), cpu_env,
4589 offsetof(CPUM68KState, sp[M68K_USP]));
/* HALT -- privileged; implemented by raising EXCP_HALT_INSN. */
4592 DISAS_INSN(halt)
4594 if (IS_USER(s)) {
4595 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4596 return;
4599 gen_exception(s, s->pc, EXCP_HALT_INSN);
/*
 * STOP #imm -- privileged: load SR from the immediate, mark the CPU
 * halted and raise EXCP_HLT so execution stops until an interrupt.
 */
4602 DISAS_INSN(stop)
4604 uint16_t ext;
4606 if (IS_USER(s)) {
4607 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4608 return;
4611 ext = read_im16(env, s);
4613 gen_set_sr_im(s, ext, 0);
4614 tcg_gen_movi_i32(cpu_halted, 1);
4615 gen_exception(s, s->pc, EXCP_HLT);
/* RTE -- privileged; the actual frame unwinding is done at EXCP_RTE. */
4618 DISAS_INSN(rte)
4620 if (IS_USER(s)) {
4621 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4622 return;
4624 gen_exception(s, s->insn_pc, EXCP_RTE);
/*
 * ColdFire MOVEC Rn,#creg -- write-only on ColdFire; the control
 * register number is the low 12 bits of the extension word.  May
 * change translation-relevant state, so end the TB.
 */
4627 DISAS_INSN(cf_movec)
4629 uint16_t ext;
4630 TCGv reg;
4632 if (IS_USER(s)) {
4633 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4634 return;
4637 ext = read_im16(env, s);
/* ext bit 15: address vs data register as the source. */
4639 if (ext & 0x8000) {
4640 reg = AREG(ext, 12);
4641 } else {
4642 reg = DREG(ext, 12);
4644 gen_helper_cf_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4645 gen_lookup_tb(s);
/*
 * 680x0 MOVEC -- insn bit 0 selects direction (1 = general register to
 * control register).  Control register number is ext & 0xfff; ends the
 * TB since control registers can affect translation.
 */
4648 DISAS_INSN(m68k_movec)
4650 uint16_t ext;
4651 TCGv reg;
4653 if (IS_USER(s)) {
4654 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4655 return;
4658 ext = read_im16(env, s);
4660 if (ext & 0x8000) {
4661 reg = AREG(ext, 12);
4662 } else {
4663 reg = DREG(ext, 12);
4665 if (insn & 1) {
4666 gen_helper_m68k_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4667 } else {
4668 gen_helper_m68k_movec_from(reg, cpu_env, tcg_const_i32(ext & 0xfff));
4670 gen_lookup_tb(s);
/* INTOUCH -- privileged instruction-cache touch; no-op under emulation. */
4673 DISAS_INSN(intouch)
4675 if (IS_USER(s)) {
4676 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4677 return;
4679 /* ICache fetch. Implement as no-op. */
/* CPUSHL -- privileged cache line push/invalidate; no-op under emulation. */
4682 DISAS_INSN(cpushl)
4684 if (IS_USER(s)) {
4685 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4686 return;
4688 /* Cache push/invalidate. Implement as no-op. */
/* CPUSH -- privileged cache push/invalidate; no-op under emulation. */
4691 DISAS_INSN(cpush)
4693 if (IS_USER(s)) {
4694 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4695 return;
4697 /* Cache push/invalidate. Implement as no-op. */
/* CINV -- privileged cache line invalidate; no-op under emulation. */
4700 DISAS_INSN(cinv)
4702 if (IS_USER(s)) {
4703 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4704 return;
4706 /* Invalidate cache line. Implement as no-op. */
4709 #if defined(CONFIG_SOFTMMU)
/*
 * PFLUSH -- privileged TLB flush; the opmode (insn bits 4:3) and the
 * address in An are forwarded to the pflush helper.
 */
4710 DISAS_INSN(pflush)
4712 TCGv opmode;
4714 if (IS_USER(s)) {
4715 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4716 return;
4719 opmode = tcg_const_i32((insn >> 3) & 3);
4720 gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
4721 tcg_temp_free(opmode);
/*
 * PTEST -- privileged MMU address-translation test; insn bit 5 selects
 * a read (1) vs write (0) test, performed by the ptest helper.
 */
4724 DISAS_INSN(ptest)
4726 TCGv is_read;
4728 if (IS_USER(s)) {
4729 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4730 return;
4732 is_read = tcg_const_i32((insn >> 5) & 1);
4733 gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
4734 tcg_temp_free(is_read);
4736 #endif
/* WDDATA -- always raises a privilege violation here (not supported). */
4738 DISAS_INSN(wddata)
4740 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
/* WDEBUG -- privileged and unimplemented: aborts emulation if reached. */
4743 DISAS_INSN(wdebug)
4745 M68kCPU *cpu = m68k_env_get_cpu(env);
4747 if (IS_USER(s)) {
4748 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4749 return;
4751 /* TODO: Implement wdebug. */
4752 cpu_abort(CPU(cpu), "WDEBUG not implemented");
4754 #endif
/* TRAP #n -- raise the corresponding trap vector (n = insn low nibble). */
4756 DISAS_INSN(trap)
4758 gen_exception(s, s->insn_pc, EXCP_TRAP0 + (insn & 0xf));
/*
 * Read an FPU control register (FPIAR/FPSR/FPCR) into 'res'.
 * FPIAR is not tracked and always reads as zero.
 */
4761 static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
4763 switch (reg) {
4764 case M68K_FPIAR:
4765 tcg_gen_movi_i32(res, 0);
4766 break;
4767 case M68K_FPSR:
4768 tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
4769 break;
4770 case M68K_FPCR:
4771 tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
4772 break;
/*
 * Write 'val' to an FPU control register.  FPIAR writes are discarded;
 * FPCR goes through a helper (which presumably applies rounding-mode
 * side effects -- defined elsewhere, not visible here).
 */
4776 static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
4778 switch (reg) {
4779 case M68K_FPIAR:
4780 break;
4781 case M68K_FPSR:
4782 tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
4783 break;
4784 case M68K_FPCR:
4785 gen_helper_set_fpcr(cpu_env, val);
4786 break;
/* Store FPU control register 'reg' to memory at 'addr' (32-bit). */
4790 static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
4792 int index = IS_USER(s);
4793 TCGv tmp;
4795 tmp = tcg_temp_new();
4796 gen_load_fcr(s, tmp, reg);
4797 tcg_gen_qemu_st32(tmp, addr, index);
4798 tcg_temp_free(tmp);
/* Load FPU control register 'reg' from memory at 'addr' (32-bit). */
4801 static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
4803 int index = IS_USER(s);
4804 TCGv tmp;
4806 tmp = tcg_temp_new();
4807 tcg_gen_qemu_ld32u(tmp, addr, index);
4808 gen_store_fcr(s, tmp, reg);
4809 tcg_temp_free(tmp);
/*
 * FMOVE/FMOVEM to/from the FP control registers (FPCR/FPSR/FPIAR).
 * ext[12:10] is a bitmask of the registers involved; ext bit 13 is the
 * direction (1 = FP control reg to <ea>).  Dn allows exactly one
 * register; An is only legal with FPIAR.  Memory forms iterate over the
 * mask, with special reverse-order handling for -(An) stores.
 */
4813 static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
4814 uint32_t insn, uint32_t ext)
4816 int mask = (ext >> 10) & 7;
4817 int is_write = (ext >> 13) & 1;
4818 int mode = extract32(insn, 3, 3);
4819 int i;
4820 TCGv addr, tmp;
4822 switch (mode) {
4823 case 0: /* Dn */
4824 if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
4825 gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4826 return;
4828 if (is_write) {
4829 gen_load_fcr(s, DREG(insn, 0), mask);
4830 } else {
4831 gen_store_fcr(s, DREG(insn, 0), mask);
4833 return;
4834 case 1: /* An, only with FPIAR */
4835 if (mask != M68K_FPIAR) {
4836 gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4837 return;
4839 if (is_write) {
4840 gen_load_fcr(s, AREG(insn, 0), mask);
4841 } else {
4842 gen_store_fcr(s, AREG(insn, 0), mask);
4844 return;
4845 default:
4846 break;
/* Memory operand: materialise the address in a local temp. */
4849 tmp = gen_lea(env, s, insn, OS_LONG);
4850 if (IS_NULL_QREG(tmp)) {
4851 gen_addr_fault(s);
4852 return;
4855 addr = tcg_temp_new();
4856 tcg_gen_mov_i32(addr, tmp);
4858 /* mask:
4860 * 0b100 Floating-Point Control Register
4861 * 0b010 Floating-Point Status Register
4862 * 0b001 Floating-Point Instruction Address Register
 */
4866 if (is_write && mode == 4) {
/* -(An) store: registers go out highest-first, address walks down. */
4867 for (i = 2; i >= 0; i--, mask >>= 1) {
4868 if (mask & 1) {
4869 gen_qemu_store_fcr(s, addr, 1 << i);
4870 if (mask != 1) {
4871 tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
4875 tcg_gen_mov_i32(AREG(insn, 0), addr);
4876 } else {
4877 for (i = 0; i < 3; i++, mask >>= 1) {
4878 if (mask & 1) {
4879 if (is_write) {
4880 gen_qemu_store_fcr(s, addr, 1 << i);
4881 } else {
4882 gen_qemu_load_fcr(s, addr, 1 << i);
/* mode 3 is (An)+: always advance so the final address writes back. */
4884 if (mask != 1 || mode == 3) {
4885 tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
4889 if (mode == 3) {
4890 tcg_gen_mov_i32(AREG(insn, 0), addr);
4893 tcg_temp_free_i32(addr);
/*
 * FMOVEM -- move multiple FP data registers to/from memory via the
 * fmovem helpers.  ext bit 13 clear = load; ext bit 11 selects a
 * dynamic register list held in a data register.  Full-FPU cores use
 * 96-bit extended values; otherwise doubles are used (see FIXME).
 * The helpers return the final address in 'tmp' for An writeback.
 */
4896 static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
4897 uint32_t insn, uint32_t ext)
4899 int opsize;
4900 TCGv addr, tmp;
4901 int mode = (ext >> 11) & 0x3;
4902 int is_load = ((ext & 0x2000) == 0);
4904 if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
4905 opsize = OS_EXTENDED;
4906 } else {
4907 opsize = OS_DOUBLE; /* FIXME */
4910 addr = gen_lea(env, s, insn, opsize);
4911 if (IS_NULL_QREG(addr)) {
4912 gen_addr_fault(s);
4913 return;
/* tmp holds the register mask on entry, the final address on exit. */
4916 tmp = tcg_temp_new();
4917 if (mode & 0x1) {
4918 /* Dynamic register list */
4919 tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
4920 } else {
4921 /* Static register list */
4922 tcg_gen_movi_i32(tmp, ext & 0xff);
4925 if (!is_load && (mode & 2) == 0) {
4926 /* predecrement addressing mode
4927 * only available to store register to memory
4929 if (opsize == OS_EXTENDED) {
4930 gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
4931 } else {
4932 gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
4934 } else {
4935 /* postincrement addressing mode */
4936 if (opsize == OS_EXTENDED) {
4937 if (is_load) {
4938 gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
4939 } else {
4940 gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
4942 } else {
4943 if (is_load) {
4944 gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
4945 } else {
4946 gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
/* Write the updated address back for (An)+ and -(An) modes. */
4950 if ((insn & 070) == 030 || (insn & 070) == 040) {
4951 tcg_gen_mov_i32(AREG(insn, 0), tmp);
4953 tcg_temp_free(tmp);
4956 /* ??? FP exceptions are not implemented. Most exceptions are deferred until
4957 immediately before the next FP instruction is executed. */
/*
 * Top-level decoder for the F-line FPU coprocessor instructions.
 * ext[15:13] selects the instruction class (register op, fmovecr,
 * fmove out, control-register moves, fmovem); general arithmetic falls
 * through to the opmode switch with either a memory source (ext bit
 * 14) or an FP register source.  Every arithmetic result is run
 * through ftst to keep the FP condition codes current.
 */
4958 DISAS_INSN(fpu)
4960 uint16_t ext;
4961 int opmode;
4962 int opsize;
4963 TCGv_ptr cpu_src, cpu_dest;
4965 ext = read_im16(env, s);
4966 opmode = ext & 0x7f;
4967 switch ((ext >> 13) & 7) {
4968 case 0:
4969 break;
4970 case 1:
4971 goto undef;
4972 case 2:
4973 if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
4974 /* fmovecr */
4975 TCGv rom_offset = tcg_const_i32(opmode);
4976 cpu_dest = gen_fp_ptr(REG(ext, 7));
4977 gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
4978 tcg_temp_free_ptr(cpu_dest);
4979 tcg_temp_free(rom_offset);
4980 return;
4982 break;
4983 case 3: /* fmove out */
4984 cpu_src = gen_fp_ptr(REG(ext, 7));
4985 opsize = ext_opsize(ext, 10);
4986 if (gen_ea_fp(env, s, insn, opsize, cpu_src,
4987 EA_STORE, IS_USER(s)) == -1) {
4988 gen_addr_fault(s);
4990 gen_helper_ftst(cpu_env, cpu_src);
4991 tcg_temp_free_ptr(cpu_src);
4992 return;
4993 case 4: /* fmove to control register. */
4994 case 5: /* fmove from control register. */
4995 gen_op_fmove_fcr(env, s, insn, ext);
4996 return;
4997 case 6: /* fmovem */
4998 case 7:
4999 if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
5000 goto undef;
5002 gen_op_fmovem(env, s, insn, ext);
5003 return;
/* General arithmetic: fetch the source operand. */
5005 if (ext & (1 << 14)) {
5006 /* Source effective address. */
5007 opsize = ext_opsize(ext, 10);
5008 cpu_src = gen_fp_result_ptr();
5009 if (gen_ea_fp(env, s, insn, opsize, cpu_src,
5010 EA_LOADS, IS_USER(s)) == -1) {
5011 gen_addr_fault(s);
5012 return;
5014 } else {
5015 /* Source register. */
5016 opsize = OS_EXTENDED;
5017 cpu_src = gen_fp_ptr(REG(ext, 10));
5019 cpu_dest = gen_fp_ptr(REG(ext, 7));
/* fs*/fd* variants force single/double rounding of the result. */
5020 switch (opmode) {
5021 case 0: /* fmove */
5022 gen_fp_move(cpu_dest, cpu_src);
5023 break;
5024 case 0x40: /* fsmove */
5025 gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
5026 break;
5027 case 0x44: /* fdmove */
5028 gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
5029 break;
5030 case 1: /* fint */
5031 gen_helper_firound(cpu_env, cpu_dest, cpu_src);
5032 break;
5033 case 3: /* fintrz */
5034 gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
5035 break;
5036 case 4: /* fsqrt */
5037 gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
5038 break;
5039 case 0x41: /* fssqrt */
5040 gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
5041 break;
5042 case 0x45: /* fdsqrt */
5043 gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
5044 break;
5045 case 0x18: /* fabs */
5046 gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
5047 break;
5048 case 0x58: /* fsabs */
5049 gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
5050 break;
5051 case 0x5c: /* fdabs */
5052 gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
5053 break;
5054 case 0x1a: /* fneg */
5055 gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
5056 break;
5057 case 0x5a: /* fsneg */
5058 gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
5059 break;
5060 case 0x5e: /* fdneg */
5061 gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
5062 break;
5063 case 0x20: /* fdiv */
5064 gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5065 break;
5066 case 0x60: /* fsdiv */
5067 gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5068 break;
5069 case 0x64: /* fddiv */
5070 gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5071 break;
5072 case 0x22: /* fadd */
5073 gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5074 break;
5075 case 0x62: /* fsadd */
5076 gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5077 break;
5078 case 0x66: /* fdadd */
5079 gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5080 break;
5081 case 0x23: /* fmul */
5082 gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5083 break;
5084 case 0x63: /* fsmul */
5085 gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5086 break;
5087 case 0x67: /* fdmul */
5088 gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5089 break;
5090 case 0x24: /* fsgldiv */
5091 gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5092 break;
5093 case 0x27: /* fsglmul */
5094 gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5095 break;
5096 case 0x28: /* fsub */
5097 gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5098 break;
5099 case 0x68: /* fssub */
5100 gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5101 break;
5102 case 0x6c: /* fdsub */
5103 gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5104 break;
5105 case 0x38: /* fcmp */
/* fcmp/ftst set condition codes themselves; no ftst pass below. */
5106 gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
5107 return;
5108 case 0x3a: /* ftst */
5109 gen_helper_ftst(cpu_env, cpu_src);
5110 return;
5111 default:
5112 goto undef;
5114 tcg_temp_free_ptr(cpu_src);
5115 gen_helper_ftst(cpu_env, cpu_dest);
5116 tcg_temp_free_ptr(cpu_dest);
5117 return;
5118 undef:
5119 /* FIXME: Is this right for offset addressing modes? */
/* Rewind past the extension word and re-dispatch as undefined FPU op. */
5120 s->pc -= 2;
5121 disas_undef_fpu(env, s, insn);
/*
 * Translate an FPU condition code (0-31) into a DisasCompare against
 * the FPSR condition bits (N, Z, A="NaN", per the FPSR_CC_* masks).
 * Conditions 16-31 are the "signaling" variants of 0-15 and share the
 * same predicate here; BSUN raising is still TODO.  c->v1 holds the
 * computed predicate value, compared against c->v2 == 0 with c->tcond.
 */
5124 static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
5126 TCGv fpsr;
5128 c->g1 = 1;
5129 c->v2 = tcg_const_i32(0);
5130 c->g2 = 0;
5131 /* TODO: Raise BSUN exception. */
5132 fpsr = tcg_temp_new();
5133 gen_load_fcr(s, fpsr, M68K_FPSR);
5134 switch (cond) {
5135 case 0: /* False */
5136 case 16: /* Signaling False */
5137 c->v1 = c->v2;
5138 c->tcond = TCG_COND_NEVER;
5139 break;
5140 case 1: /* EQual Z */
5141 case 17: /* Signaling EQual Z */
5142 c->v1 = tcg_temp_new();
5143 c->g1 = 0;
5144 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5145 c->tcond = TCG_COND_NE;
5146 break;
5147 case 2: /* Ordered Greater Than !(A || Z || N) */
5148 case 18: /* Greater Than !(A || Z || N) */
5149 c->v1 = tcg_temp_new();
5150 c->g1 = 0;
5151 tcg_gen_andi_i32(c->v1, fpsr,
5152 FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5153 c->tcond = TCG_COND_EQ;
5154 break;
5155 case 3: /* Ordered Greater than or Equal Z || !(A || N) */
5156 case 19: /* Greater than or Equal Z || !(A || N) */
/* Align the A bit with the N bit so the xor below computes !(A||N). */
5157 c->v1 = tcg_temp_new();
5158 c->g1 = 0;
5159 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5160 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
5161 tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
5162 tcg_gen_or_i32(c->v1, c->v1, fpsr);
5163 tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5164 c->tcond = TCG_COND_NE;
5165 break;
5166 case 4: /* Ordered Less Than !(!N || A || Z); */
5167 case 20: /* Less Than !(!N || A || Z); */
5168 c->v1 = tcg_temp_new();
5169 c->g1 = 0;
5170 tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N)
5171 tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
5172 c->tcond = TCG_COND_EQ;
5173 break;
5174 case 5: /* Ordered Less than or Equal Z || (N && !A) */
5175 case 21: /* Less than or Equal Z || (N && !A) */
5176 c->v1 = tcg_temp_new();
5177 c->g1 = 0;
5178 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5179 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
5180 tcg_gen_andc_i32(c->v1, fpsr, c->v1);
5181 tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
5182 c->tcond = TCG_COND_NE;
5183 break;
5184 case 6: /* Ordered Greater or Less than !(A || Z) */
5185 case 22: /* Greater or Less than !(A || Z) */
5186 c->v1 = tcg_temp_new();
5187 c->g1 = 0;
5188 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
5189 c->tcond = TCG_COND_EQ;
5190 break;
5191 case 7: /* Ordered !A */
5192 case 23: /* Greater, Less or Equal !A */
5193 c->v1 = tcg_temp_new();
5194 c->g1 = 0;
5195 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5196 c->tcond = TCG_COND_EQ;
5197 break;
5198 case 8: /* Unordered A */
5199 case 24: /* Not Greater, Less or Equal A */
5200 c->v1 = tcg_temp_new();
5201 c->g1 = 0;
5202 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5203 c->tcond = TCG_COND_NE;
5204 break;
5205 case 9: /* Unordered or Equal A || Z */
5206 case 25: /* Not Greater or Less then A || Z */
5207 c->v1 = tcg_temp_new();
5208 c->g1 = 0;
5209 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
5210 c->tcond = TCG_COND_NE;
5211 break;
5212 case 10: /* Unordered or Greater Than A || !(N || Z)) */
5213 case 26: /* Not Less or Equal A || !(N || Z)) */
5214 c->v1 = tcg_temp_new();
5215 c->g1 = 0;
5216 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5217 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
5218 tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
5219 tcg_gen_or_i32(c->v1, c->v1, fpsr);
5220 tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5221 c->tcond = TCG_COND_NE;
5222 break;
5223 case 11: /* Unordered or Greater or Equal A || Z || !N */
5224 case 27: /* Not Less Than A || Z || !N */
5225 c->v1 = tcg_temp_new();
5226 c->g1 = 0;
5227 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5228 tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5229 c->tcond = TCG_COND_NE;
5230 break;
5231 case 12: /* Unordered or Less Than A || (N && !Z) */
5232 case 28: /* Not Greater than or Equal A || (N && !Z) */
5233 c->v1 = tcg_temp_new();
5234 c->g1 = 0;
5235 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5236 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
5237 tcg_gen_andc_i32(c->v1, fpsr, c->v1);
5238 tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
5239 c->tcond = TCG_COND_NE;
5240 break;
5241 case 13: /* Unordered or Less or Equal A || Z || N */
5242 case 29: /* Not Greater Than A || Z || N */
5243 c->v1 = tcg_temp_new();
5244 c->g1 = 0;
5245 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5246 c->tcond = TCG_COND_NE;
5247 break;
5248 case 14: /* Not Equal !Z */
5249 case 30: /* Signaling Not Equal !Z */
5250 c->v1 = tcg_temp_new();
5251 c->g1 = 0;
5252 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5253 c->tcond = TCG_COND_EQ;
5254 break;
5255 case 15: /* True */
5256 case 31: /* Signaling True */
5257 c->v1 = c->v2;
5258 c->tcond = TCG_COND_ALWAYS;
5259 break;
5261 tcg_temp_free(fpsr);
/* Branch to l1 if FPU condition 'cond' holds (see gen_fcc_cond). */
5264 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
5266 DisasCompare c;
5268 gen_fcc_cond(&c, s, cond);
5269 update_cc_op(s);
5270 tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
5271 free_cond(&c);
/*
 * FBcc -- FPU conditional branch with a 16- or 32-bit displacement
 * (insn bit 6 selects 32-bit).  The displacement is relative to the
 * address following the opcode word ('base').
 */
5274 DISAS_INSN(fbcc)
5276 uint32_t offset;
5277 uint32_t base;
5278 TCGLabel *l1;
5280 base = s->pc;
5281 offset = (int16_t)read_im16(env, s);
5282 if (insn & (1 << 6)) {
5283 offset = (offset << 16) | read_im16(env, s);
/* Fall-through path first, then the taken branch at l1. */
5286 l1 = gen_new_label();
5287 update_cc_op(s);
5288 gen_fjmpcc(s, insn & 0x3f, l1);
5289 gen_jmp_tb(s, 0, s->pc);
5290 gen_set_label(l1);
5291 gen_jmp_tb(s, 1, base + offset);
/*
 * FScc <ea> -- set a byte to 0xff if the FPU condition holds, else 0.
 * setcond yields 0/1; the negation turns 1 into all-ones.
 */
5294 DISAS_INSN(fscc)
5296 DisasCompare c;
5297 int cond;
5298 TCGv tmp;
5299 uint16_t ext;
5301 ext = read_im16(env, s);
5302 cond = ext & 0x3f;
5303 gen_fcc_cond(&c, s, cond);
5305 tmp = tcg_temp_new();
5306 tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
5307 free_cond(&c);
5309 tcg_gen_neg_i32(tmp, tmp);
5310 DEST_EA(env, insn, OS_BYTE, tmp, NULL);
5311 tcg_temp_free(tmp);
5314 #if defined(CONFIG_SOFTMMU)
/*
 * FRESTORE -- privileged; on 68040 the state frame word is read and
 * discarded (frame contents are not checked, see FIXME).  Other cores
 * fall back to the undefined-instruction path.
 */
5315 DISAS_INSN(frestore)
5317 TCGv addr;
5319 if (IS_USER(s)) {
5320 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
5321 return;
5323 if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5324 SRC_EA(env, addr, OS_LONG, 0, NULL);
5325 /* FIXME: check the state frame */
5326 } else {
5327 disas_undef(env, s, insn);
/*
 * FSAVE -- privileged; on 68040 always writes the IDLE state frame
 * (0x41000000).  Other cores take the undefined-instruction path.
 */
5331 DISAS_INSN(fsave)
5333 if (IS_USER(s)) {
5334 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
5335 return;
5338 if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5339 /* always write IDLE */
5340 TCGv idle = tcg_const_i32(0x41000000);
5341 DEST_EA(env, insn, OS_LONG, idle, NULL);
5342 tcg_temp_free(idle);
5343 } else {
5344 disas_undef(env, s, insn);
5347 #endif
/*
 * Extract one 16-bit MAC operand half from 'val' (upper half if
 * 'upper'), positioned per the current MACSR mode: fractional (FI)
 * keeps it in the high half, signed (SU) sign-extends into the low
 * half, otherwise zero-extend.  Returns a new temp owned by the caller.
 */
5349 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
5351 TCGv tmp = tcg_temp_new();
5352 if (s->env->macsr & MACSR_FI) {
5353 if (upper)
5354 tcg_gen_andi_i32(tmp, val, 0xffff0000);
5355 else
5356 tcg_gen_shli_i32(tmp, val, 16);
5357 } else if (s->env->macsr & MACSR_SU) {
5358 if (upper)
5359 tcg_gen_sari_i32(tmp, val, 16);
5360 else
5361 tcg_gen_ext16s_i32(tmp, val);
5362 } else {
5363 if (upper)
5364 tcg_gen_shri_i32(tmp, val, 16);
5365 else
5366 tcg_gen_ext16u_i32(tmp, val);
5368 return tmp;
/* Clear the per-operation MACSR flags (V/Z/N/EV) before a MAC op. */
5371 static void gen_mac_clear_flags(void)
5373 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
5374 ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
5377 DISAS_INSN(mac)
5379 TCGv rx;
5380 TCGv ry;
5381 uint16_t ext;
5382 int acc;
5383 TCGv tmp;
5384 TCGv addr;
5385 TCGv loadval;
5386 int dual;
5387 TCGv saved_flags;
5389 if (!s->done_mac) {
5390 s->mactmp = tcg_temp_new_i64();
5391 s->done_mac = 1;
5394 ext = read_im16(env, s);
5396 acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
5397 dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
5398 if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
5399 disas_undef(env, s, insn);
5400 return;
5402 if (insn & 0x30) {
5403 /* MAC with load. */
5404 tmp = gen_lea(env, s, insn, OS_LONG);
5405 addr = tcg_temp_new();
5406 tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
5407 /* Load the value now to ensure correct exception behavior.
5408 Perform writeback after reading the MAC inputs. */
5409 loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));
5411 acc ^= 1;
5412 rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
5413 ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
5414 } else {
5415 loadval = addr = NULL_QREG;
5416 rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
5417 ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5420 gen_mac_clear_flags();
5421 #if 0
5422 l1 = -1;
5423 /* Disabled because conditional branches clobber temporary vars. */
5424 if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
5425 /* Skip the multiply if we know we will ignore it. */
5426 l1 = gen_new_label();
5427 tmp = tcg_temp_new();
5428 tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
5429 gen_op_jmp_nz32(tmp, l1);
5431 #endif
5433 if ((ext & 0x0800) == 0) {
5434 /* Word. */
5435 rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
5436 ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
5438 if (s->env->macsr & MACSR_FI) {
5439 gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
5440 } else {
5441 if (s->env->macsr & MACSR_SU)
5442 gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
5443 else
5444 gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
5445 switch ((ext >> 9) & 3) {
5446 case 1:
5447 tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
5448 break;
5449 case 3:
5450 tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
5451 break;
5455 if (dual) {
5456 /* Save the overflow flag from the multiply. */
5457 saved_flags = tcg_temp_new();
5458 tcg_gen_mov_i32(saved_flags, QREG_MACSR);
5459 } else {
5460 saved_flags = NULL_QREG;
5463 #if 0
5464 /* Disabled because conditional branches clobber temporary vars. */
5465 if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
5466 /* Skip the accumulate if the value is already saturated. */
5467 l1 = gen_new_label();
5468 tmp = tcg_temp_new();
5469 gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
5470 gen_op_jmp_nz32(tmp, l1);
5472 #endif
5474 if (insn & 0x100)
5475 tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
5476 else
5477 tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
5479 if (s->env->macsr & MACSR_FI)
5480 gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
5481 else if (s->env->macsr & MACSR_SU)
5482 gen_helper_macsats(cpu_env, tcg_const_i32(acc));
5483 else
5484 gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
5486 #if 0
5487 /* Disabled because conditional branches clobber temporary vars. */
5488 if (l1 != -1)
5489 gen_set_label(l1);
5490 #endif
5492 if (dual) {
5493 /* Dual accumulate variant. */
5494 acc = (ext >> 2) & 3;
5495 /* Restore the overflow flag from the multiplier. */
5496 tcg_gen_mov_i32(QREG_MACSR, saved_flags);
5497 #if 0
5498 /* Disabled because conditional branches clobber temporary vars. */
5499 if ((s->env->macsr & MACSR_OMC) != 0) {
5500 /* Skip the accumulate if the value is already saturated. */
5501 l1 = gen_new_label();
5502 tmp = tcg_temp_new();
5503 gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
5504 gen_op_jmp_nz32(tmp, l1);
5506 #endif
5507 if (ext & 2)
5508 tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
5509 else
5510 tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
5511 if (s->env->macsr & MACSR_FI)
5512 gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
5513 else if (s->env->macsr & MACSR_SU)
5514 gen_helper_macsats(cpu_env, tcg_const_i32(acc));
5515 else
5516 gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
5517 #if 0
5518 /* Disabled because conditional branches clobber temporary vars. */
5519 if (l1 != -1)
5520 gen_set_label(l1);
5521 #endif
5523 gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
5525 if (insn & 0x30) {
5526 TCGv rw;
5527 rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
5528 tcg_gen_mov_i32(rw, loadval);
5529 /* FIXME: Should address writeback happen with the masked or
5530 unmasked value? */
5531 switch ((insn >> 3) & 7) {
5532 case 3: /* Post-increment. */
5533 tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
5534 break;
5535 case 4: /* Pre-decrement. */
5536 tcg_gen_mov_i32(AREG(insn, 0), addr);
5541 DISAS_INSN(from_mac)
5543 TCGv rx;
5544 TCGv_i64 acc;
5545 int accnum;
5547 rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5548 accnum = (insn >> 9) & 3;
5549 acc = MACREG(accnum);
5550 if (s->env->macsr & MACSR_FI) {
5551 gen_helper_get_macf(rx, cpu_env, acc);
5552 } else if ((s->env->macsr & MACSR_OMC) == 0) {
5553 tcg_gen_extrl_i64_i32(rx, acc);
5554 } else if (s->env->macsr & MACSR_SU) {
5555 gen_helper_get_macs(rx, acc);
5556 } else {
5557 gen_helper_get_macu(rx, acc);
5559 if (insn & 0x40) {
5560 tcg_gen_movi_i64(acc, 0);
5561 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5565 DISAS_INSN(move_mac)
5567 /* FIXME: This can be done without a helper. */
5568 int src;
5569 TCGv dest;
5570 src = insn & 3;
5571 dest = tcg_const_i32((insn >> 9) & 3);
5572 gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
5573 gen_mac_clear_flags();
5574 gen_helper_mac_set_flags(cpu_env, dest);
5577 DISAS_INSN(from_macsr)
5579 TCGv reg;
5581 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5582 tcg_gen_mov_i32(reg, QREG_MACSR);
5585 DISAS_INSN(from_mask)
5587 TCGv reg;
5588 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5589 tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5592 DISAS_INSN(from_mext)
5594 TCGv reg;
5595 TCGv acc;
5596 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5597 acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5598 if (s->env->macsr & MACSR_FI)
5599 gen_helper_get_mac_extf(reg, cpu_env, acc);
5600 else
5601 gen_helper_get_mac_exti(reg, cpu_env, acc);
5604 DISAS_INSN(macsr_to_ccr)
5606 TCGv tmp = tcg_temp_new();
5607 tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
5608 gen_helper_set_sr(cpu_env, tmp);
5609 tcg_temp_free(tmp);
5610 set_cc_op(s, CC_OP_FLAGS);
5613 DISAS_INSN(to_mac)
5615 TCGv_i64 acc;
5616 TCGv val;
5617 int accnum;
5618 accnum = (insn >> 9) & 3;
5619 acc = MACREG(accnum);
5620 SRC_EA(env, val, OS_LONG, 0, NULL);
5621 if (s->env->macsr & MACSR_FI) {
5622 tcg_gen_ext_i32_i64(acc, val);
5623 tcg_gen_shli_i64(acc, acc, 8);
5624 } else if (s->env->macsr & MACSR_SU) {
5625 tcg_gen_ext_i32_i64(acc, val);
5626 } else {
5627 tcg_gen_extu_i32_i64(acc, val);
5629 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5630 gen_mac_clear_flags();
5631 gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
5634 DISAS_INSN(to_macsr)
5636 TCGv val;
5637 SRC_EA(env, val, OS_LONG, 0, NULL);
5638 gen_helper_set_macsr(cpu_env, val);
5639 gen_lookup_tb(s);
5642 DISAS_INSN(to_mask)
5644 TCGv val;
5645 SRC_EA(env, val, OS_LONG, 0, NULL);
5646 tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
5649 DISAS_INSN(to_mext)
5651 TCGv val;
5652 TCGv acc;
5653 SRC_EA(env, val, OS_LONG, 0, NULL);
5654 acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5655 if (s->env->macsr & MACSR_FI)
5656 gen_helper_set_mac_extf(cpu_env, val, acc);
5657 else if (s->env->macsr & MACSR_SU)
5658 gen_helper_set_mac_exts(cpu_env, val, acc);
5659 else
5660 gen_helper_set_mac_extu(cpu_env, val, acc);
5663 static disas_proc opcode_table[65536];
5665 static void
5666 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5668 int i;
5669 int from;
5670 int to;
5672 /* Sanity check. All set bits must be included in the mask. */
5673 if (opcode & ~mask) {
5674 fprintf(stderr,
5675 "qemu internal error: bogus opcode definition %04x/%04x\n",
5676 opcode, mask);
5677 abort();
5679 /* This could probably be cleverer. For now just optimize the case where
5680 the top bits are known. */
5681 /* Find the first zero bit in the mask. */
5682 i = 0x8000;
5683 while ((i & mask) != 0)
5684 i >>= 1;
5685 /* Iterate over all combinations of this and lower bits. */
5686 if (i == 0)
5687 i = 1;
5688 else
5689 i <<= 1;
5690 from = opcode & ~(i - 1);
5691 to = from + i;
5692 for (i = from; i < to; i++) {
5693 if ((i & mask) == opcode)
5694 opcode_table[i] = proc;
5698 /* Register m68k opcode handlers. Order is important.
5699 Later insn override earlier ones. */
5700 void register_m68k_insns (CPUM68KState *env)
5702 /* Build the opcode table only once to avoid
5703 multithreading issues. */
5704 if (opcode_table[0] != NULL) {
5705 return;
5708 /* use BASE() for instruction available
5709 * for CF_ISA_A and M68000.
5711 #define BASE(name, opcode, mask) \
5712 register_opcode(disas_##name, 0x##opcode, 0x##mask)
5713 #define INSN(name, opcode, mask, feature) do { \
5714 if (m68k_feature(env, M68K_FEATURE_##feature)) \
5715 BASE(name, opcode, mask); \
5716 } while(0)
5717 BASE(undef, 0000, 0000);
5718 INSN(arith_im, 0080, fff8, CF_ISA_A);
5719 INSN(arith_im, 0000, ff00, M68000);
5720 INSN(chk2, 00c0, f9c0, CHK2);
5721 INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
5722 BASE(bitop_reg, 0100, f1c0);
5723 BASE(bitop_reg, 0140, f1c0);
5724 BASE(bitop_reg, 0180, f1c0);
5725 BASE(bitop_reg, 01c0, f1c0);
5726 INSN(movep, 0108, f138, MOVEP);
5727 INSN(arith_im, 0280, fff8, CF_ISA_A);
5728 INSN(arith_im, 0200, ff00, M68000);
5729 INSN(undef, 02c0, ffc0, M68000);
5730 INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
5731 INSN(arith_im, 0480, fff8, CF_ISA_A);
5732 INSN(arith_im, 0400, ff00, M68000);
5733 INSN(undef, 04c0, ffc0, M68000);
5734 INSN(arith_im, 0600, ff00, M68000);
5735 INSN(undef, 06c0, ffc0, M68000);
5736 INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
5737 INSN(arith_im, 0680, fff8, CF_ISA_A);
5738 INSN(arith_im, 0c00, ff38, CF_ISA_A);
5739 INSN(arith_im, 0c00, ff00, M68000);
5740 BASE(bitop_im, 0800, ffc0);
5741 BASE(bitop_im, 0840, ffc0);
5742 BASE(bitop_im, 0880, ffc0);
5743 BASE(bitop_im, 08c0, ffc0);
5744 INSN(arith_im, 0a80, fff8, CF_ISA_A);
5745 INSN(arith_im, 0a00, ff00, M68000);
5746 #if defined(CONFIG_SOFTMMU)
5747 INSN(moves, 0e00, ff00, M68000);
5748 #endif
5749 INSN(cas, 0ac0, ffc0, CAS);
5750 INSN(cas, 0cc0, ffc0, CAS);
5751 INSN(cas, 0ec0, ffc0, CAS);
5752 INSN(cas2w, 0cfc, ffff, CAS);
5753 INSN(cas2l, 0efc, ffff, CAS);
5754 BASE(move, 1000, f000);
5755 BASE(move, 2000, f000);
5756 BASE(move, 3000, f000);
5757 INSN(chk, 4000, f040, M68000);
5758 INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
5759 INSN(negx, 4080, fff8, CF_ISA_A);
5760 INSN(negx, 4000, ff00, M68000);
5761 INSN(undef, 40c0, ffc0, M68000);
5762 INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
5763 INSN(move_from_sr, 40c0, ffc0, M68000);
5764 BASE(lea, 41c0, f1c0);
5765 BASE(clr, 4200, ff00);
5766 BASE(undef, 42c0, ffc0);
5767 INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
5768 INSN(move_from_ccr, 42c0, ffc0, M68000);
5769 INSN(neg, 4480, fff8, CF_ISA_A);
5770 INSN(neg, 4400, ff00, M68000);
5771 INSN(undef, 44c0, ffc0, M68000);
5772 BASE(move_to_ccr, 44c0, ffc0);
5773 INSN(not, 4680, fff8, CF_ISA_A);
5774 INSN(not, 4600, ff00, M68000);
5775 #if defined(CONFIG_SOFTMMU)
5776 BASE(move_to_sr, 46c0, ffc0);
5777 #endif
5778 INSN(nbcd, 4800, ffc0, M68000);
5779 INSN(linkl, 4808, fff8, M68000);
5780 BASE(pea, 4840, ffc0);
5781 BASE(swap, 4840, fff8);
5782 INSN(bkpt, 4848, fff8, BKPT);
5783 INSN(movem, 48d0, fbf8, CF_ISA_A);
5784 INSN(movem, 48e8, fbf8, CF_ISA_A);
5785 INSN(movem, 4880, fb80, M68000);
5786 BASE(ext, 4880, fff8);
5787 BASE(ext, 48c0, fff8);
5788 BASE(ext, 49c0, fff8);
5789 BASE(tst, 4a00, ff00);
5790 INSN(tas, 4ac0, ffc0, CF_ISA_B);
5791 INSN(tas, 4ac0, ffc0, M68000);
5792 #if defined(CONFIG_SOFTMMU)
5793 INSN(halt, 4ac8, ffff, CF_ISA_A);
5794 #endif
5795 INSN(pulse, 4acc, ffff, CF_ISA_A);
5796 BASE(illegal, 4afc, ffff);
5797 INSN(mull, 4c00, ffc0, CF_ISA_A);
5798 INSN(mull, 4c00, ffc0, LONG_MULDIV);
5799 INSN(divl, 4c40, ffc0, CF_ISA_A);
5800 INSN(divl, 4c40, ffc0, LONG_MULDIV);
5801 INSN(sats, 4c80, fff8, CF_ISA_B);
5802 BASE(trap, 4e40, fff0);
5803 BASE(link, 4e50, fff8);
5804 BASE(unlk, 4e58, fff8);
5805 #if defined(CONFIG_SOFTMMU)
5806 INSN(move_to_usp, 4e60, fff8, USP);
5807 INSN(move_from_usp, 4e68, fff8, USP);
5808 INSN(reset, 4e70, ffff, M68000);
5809 BASE(stop, 4e72, ffff);
5810 BASE(rte, 4e73, ffff);
5811 INSN(cf_movec, 4e7b, ffff, CF_ISA_A);
5812 INSN(m68k_movec, 4e7a, fffe, M68000);
5813 #endif
5814 BASE(nop, 4e71, ffff);
5815 INSN(rtd, 4e74, ffff, RTD);
5816 BASE(rts, 4e75, ffff);
5817 BASE(jump, 4e80, ffc0);
5818 BASE(jump, 4ec0, ffc0);
5819 INSN(addsubq, 5000, f080, M68000);
5820 BASE(addsubq, 5080, f0c0);
5821 INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx */
5822 INSN(scc, 50c0, f0c0, M68000); /* Scc.B <EA> */
5823 INSN(dbcc, 50c8, f0f8, M68000);
5824 INSN(tpf, 51f8, fff8, CF_ISA_A);
5826 /* Branch instructions. */
5827 BASE(branch, 6000, f000);
5828 /* Disable long branch instructions, then add back the ones we want. */
5829 BASE(undef, 60ff, f0ff); /* All long branches. */
5830 INSN(branch, 60ff, f0ff, CF_ISA_B);
5831 INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
5832 INSN(branch, 60ff, ffff, BRAL);
5833 INSN(branch, 60ff, f0ff, BCCL);
5835 BASE(moveq, 7000, f100);
5836 INSN(mvzs, 7100, f100, CF_ISA_B);
5837 BASE(or, 8000, f000);
5838 BASE(divw, 80c0, f0c0);
5839 INSN(sbcd_reg, 8100, f1f8, M68000);
5840 INSN(sbcd_mem, 8108, f1f8, M68000);
5841 BASE(addsub, 9000, f000);
5842 INSN(undef, 90c0, f0c0, CF_ISA_A);
5843 INSN(subx_reg, 9180, f1f8, CF_ISA_A);
5844 INSN(subx_reg, 9100, f138, M68000);
5845 INSN(subx_mem, 9108, f138, M68000);
5846 INSN(suba, 91c0, f1c0, CF_ISA_A);
5847 INSN(suba, 90c0, f0c0, M68000);
5849 BASE(undef_mac, a000, f000);
5850 INSN(mac, a000, f100, CF_EMAC);
5851 INSN(from_mac, a180, f9b0, CF_EMAC);
5852 INSN(move_mac, a110, f9fc, CF_EMAC);
5853 INSN(from_macsr,a980, f9f0, CF_EMAC);
5854 INSN(from_mask, ad80, fff0, CF_EMAC);
5855 INSN(from_mext, ab80, fbf0, CF_EMAC);
5856 INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
5857 INSN(to_mac, a100, f9c0, CF_EMAC);
5858 INSN(to_macsr, a900, ffc0, CF_EMAC);
5859 INSN(to_mext, ab00, fbc0, CF_EMAC);
5860 INSN(to_mask, ad00, ffc0, CF_EMAC);
5862 INSN(mov3q, a140, f1c0, CF_ISA_B);
5863 INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
5864 INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
5865 INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
5866 INSN(cmp, b080, f1c0, CF_ISA_A);
5867 INSN(cmpa, b1c0, f1c0, CF_ISA_A);
5868 INSN(cmp, b000, f100, M68000);
5869 INSN(eor, b100, f100, M68000);
5870 INSN(cmpm, b108, f138, M68000);
5871 INSN(cmpa, b0c0, f0c0, M68000);
5872 INSN(eor, b180, f1c0, CF_ISA_A);
5873 BASE(and, c000, f000);
5874 INSN(exg_dd, c140, f1f8, M68000);
5875 INSN(exg_aa, c148, f1f8, M68000);
5876 INSN(exg_da, c188, f1f8, M68000);
5877 BASE(mulw, c0c0, f0c0);
5878 INSN(abcd_reg, c100, f1f8, M68000);
5879 INSN(abcd_mem, c108, f1f8, M68000);
5880 BASE(addsub, d000, f000);
5881 INSN(undef, d0c0, f0c0, CF_ISA_A);
5882 INSN(addx_reg, d180, f1f8, CF_ISA_A);
5883 INSN(addx_reg, d100, f138, M68000);
5884 INSN(addx_mem, d108, f138, M68000);
5885 INSN(adda, d1c0, f1c0, CF_ISA_A);
5886 INSN(adda, d0c0, f0c0, M68000);
5887 INSN(shift_im, e080, f0f0, CF_ISA_A);
5888 INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
5889 INSN(shift8_im, e000, f0f0, M68000);
5890 INSN(shift16_im, e040, f0f0, M68000);
5891 INSN(shift_im, e080, f0f0, M68000);
5892 INSN(shift8_reg, e020, f0f0, M68000);
5893 INSN(shift16_reg, e060, f0f0, M68000);
5894 INSN(shift_reg, e0a0, f0f0, M68000);
5895 INSN(shift_mem, e0c0, fcc0, M68000);
5896 INSN(rotate_im, e090, f0f0, M68000);
5897 INSN(rotate8_im, e010, f0f0, M68000);
5898 INSN(rotate16_im, e050, f0f0, M68000);
5899 INSN(rotate_reg, e0b0, f0f0, M68000);
5900 INSN(rotate8_reg, e030, f0f0, M68000);
5901 INSN(rotate16_reg, e070, f0f0, M68000);
5902 INSN(rotate_mem, e4c0, fcc0, M68000);
5903 INSN(bfext_mem, e9c0, fdc0, BITFIELD); /* bfextu & bfexts */
5904 INSN(bfext_reg, e9c0, fdf8, BITFIELD);
5905 INSN(bfins_mem, efc0, ffc0, BITFIELD);
5906 INSN(bfins_reg, efc0, fff8, BITFIELD);
5907 INSN(bfop_mem, eac0, ffc0, BITFIELD); /* bfchg */
5908 INSN(bfop_reg, eac0, fff8, BITFIELD); /* bfchg */
5909 INSN(bfop_mem, ecc0, ffc0, BITFIELD); /* bfclr */
5910 INSN(bfop_reg, ecc0, fff8, BITFIELD); /* bfclr */
5911 INSN(bfop_mem, edc0, ffc0, BITFIELD); /* bfffo */
5912 INSN(bfop_reg, edc0, fff8, BITFIELD); /* bfffo */
5913 INSN(bfop_mem, eec0, ffc0, BITFIELD); /* bfset */
5914 INSN(bfop_reg, eec0, fff8, BITFIELD); /* bfset */
5915 INSN(bfop_mem, e8c0, ffc0, BITFIELD); /* bftst */
5916 INSN(bfop_reg, e8c0, fff8, BITFIELD); /* bftst */
5917 BASE(undef_fpu, f000, f000);
5918 INSN(fpu, f200, ffc0, CF_FPU);
5919 INSN(fbcc, f280, ffc0, CF_FPU);
5920 INSN(fpu, f200, ffc0, FPU);
5921 INSN(fscc, f240, ffc0, FPU);
5922 INSN(fbcc, f280, ff80, FPU);
5923 #if defined(CONFIG_SOFTMMU)
5924 INSN(frestore, f340, ffc0, CF_FPU);
5925 INSN(fsave, f300, ffc0, CF_FPU);
5926 INSN(frestore, f340, ffc0, FPU);
5927 INSN(fsave, f300, ffc0, FPU);
5928 INSN(intouch, f340, ffc0, CF_ISA_A);
5929 INSN(cpushl, f428, ff38, CF_ISA_A);
5930 INSN(cpush, f420, ff20, M68040);
5931 INSN(cinv, f400, ff20, M68040);
5932 INSN(pflush, f500, ffe0, M68040);
5933 INSN(ptest, f548, ffd8, M68040);
5934 INSN(wddata, fb00, ff00, CF_ISA_A);
5935 INSN(wdebug, fbc0, ffc0, CF_ISA_A);
5936 #endif
5937 INSN(move16_mem, f600, ffe0, M68040);
5938 INSN(move16_reg, f620, fff8, M68040);
5939 #undef INSN
5942 /* ??? Some of this implementation is not exception safe. We should always
5943 write back the result to memory before setting the condition codes. */
5944 static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
5946 uint16_t insn = read_im16(env, s);
5947 opcode_table[insn](env, s, insn);
5948 do_writebacks(s);
5951 /* generate intermediate code for basic block 'tb'. */
5952 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
5954 CPUM68KState *env = cs->env_ptr;
5955 DisasContext dc1, *dc = &dc1;
5956 target_ulong pc_start;
5957 int pc_offset;
5958 int num_insns;
5959 int max_insns;
5961 /* generate intermediate code */
5962 pc_start = tb->pc;
5964 dc->tb = tb;
5966 dc->env = env;
5967 dc->is_jmp = DISAS_NEXT;
5968 dc->pc = pc_start;
5969 dc->cc_op = CC_OP_DYNAMIC;
5970 dc->cc_op_synced = 1;
5971 dc->singlestep_enabled = cs->singlestep_enabled;
5972 dc->done_mac = 0;
5973 dc->writeback_mask = 0;
5974 num_insns = 0;
5975 max_insns = tb_cflags(tb) & CF_COUNT_MASK;
5976 if (max_insns == 0) {
5977 max_insns = CF_COUNT_MASK;
5979 if (max_insns > TCG_MAX_INSNS) {
5980 max_insns = TCG_MAX_INSNS;
5983 gen_tb_start(tb);
5984 do {
5985 pc_offset = dc->pc - pc_start;
5986 tcg_gen_insn_start(dc->pc, dc->cc_op);
5987 num_insns++;
5989 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5990 gen_exception(dc, dc->pc, EXCP_DEBUG);
5991 dc->is_jmp = DISAS_JUMP;
5992 /* The address covered by the breakpoint must be included in
5993 [tb->pc, tb->pc + tb->size) in order to for it to be
5994 properly cleared -- thus we increment the PC here so that
5995 the logic setting tb->size below does the right thing. */
5996 dc->pc += 2;
5997 break;
6000 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
6001 gen_io_start();
6004 dc->insn_pc = dc->pc;
6005 disas_m68k_insn(env, dc);
6006 } while (!dc->is_jmp && !tcg_op_buf_full() &&
6007 !cs->singlestep_enabled &&
6008 !singlestep &&
6009 (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
6010 num_insns < max_insns);
6012 if (tb_cflags(tb) & CF_LAST_IO)
6013 gen_io_end();
6014 if (unlikely(cs->singlestep_enabled)) {
6015 /* Make sure the pc is updated, and raise a debug exception. */
6016 if (!dc->is_jmp) {
6017 update_cc_op(dc);
6018 tcg_gen_movi_i32(QREG_PC, dc->pc);
6020 gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
6021 } else {
6022 switch(dc->is_jmp) {
6023 case DISAS_NEXT:
6024 update_cc_op(dc);
6025 gen_jmp_tb(dc, 0, dc->pc);
6026 break;
6027 default:
6028 case DISAS_JUMP:
6029 case DISAS_UPDATE:
6030 update_cc_op(dc);
6031 /* indicate that the hash table must be used to find the next TB */
6032 tcg_gen_exit_tb(0);
6033 break;
6034 case DISAS_TB_JUMP:
6035 /* nothing more to generate */
6036 break;
6039 gen_tb_end(tb, num_insns);
6041 #ifdef DEBUG_DISAS
6042 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
6043 && qemu_log_in_addr_range(pc_start)) {
6044 qemu_log_lock();
6045 qemu_log("----------------\n");
6046 qemu_log("IN: %s\n", lookup_symbol(pc_start));
6047 log_target_disas(cs, pc_start, dc->pc - pc_start);
6048 qemu_log("\n");
6049 qemu_log_unlock();
6051 #endif
6052 tb->size = dc->pc - pc_start;
6053 tb->icount = num_insns;
6056 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
6058 floatx80 a = { .high = high, .low = low };
6059 union {
6060 float64 f64;
6061 double d;
6062 } u;
6064 u.f64 = floatx80_to_float64(a, &env->fp_status);
6065 return u.d;
6068 void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
6069 int flags)
6071 M68kCPU *cpu = M68K_CPU(cs);
6072 CPUM68KState *env = &cpu->env;
6073 int i;
6074 uint16_t sr;
6075 for (i = 0; i < 8; i++) {
6076 cpu_fprintf(f, "D%d = %08x A%d = %08x "
6077 "F%d = %04x %016"PRIx64" (%12g)\n",
6078 i, env->dregs[i], i, env->aregs[i],
6079 i, env->fregs[i].l.upper, env->fregs[i].l.lower,
6080 floatx80_to_double(env, env->fregs[i].l.upper,
6081 env->fregs[i].l.lower));
6083 cpu_fprintf (f, "PC = %08x ", env->pc);
6084 sr = env->sr | cpu_m68k_get_ccr(env);
6085 cpu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
6086 sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
6087 (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
6088 (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
6089 (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
6090 (sr & CCF_C) ? 'C' : '-');
6091 cpu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
6092 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
6093 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
6094 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
6095 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
6096 cpu_fprintf(f, "\n "
6097 "FPCR = %04x ", env->fpcr);
6098 switch (env->fpcr & FPCR_PREC_MASK) {
6099 case FPCR_PREC_X:
6100 cpu_fprintf(f, "X ");
6101 break;
6102 case FPCR_PREC_S:
6103 cpu_fprintf(f, "S ");
6104 break;
6105 case FPCR_PREC_D:
6106 cpu_fprintf(f, "D ");
6107 break;
6109 switch (env->fpcr & FPCR_RND_MASK) {
6110 case FPCR_RND_N:
6111 cpu_fprintf(f, "RN ");
6112 break;
6113 case FPCR_RND_Z:
6114 cpu_fprintf(f, "RZ ");
6115 break;
6116 case FPCR_RND_M:
6117 cpu_fprintf(f, "RM ");
6118 break;
6119 case FPCR_RND_P:
6120 cpu_fprintf(f, "RP ");
6121 break;
6123 cpu_fprintf(f, "\n");
6124 #ifdef CONFIG_SOFTMMU
6125 cpu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
6126 env->current_sp == M68K_SSP ? "->" : " ", env->sp[M68K_SSP],
6127 env->current_sp == M68K_USP ? "->" : " ", env->sp[M68K_USP],
6128 env->current_sp == M68K_ISP ? "->" : " ", env->sp[M68K_ISP]);
6129 cpu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
6130 cpu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
6131 cpu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
6132 env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
6133 cpu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
6134 env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
6135 env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
6136 cpu_fprintf(f, "MMUSR %08x, fault at %08x\n",
6137 env->mmu.mmusr, env->mmu.ar);
6138 #endif
6141 void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
6142 target_ulong *data)
6144 int cc_op = data[1];
6145 env->pc = data[0];
6146 if (cc_op != CC_OP_DYNAMIC) {
6147 env->cc_op = cc_op;