crypto: Do not fail for EINTR during qcrypto_random_bytes
[qemu/ar7.git] / target / m68k / translate.c
blobf0534a4ba0c3af6dee771fc4fd1d03fbe5a7d658
1 /*
2 * m68k translation
4 * Copyright (c) 2005-2007 CodeSourcery
5 * Written by Paul Brook
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "qemu/log.h"
27 #include "qemu/qemu-print.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/translator.h"
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
34 #include "trace-tcg.h"
35 #include "exec/log.h"
36 #include "fpu/softfloat.h"
39 //#define DEBUG_DISPATCH 1
41 #define DEFO32(name, offset) static TCGv QREG_##name;
42 #define DEFO64(name, offset) static TCGv_i64 QREG_##name;
43 #include "qregs.def"
44 #undef DEFO32
45 #undef DEFO64
47 static TCGv_i32 cpu_halted;
48 static TCGv_i32 cpu_exception_index;
50 static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
51 static TCGv cpu_dregs[8];
52 static TCGv cpu_aregs[8];
53 static TCGv_i64 cpu_macc[4];
55 #define REG(insn, pos) (((insn) >> (pos)) & 7)
56 #define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
57 #define AREG(insn, pos) get_areg(s, REG(insn, pos))
58 #define MACREG(acc) cpu_macc[acc]
59 #define QREG_SP get_areg(s, 7)
61 static TCGv NULL_QREG;
62 #define IS_NULL_QREG(t) (t == NULL_QREG)
63 /* Used to distinguish stores from bad addressing modes. */
64 static TCGv store_dummy;
66 #include "exec/gen-icount.h"
68 void m68k_tcg_init(void)
70 char *p;
71 int i;
73 #define DEFO32(name, offset) \
74 QREG_##name = tcg_global_mem_new_i32(cpu_env, \
75 offsetof(CPUM68KState, offset), #name);
76 #define DEFO64(name, offset) \
77 QREG_##name = tcg_global_mem_new_i64(cpu_env, \
78 offsetof(CPUM68KState, offset), #name);
79 #include "qregs.def"
80 #undef DEFO32
81 #undef DEFO64
83 cpu_halted = tcg_global_mem_new_i32(cpu_env,
84 -offsetof(M68kCPU, env) +
85 offsetof(CPUState, halted), "HALTED");
86 cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
87 -offsetof(M68kCPU, env) +
88 offsetof(CPUState, exception_index),
89 "EXCEPTION");
91 p = cpu_reg_names;
92 for (i = 0; i < 8; i++) {
93 sprintf(p, "D%d", i);
94 cpu_dregs[i] = tcg_global_mem_new(cpu_env,
95 offsetof(CPUM68KState, dregs[i]), p);
96 p += 3;
97 sprintf(p, "A%d", i);
98 cpu_aregs[i] = tcg_global_mem_new(cpu_env,
99 offsetof(CPUM68KState, aregs[i]), p);
100 p += 3;
102 for (i = 0; i < 4; i++) {
103 sprintf(p, "ACC%d", i);
104 cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
105 offsetof(CPUM68KState, macc[i]), p);
106 p += 5;
109 NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
110 store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
113 /* internal defines */
114 typedef struct DisasContext {
115 DisasContextBase base;
116 CPUM68KState *env;
117 target_ulong pc;
118 CCOp cc_op; /* Current CC operation */
119 int cc_op_synced;
120 TCGv_i64 mactmp;
121 int done_mac;
122 int writeback_mask;
123 TCGv writeback[8];
124 #define MAX_TO_RELEASE 8
125 int release_count;
126 TCGv release[MAX_TO_RELEASE];
127 } DisasContext;
129 static void init_release_array(DisasContext *s)
131 #ifdef CONFIG_DEBUG_TCG
132 memset(s->release, 0, sizeof(s->release));
133 #endif
134 s->release_count = 0;
137 static void do_release(DisasContext *s)
139 int i;
140 for (i = 0; i < s->release_count; i++) {
141 tcg_temp_free(s->release[i]);
143 init_release_array(s);
146 static TCGv mark_to_release(DisasContext *s, TCGv tmp)
148 g_assert(s->release_count < MAX_TO_RELEASE);
149 return s->release[s->release_count++] = tmp;
152 static TCGv get_areg(DisasContext *s, unsigned regno)
154 if (s->writeback_mask & (1 << regno)) {
155 return s->writeback[regno];
156 } else {
157 return cpu_aregs[regno];
161 static void delay_set_areg(DisasContext *s, unsigned regno,
162 TCGv val, bool give_temp)
164 if (s->writeback_mask & (1 << regno)) {
165 if (give_temp) {
166 tcg_temp_free(s->writeback[regno]);
167 s->writeback[regno] = val;
168 } else {
169 tcg_gen_mov_i32(s->writeback[regno], val);
171 } else {
172 s->writeback_mask |= 1 << regno;
173 if (give_temp) {
174 s->writeback[regno] = val;
175 } else {
176 TCGv tmp = tcg_temp_new();
177 s->writeback[regno] = tmp;
178 tcg_gen_mov_i32(tmp, val);
183 static void do_writebacks(DisasContext *s)
185 unsigned mask = s->writeback_mask;
186 if (mask) {
187 s->writeback_mask = 0;
188 do {
189 unsigned regno = ctz32(mask);
190 tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
191 tcg_temp_free(s->writeback[regno]);
192 mask &= mask - 1;
193 } while (mask);
197 /* is_jmp field values */
198 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
199 #define DISAS_EXIT DISAS_TARGET_1 /* cpu state was modified dynamically */
201 #if defined(CONFIG_USER_ONLY)
202 #define IS_USER(s) 1
203 #else
204 #define IS_USER(s) (!(s->base.tb->flags & TB_FLAGS_MSR_S))
205 #define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \
206 MMU_KERNEL_IDX : MMU_USER_IDX)
207 #define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \
208 MMU_KERNEL_IDX : MMU_USER_IDX)
209 #endif
211 typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
213 #ifdef DEBUG_DISPATCH
214 #define DISAS_INSN(name) \
215 static void real_disas_##name(CPUM68KState *env, DisasContext *s, \
216 uint16_t insn); \
217 static void disas_##name(CPUM68KState *env, DisasContext *s, \
218 uint16_t insn) \
220 qemu_log("Dispatch " #name "\n"); \
221 real_disas_##name(env, s, insn); \
223 static void real_disas_##name(CPUM68KState *env, DisasContext *s, \
224 uint16_t insn)
225 #else
226 #define DISAS_INSN(name) \
227 static void disas_##name(CPUM68KState *env, DisasContext *s, \
228 uint16_t insn)
229 #endif
231 static const uint8_t cc_op_live[CC_OP_NB] = {
232 [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
233 [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
234 [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
235 [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
236 [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
237 [CC_OP_LOGIC] = CCF_X | CCF_N
240 static void set_cc_op(DisasContext *s, CCOp op)
242 CCOp old_op = s->cc_op;
243 int dead;
245 if (old_op == op) {
246 return;
248 s->cc_op = op;
249 s->cc_op_synced = 0;
251 /* Discard CC computation that will no longer be used.
252 Note that X and N are never dead. */
253 dead = cc_op_live[old_op] & ~cc_op_live[op];
254 if (dead & CCF_C) {
255 tcg_gen_discard_i32(QREG_CC_C);
257 if (dead & CCF_Z) {
258 tcg_gen_discard_i32(QREG_CC_Z);
260 if (dead & CCF_V) {
261 tcg_gen_discard_i32(QREG_CC_V);
265 /* Update the CPU env CC_OP state. */
266 static void update_cc_op(DisasContext *s)
268 if (!s->cc_op_synced) {
269 s->cc_op_synced = 1;
270 tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
274 /* Generate a jump to an immediate address. */
275 static void gen_jmp_im(DisasContext *s, uint32_t dest)
277 update_cc_op(s);
278 tcg_gen_movi_i32(QREG_PC, dest);
279 s->base.is_jmp = DISAS_JUMP;
282 /* Generate a jump to the address in qreg DEST. */
283 static void gen_jmp(DisasContext *s, TCGv dest)
285 update_cc_op(s);
286 tcg_gen_mov_i32(QREG_PC, dest);
287 s->base.is_jmp = DISAS_JUMP;
290 static void gen_exception(DisasContext *s, uint32_t dest, int nr)
292 TCGv_i32 tmp;
294 update_cc_op(s);
295 tcg_gen_movi_i32(QREG_PC, dest);
297 tmp = tcg_const_i32(nr);
298 gen_helper_raise_exception(cpu_env, tmp);
299 tcg_temp_free_i32(tmp);
301 s->base.is_jmp = DISAS_NORETURN;
304 static inline void gen_addr_fault(DisasContext *s)
306 gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
309 /* Generate a load from the specified address. Narrow values are
310 sign extended to full register width. */
311 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
312 int sign, int index)
314 TCGv tmp;
315 tmp = tcg_temp_new_i32();
316 switch(opsize) {
317 case OS_BYTE:
318 if (sign)
319 tcg_gen_qemu_ld8s(tmp, addr, index);
320 else
321 tcg_gen_qemu_ld8u(tmp, addr, index);
322 break;
323 case OS_WORD:
324 if (sign)
325 tcg_gen_qemu_ld16s(tmp, addr, index);
326 else
327 tcg_gen_qemu_ld16u(tmp, addr, index);
328 break;
329 case OS_LONG:
330 tcg_gen_qemu_ld32u(tmp, addr, index);
331 break;
332 default:
333 g_assert_not_reached();
335 return tmp;
338 /* Generate a store. */
339 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
340 int index)
342 switch(opsize) {
343 case OS_BYTE:
344 tcg_gen_qemu_st8(val, addr, index);
345 break;
346 case OS_WORD:
347 tcg_gen_qemu_st16(val, addr, index);
348 break;
349 case OS_LONG:
350 tcg_gen_qemu_st32(val, addr, index);
351 break;
352 default:
353 g_assert_not_reached();
357 typedef enum {
358 EA_STORE,
359 EA_LOADU,
360 EA_LOADS
361 } ea_what;
363 /* Generate an unsigned load if VAL is 0 a signed load if val is -1,
364 otherwise generate a store. */
365 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
366 ea_what what, int index)
368 if (what == EA_STORE) {
369 gen_store(s, opsize, addr, val, index);
370 return store_dummy;
371 } else {
372 return mark_to_release(s, gen_load(s, opsize, addr,
373 what == EA_LOADS, index));
377 /* Read a 16-bit immediate constant */
378 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
380 uint16_t im;
381 im = cpu_lduw_code(env, s->pc);
382 s->pc += 2;
383 return im;
386 /* Read an 8-bit immediate constant */
387 static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
389 return read_im16(env, s);
392 /* Read a 32-bit immediate constant. */
393 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
395 uint32_t im;
396 im = read_im16(env, s) << 16;
397 im |= 0xffff & read_im16(env, s);
398 return im;
401 /* Read a 64-bit immediate constant. */
402 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
404 uint64_t im;
405 im = (uint64_t)read_im32(env, s) << 32;
406 im |= (uint64_t)read_im32(env, s);
407 return im;
410 /* Calculate and address index. */
411 static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
413 TCGv add;
414 int scale;
416 add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
417 if ((ext & 0x800) == 0) {
418 tcg_gen_ext16s_i32(tmp, add);
419 add = tmp;
421 scale = (ext >> 9) & 3;
422 if (scale != 0) {
423 tcg_gen_shli_i32(tmp, add, scale);
424 add = tmp;
426 return add;
429 /* Handle a base + index + displacement effective addresss.
430 A NULL_QREG base means pc-relative. */
431 static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
433 uint32_t offset;
434 uint16_t ext;
435 TCGv add;
436 TCGv tmp;
437 uint32_t bd, od;
439 offset = s->pc;
440 ext = read_im16(env, s);
442 if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
443 return NULL_QREG;
445 if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
446 !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
447 ext &= ~(3 << 9);
450 if (ext & 0x100) {
451 /* full extension word format */
452 if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
453 return NULL_QREG;
455 if ((ext & 0x30) > 0x10) {
456 /* base displacement */
457 if ((ext & 0x30) == 0x20) {
458 bd = (int16_t)read_im16(env, s);
459 } else {
460 bd = read_im32(env, s);
462 } else {
463 bd = 0;
465 tmp = mark_to_release(s, tcg_temp_new());
466 if ((ext & 0x44) == 0) {
467 /* pre-index */
468 add = gen_addr_index(s, ext, tmp);
469 } else {
470 add = NULL_QREG;
472 if ((ext & 0x80) == 0) {
473 /* base not suppressed */
474 if (IS_NULL_QREG(base)) {
475 base = mark_to_release(s, tcg_const_i32(offset + bd));
476 bd = 0;
478 if (!IS_NULL_QREG(add)) {
479 tcg_gen_add_i32(tmp, add, base);
480 add = tmp;
481 } else {
482 add = base;
485 if (!IS_NULL_QREG(add)) {
486 if (bd != 0) {
487 tcg_gen_addi_i32(tmp, add, bd);
488 add = tmp;
490 } else {
491 add = mark_to_release(s, tcg_const_i32(bd));
493 if ((ext & 3) != 0) {
494 /* memory indirect */
495 base = mark_to_release(s, gen_load(s, OS_LONG, add, 0, IS_USER(s)));
496 if ((ext & 0x44) == 4) {
497 add = gen_addr_index(s, ext, tmp);
498 tcg_gen_add_i32(tmp, add, base);
499 add = tmp;
500 } else {
501 add = base;
503 if ((ext & 3) > 1) {
504 /* outer displacement */
505 if ((ext & 3) == 2) {
506 od = (int16_t)read_im16(env, s);
507 } else {
508 od = read_im32(env, s);
510 } else {
511 od = 0;
513 if (od != 0) {
514 tcg_gen_addi_i32(tmp, add, od);
515 add = tmp;
518 } else {
519 /* brief extension word format */
520 tmp = mark_to_release(s, tcg_temp_new());
521 add = gen_addr_index(s, ext, tmp);
522 if (!IS_NULL_QREG(base)) {
523 tcg_gen_add_i32(tmp, add, base);
524 if ((int8_t)ext)
525 tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
526 } else {
527 tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
529 add = tmp;
531 return add;
534 /* Sign or zero extend a value. */
536 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
538 switch (opsize) {
539 case OS_BYTE:
540 if (sign) {
541 tcg_gen_ext8s_i32(res, val);
542 } else {
543 tcg_gen_ext8u_i32(res, val);
545 break;
546 case OS_WORD:
547 if (sign) {
548 tcg_gen_ext16s_i32(res, val);
549 } else {
550 tcg_gen_ext16u_i32(res, val);
552 break;
553 case OS_LONG:
554 tcg_gen_mov_i32(res, val);
555 break;
556 default:
557 g_assert_not_reached();
561 /* Evaluate all the CC flags. */
563 static void gen_flush_flags(DisasContext *s)
565 TCGv t0, t1;
567 switch (s->cc_op) {
568 case CC_OP_FLAGS:
569 return;
571 case CC_OP_ADDB:
572 case CC_OP_ADDW:
573 case CC_OP_ADDL:
574 tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
575 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
576 /* Compute signed overflow for addition. */
577 t0 = tcg_temp_new();
578 t1 = tcg_temp_new();
579 tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
580 gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
581 tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
582 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
583 tcg_temp_free(t0);
584 tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
585 tcg_temp_free(t1);
586 break;
588 case CC_OP_SUBB:
589 case CC_OP_SUBW:
590 case CC_OP_SUBL:
591 tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
592 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
593 /* Compute signed overflow for subtraction. */
594 t0 = tcg_temp_new();
595 t1 = tcg_temp_new();
596 tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
597 gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
598 tcg_gen_xor_i32(t1, QREG_CC_N, t0);
599 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
600 tcg_temp_free(t0);
601 tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
602 tcg_temp_free(t1);
603 break;
605 case CC_OP_CMPB:
606 case CC_OP_CMPW:
607 case CC_OP_CMPL:
608 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
609 tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
610 gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
611 /* Compute signed overflow for subtraction. */
612 t0 = tcg_temp_new();
613 tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
614 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
615 tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
616 tcg_temp_free(t0);
617 tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
618 break;
620 case CC_OP_LOGIC:
621 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
622 tcg_gen_movi_i32(QREG_CC_C, 0);
623 tcg_gen_movi_i32(QREG_CC_V, 0);
624 break;
626 case CC_OP_DYNAMIC:
627 gen_helper_flush_flags(cpu_env, QREG_CC_OP);
628 s->cc_op_synced = 1;
629 break;
631 default:
632 t0 = tcg_const_i32(s->cc_op);
633 gen_helper_flush_flags(cpu_env, t0);
634 tcg_temp_free(t0);
635 s->cc_op_synced = 1;
636 break;
639 /* Note that flush_flags also assigned to env->cc_op. */
640 s->cc_op = CC_OP_FLAGS;
643 static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
645 TCGv tmp;
647 if (opsize == OS_LONG) {
648 tmp = val;
649 } else {
650 tmp = mark_to_release(s, tcg_temp_new());
651 gen_ext(tmp, val, opsize, sign);
654 return tmp;
657 static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
659 gen_ext(QREG_CC_N, val, opsize, 1);
660 set_cc_op(s, CC_OP_LOGIC);
663 static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
665 tcg_gen_mov_i32(QREG_CC_N, dest);
666 tcg_gen_mov_i32(QREG_CC_V, src);
667 set_cc_op(s, CC_OP_CMPB + opsize);
670 static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
672 gen_ext(QREG_CC_N, dest, opsize, 1);
673 tcg_gen_mov_i32(QREG_CC_V, src);
676 static inline int opsize_bytes(int opsize)
678 switch (opsize) {
679 case OS_BYTE: return 1;
680 case OS_WORD: return 2;
681 case OS_LONG: return 4;
682 case OS_SINGLE: return 4;
683 case OS_DOUBLE: return 8;
684 case OS_EXTENDED: return 12;
685 case OS_PACKED: return 12;
686 default:
687 g_assert_not_reached();
691 static inline int insn_opsize(int insn)
693 switch ((insn >> 6) & 3) {
694 case 0: return OS_BYTE;
695 case 1: return OS_WORD;
696 case 2: return OS_LONG;
697 default:
698 g_assert_not_reached();
702 static inline int ext_opsize(int ext, int pos)
704 switch ((ext >> pos) & 7) {
705 case 0: return OS_LONG;
706 case 1: return OS_SINGLE;
707 case 2: return OS_EXTENDED;
708 case 3: return OS_PACKED;
709 case 4: return OS_WORD;
710 case 5: return OS_DOUBLE;
711 case 6: return OS_BYTE;
712 default:
713 g_assert_not_reached();
717 /* Assign value to a register. If the width is less than the register width
718 only the low part of the register is set. */
719 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
721 TCGv tmp;
722 switch (opsize) {
723 case OS_BYTE:
724 tcg_gen_andi_i32(reg, reg, 0xffffff00);
725 tmp = tcg_temp_new();
726 tcg_gen_ext8u_i32(tmp, val);
727 tcg_gen_or_i32(reg, reg, tmp);
728 tcg_temp_free(tmp);
729 break;
730 case OS_WORD:
731 tcg_gen_andi_i32(reg, reg, 0xffff0000);
732 tmp = tcg_temp_new();
733 tcg_gen_ext16u_i32(tmp, val);
734 tcg_gen_or_i32(reg, reg, tmp);
735 tcg_temp_free(tmp);
736 break;
737 case OS_LONG:
738 case OS_SINGLE:
739 tcg_gen_mov_i32(reg, val);
740 break;
741 default:
742 g_assert_not_reached();
746 /* Generate code for an "effective address". Does not adjust the base
747 register for autoincrement addressing modes. */
748 static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
749 int mode, int reg0, int opsize)
751 TCGv reg;
752 TCGv tmp;
753 uint16_t ext;
754 uint32_t offset;
756 switch (mode) {
757 case 0: /* Data register direct. */
758 case 1: /* Address register direct. */
759 return NULL_QREG;
760 case 3: /* Indirect postincrement. */
761 if (opsize == OS_UNSIZED) {
762 return NULL_QREG;
764 /* fallthru */
765 case 2: /* Indirect register */
766 return get_areg(s, reg0);
767 case 4: /* Indirect predecrememnt. */
768 if (opsize == OS_UNSIZED) {
769 return NULL_QREG;
771 reg = get_areg(s, reg0);
772 tmp = mark_to_release(s, tcg_temp_new());
773 if (reg0 == 7 && opsize == OS_BYTE &&
774 m68k_feature(s->env, M68K_FEATURE_M68000)) {
775 tcg_gen_subi_i32(tmp, reg, 2);
776 } else {
777 tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
779 return tmp;
780 case 5: /* Indirect displacement. */
781 reg = get_areg(s, reg0);
782 tmp = mark_to_release(s, tcg_temp_new());
783 ext = read_im16(env, s);
784 tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
785 return tmp;
786 case 6: /* Indirect index + displacement. */
787 reg = get_areg(s, reg0);
788 return gen_lea_indexed(env, s, reg);
789 case 7: /* Other */
790 switch (reg0) {
791 case 0: /* Absolute short. */
792 offset = (int16_t)read_im16(env, s);
793 return mark_to_release(s, tcg_const_i32(offset));
794 case 1: /* Absolute long. */
795 offset = read_im32(env, s);
796 return mark_to_release(s, tcg_const_i32(offset));
797 case 2: /* pc displacement */
798 offset = s->pc;
799 offset += (int16_t)read_im16(env, s);
800 return mark_to_release(s, tcg_const_i32(offset));
801 case 3: /* pc index+displacement. */
802 return gen_lea_indexed(env, s, NULL_QREG);
803 case 4: /* Immediate. */
804 default:
805 return NULL_QREG;
808 /* Should never happen. */
809 return NULL_QREG;
812 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
813 int opsize)
815 int mode = extract32(insn, 3, 3);
816 int reg0 = REG(insn, 0);
817 return gen_lea_mode(env, s, mode, reg0, opsize);
820 /* Generate code to load/store a value from/into an EA. If WHAT > 0 this is
821 a write otherwise it is a read (0 == sign extend, -1 == zero extend).
822 ADDRP is non-null for readwrite operands. */
823 static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
824 int opsize, TCGv val, TCGv *addrp, ea_what what,
825 int index)
827 TCGv reg, tmp, result;
828 int32_t offset;
830 switch (mode) {
831 case 0: /* Data register direct. */
832 reg = cpu_dregs[reg0];
833 if (what == EA_STORE) {
834 gen_partset_reg(opsize, reg, val);
835 return store_dummy;
836 } else {
837 return gen_extend(s, reg, opsize, what == EA_LOADS);
839 case 1: /* Address register direct. */
840 reg = get_areg(s, reg0);
841 if (what == EA_STORE) {
842 tcg_gen_mov_i32(reg, val);
843 return store_dummy;
844 } else {
845 return gen_extend(s, reg, opsize, what == EA_LOADS);
847 case 2: /* Indirect register */
848 reg = get_areg(s, reg0);
849 return gen_ldst(s, opsize, reg, val, what, index);
850 case 3: /* Indirect postincrement. */
851 reg = get_areg(s, reg0);
852 result = gen_ldst(s, opsize, reg, val, what, index);
853 if (what == EA_STORE || !addrp) {
854 TCGv tmp = tcg_temp_new();
855 if (reg0 == 7 && opsize == OS_BYTE &&
856 m68k_feature(s->env, M68K_FEATURE_M68000)) {
857 tcg_gen_addi_i32(tmp, reg, 2);
858 } else {
859 tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
861 delay_set_areg(s, reg0, tmp, true);
863 return result;
864 case 4: /* Indirect predecrememnt. */
865 if (addrp && what == EA_STORE) {
866 tmp = *addrp;
867 } else {
868 tmp = gen_lea_mode(env, s, mode, reg0, opsize);
869 if (IS_NULL_QREG(tmp)) {
870 return tmp;
872 if (addrp) {
873 *addrp = tmp;
876 result = gen_ldst(s, opsize, tmp, val, what, index);
877 if (what == EA_STORE || !addrp) {
878 delay_set_areg(s, reg0, tmp, false);
880 return result;
881 case 5: /* Indirect displacement. */
882 case 6: /* Indirect index + displacement. */
883 do_indirect:
884 if (addrp && what == EA_STORE) {
885 tmp = *addrp;
886 } else {
887 tmp = gen_lea_mode(env, s, mode, reg0, opsize);
888 if (IS_NULL_QREG(tmp)) {
889 return tmp;
891 if (addrp) {
892 *addrp = tmp;
895 return gen_ldst(s, opsize, tmp, val, what, index);
896 case 7: /* Other */
897 switch (reg0) {
898 case 0: /* Absolute short. */
899 case 1: /* Absolute long. */
900 case 2: /* pc displacement */
901 case 3: /* pc index+displacement. */
902 goto do_indirect;
903 case 4: /* Immediate. */
904 /* Sign extend values for consistency. */
905 switch (opsize) {
906 case OS_BYTE:
907 if (what == EA_LOADS) {
908 offset = (int8_t)read_im8(env, s);
909 } else {
910 offset = read_im8(env, s);
912 break;
913 case OS_WORD:
914 if (what == EA_LOADS) {
915 offset = (int16_t)read_im16(env, s);
916 } else {
917 offset = read_im16(env, s);
919 break;
920 case OS_LONG:
921 offset = read_im32(env, s);
922 break;
923 default:
924 g_assert_not_reached();
926 return mark_to_release(s, tcg_const_i32(offset));
927 default:
928 return NULL_QREG;
931 /* Should never happen. */
932 return NULL_QREG;
935 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
936 int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
938 int mode = extract32(insn, 3, 3);
939 int reg0 = REG(insn, 0);
940 return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
943 static TCGv_ptr gen_fp_ptr(int freg)
945 TCGv_ptr fp = tcg_temp_new_ptr();
946 tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
947 return fp;
950 static TCGv_ptr gen_fp_result_ptr(void)
952 TCGv_ptr fp = tcg_temp_new_ptr();
953 tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
954 return fp;
957 static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
959 TCGv t32;
960 TCGv_i64 t64;
962 t32 = tcg_temp_new();
963 tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
964 tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
965 tcg_temp_free(t32);
967 t64 = tcg_temp_new_i64();
968 tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
969 tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
970 tcg_temp_free_i64(t64);
973 static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
974 int index)
976 TCGv tmp;
977 TCGv_i64 t64;
979 t64 = tcg_temp_new_i64();
980 tmp = tcg_temp_new();
981 switch (opsize) {
982 case OS_BYTE:
983 tcg_gen_qemu_ld8s(tmp, addr, index);
984 gen_helper_exts32(cpu_env, fp, tmp);
985 break;
986 case OS_WORD:
987 tcg_gen_qemu_ld16s(tmp, addr, index);
988 gen_helper_exts32(cpu_env, fp, tmp);
989 break;
990 case OS_LONG:
991 tcg_gen_qemu_ld32u(tmp, addr, index);
992 gen_helper_exts32(cpu_env, fp, tmp);
993 break;
994 case OS_SINGLE:
995 tcg_gen_qemu_ld32u(tmp, addr, index);
996 gen_helper_extf32(cpu_env, fp, tmp);
997 break;
998 case OS_DOUBLE:
999 tcg_gen_qemu_ld64(t64, addr, index);
1000 gen_helper_extf64(cpu_env, fp, t64);
1001 break;
1002 case OS_EXTENDED:
1003 if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
1004 gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
1005 break;
1007 tcg_gen_qemu_ld32u(tmp, addr, index);
1008 tcg_gen_shri_i32(tmp, tmp, 16);
1009 tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
1010 tcg_gen_addi_i32(tmp, addr, 4);
1011 tcg_gen_qemu_ld64(t64, tmp, index);
1012 tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
1013 break;
1014 case OS_PACKED:
1015 /* unimplemented data type on 68040/ColdFire
1016 * FIXME if needed for another FPU
1018 gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
1019 break;
1020 default:
1021 g_assert_not_reached();
1023 tcg_temp_free(tmp);
1024 tcg_temp_free_i64(t64);
1027 static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
1028 int index)
1030 TCGv tmp;
1031 TCGv_i64 t64;
1033 t64 = tcg_temp_new_i64();
1034 tmp = tcg_temp_new();
1035 switch (opsize) {
1036 case OS_BYTE:
1037 gen_helper_reds32(tmp, cpu_env, fp);
1038 tcg_gen_qemu_st8(tmp, addr, index);
1039 break;
1040 case OS_WORD:
1041 gen_helper_reds32(tmp, cpu_env, fp);
1042 tcg_gen_qemu_st16(tmp, addr, index);
1043 break;
1044 case OS_LONG:
1045 gen_helper_reds32(tmp, cpu_env, fp);
1046 tcg_gen_qemu_st32(tmp, addr, index);
1047 break;
1048 case OS_SINGLE:
1049 gen_helper_redf32(tmp, cpu_env, fp);
1050 tcg_gen_qemu_st32(tmp, addr, index);
1051 break;
1052 case OS_DOUBLE:
1053 gen_helper_redf64(t64, cpu_env, fp);
1054 tcg_gen_qemu_st64(t64, addr, index);
1055 break;
1056 case OS_EXTENDED:
1057 if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
1058 gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
1059 break;
1061 tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
1062 tcg_gen_shli_i32(tmp, tmp, 16);
1063 tcg_gen_qemu_st32(tmp, addr, index);
1064 tcg_gen_addi_i32(tmp, addr, 4);
1065 tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
1066 tcg_gen_qemu_st64(t64, tmp, index);
1067 break;
1068 case OS_PACKED:
1069 /* unimplemented data type on 68040/ColdFire
1070 * FIXME if needed for another FPU
1072 gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
1073 break;
1074 default:
1075 g_assert_not_reached();
1077 tcg_temp_free(tmp);
1078 tcg_temp_free_i64(t64);
1081 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1082 TCGv_ptr fp, ea_what what, int index)
1084 if (what == EA_STORE) {
1085 gen_store_fp(s, opsize, addr, fp, index);
1086 } else {
1087 gen_load_fp(s, opsize, addr, fp, index);
1091 static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
1092 int reg0, int opsize, TCGv_ptr fp, ea_what what,
1093 int index)
1095 TCGv reg, addr, tmp;
1096 TCGv_i64 t64;
1098 switch (mode) {
1099 case 0: /* Data register direct. */
1100 reg = cpu_dregs[reg0];
1101 if (what == EA_STORE) {
1102 switch (opsize) {
1103 case OS_BYTE:
1104 case OS_WORD:
1105 case OS_LONG:
1106 gen_helper_reds32(reg, cpu_env, fp);
1107 break;
1108 case OS_SINGLE:
1109 gen_helper_redf32(reg, cpu_env, fp);
1110 break;
1111 default:
1112 g_assert_not_reached();
1114 } else {
1115 tmp = tcg_temp_new();
1116 switch (opsize) {
1117 case OS_BYTE:
1118 tcg_gen_ext8s_i32(tmp, reg);
1119 gen_helper_exts32(cpu_env, fp, tmp);
1120 break;
1121 case OS_WORD:
1122 tcg_gen_ext16s_i32(tmp, reg);
1123 gen_helper_exts32(cpu_env, fp, tmp);
1124 break;
1125 case OS_LONG:
1126 gen_helper_exts32(cpu_env, fp, reg);
1127 break;
1128 case OS_SINGLE:
1129 gen_helper_extf32(cpu_env, fp, reg);
1130 break;
1131 default:
1132 g_assert_not_reached();
1134 tcg_temp_free(tmp);
1136 return 0;
1137 case 1: /* Address register direct. */
1138 return -1;
1139 case 2: /* Indirect register */
1140 addr = get_areg(s, reg0);
1141 gen_ldst_fp(s, opsize, addr, fp, what, index);
1142 return 0;
1143 case 3: /* Indirect postincrement. */
1144 addr = cpu_aregs[reg0];
1145 gen_ldst_fp(s, opsize, addr, fp, what, index);
1146 tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
1147 return 0;
1148 case 4: /* Indirect predecrememnt. */
1149 addr = gen_lea_mode(env, s, mode, reg0, opsize);
1150 if (IS_NULL_QREG(addr)) {
1151 return -1;
1153 gen_ldst_fp(s, opsize, addr, fp, what, index);
1154 tcg_gen_mov_i32(cpu_aregs[reg0], addr);
1155 return 0;
1156 case 5: /* Indirect displacement. */
1157 case 6: /* Indirect index + displacement. */
1158 do_indirect:
1159 addr = gen_lea_mode(env, s, mode, reg0, opsize);
1160 if (IS_NULL_QREG(addr)) {
1161 return -1;
1163 gen_ldst_fp(s, opsize, addr, fp, what, index);
1164 return 0;
1165 case 7: /* Other */
1166 switch (reg0) {
1167 case 0: /* Absolute short. */
1168 case 1: /* Absolute long. */
1169 case 2: /* pc displacement */
1170 case 3: /* pc index+displacement. */
1171 goto do_indirect;
1172 case 4: /* Immediate. */
1173 if (what == EA_STORE) {
1174 return -1;
1176 switch (opsize) {
1177 case OS_BYTE:
1178 tmp = tcg_const_i32((int8_t)read_im8(env, s));
1179 gen_helper_exts32(cpu_env, fp, tmp);
1180 tcg_temp_free(tmp);
1181 break;
1182 case OS_WORD:
1183 tmp = tcg_const_i32((int16_t)read_im16(env, s));
1184 gen_helper_exts32(cpu_env, fp, tmp);
1185 tcg_temp_free(tmp);
1186 break;
1187 case OS_LONG:
1188 tmp = tcg_const_i32(read_im32(env, s));
1189 gen_helper_exts32(cpu_env, fp, tmp);
1190 tcg_temp_free(tmp);
1191 break;
1192 case OS_SINGLE:
1193 tmp = tcg_const_i32(read_im32(env, s));
1194 gen_helper_extf32(cpu_env, fp, tmp);
1195 tcg_temp_free(tmp);
1196 break;
1197 case OS_DOUBLE:
1198 t64 = tcg_const_i64(read_im64(env, s));
1199 gen_helper_extf64(cpu_env, fp, t64);
1200 tcg_temp_free_i64(t64);
1201 break;
1202 case OS_EXTENDED:
1203 if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
1204 gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
1205 break;
1207 tmp = tcg_const_i32(read_im32(env, s) >> 16);
1208 tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
1209 tcg_temp_free(tmp);
1210 t64 = tcg_const_i64(read_im64(env, s));
1211 tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
1212 tcg_temp_free_i64(t64);
1213 break;
1214 case OS_PACKED:
1215 /* unimplemented data type on 68040/ColdFire
1216 * FIXME if needed for another FPU
1218 gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
1219 break;
1220 default:
1221 g_assert_not_reached();
1223 return 0;
1224 default:
1225 return -1;
1228 return -1;
1231 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1232 int opsize, TCGv_ptr fp, ea_what what, int index)
1234 int mode = extract32(insn, 3, 3);
1235 int reg0 = REG(insn, 0);
1236 return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
/*
 * A materialized condition: "v1 <tcond> v2" realizes an m68k cc test.
 * g1/g2 mark v1/v2 as global temps that must not be freed by free_cond().
 */
1239 typedef struct {
1240     TCGCond tcond;  /* TCG comparison to apply between v1 and v2 */
1241     bool g1;        /* v1 is a global (do not free) */
1242     bool g2;        /* v2 is a global (do not free) */
1243     TCGv v1;
1244     TCGv v2;
1245 } DisasCompare;
/*
 * Build a DisasCompare that tests m68k condition code 'cond' (0..15)
 * against the current lazily-evaluated flag state (s->cc_op).
 * Fast paths handle CC_OP_CMP* and CC_OP_LOGIC/ADD*/SUB* forms without
 * flushing; otherwise flags are flushed to CC_OP_FLAGS and the test is
 * built from QREG_CC_{C,Z,V,N,X}.  Odd condition numbers are the
 * negations of the even ones, handled by the final tcg_invert_cond().
 */
1247 static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
1249     TCGv tmp, tmp2;
1250     TCGCond tcond;
1251     CCOp op = s->cc_op;
1253     /* The CC_OP_CMP form can handle most normal comparisons directly. */
1254     if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
1255         c->g1 = c->g2 = 1;
1256         c->v1 = QREG_CC_N;
1257         c->v2 = QREG_CC_V;
1258         switch (cond) {
1259         case 2: /* HI */
1260         case 3: /* LS */
1261             tcond = TCG_COND_LEU;
1262             goto done;
1263         case 4: /* CC */
1264         case 5: /* CS */
1265             tcond = TCG_COND_LTU;
1266             goto done;
1267         case 6: /* NE */
1268         case 7: /* EQ */
1269             tcond = TCG_COND_EQ;
1270             goto done;
1271         case 10: /* PL */
1272         case 11: /* MI */
1273             c->g1 = c->g2 = 0;
1274             c->v2 = tcg_const_i32(0);
1275             c->v1 = tmp = tcg_temp_new();
1276             tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
1277             gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
1278             /* fallthru */
1279         case 12: /* GE */
1280         case 13: /* LT */
1281             tcond = TCG_COND_LT;
1282             goto done;
1283         case 14: /* GT */
1284         case 15: /* LE */
1285             tcond = TCG_COND_LE;
1286             goto done;
1290     c->g1 = 1;
1291     c->g2 = 0;
1292     c->v2 = tcg_const_i32(0);
1294     switch (cond) {
1295     case 0: /* T */
1296     case 1: /* F */
1297         c->v1 = c->v2;
1298         tcond = TCG_COND_NEVER;
1299         goto done;
1300     case 14: /* GT (!(Z || (N ^ V))) */
1301     case 15: /* LE (Z || (N ^ V)) */
1302         /* Logic operations clear V, which simplifies LE to (Z || N),
1303            and since Z and N are co-located, this becomes a normal
1304            comparison vs N.  */
1305         if (op == CC_OP_LOGIC) {
1306             c->v1 = QREG_CC_N;
1307             tcond = TCG_COND_LE;
1308             goto done;
1310         break;
1311     case 12: /* GE (!(N ^ V)) */
1312     case 13: /* LT (N ^ V) */
1313         /* Logic operations clear V, which simplifies this to N.  */
1314         if (op != CC_OP_LOGIC) {
1315             break;
1317         /* fallthru */
1318     case 10: /* PL (!N) */
1319     case 11: /* MI (N) */
1320         /* Several cases represent N normally.  */
1321         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1322             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
1323             op == CC_OP_LOGIC) {
1324             c->v1 = QREG_CC_N;
1325             tcond = TCG_COND_LT;
1326             goto done;
1328         break;
1329     case 6: /* NE (!Z) */
1330     case 7: /* EQ (Z) */
1331         /* Some cases fold Z into N.  */
1332         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1333             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
1334             op == CC_OP_LOGIC) {
1335             tcond = TCG_COND_EQ;
1336             c->v1 = QREG_CC_N;
1337             goto done;
1339         break;
1340     case 4: /* CC (!C) */
1341     case 5: /* CS (C) */
1342         /* Some cases fold C into X.  */
1343         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1344             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
1345             tcond = TCG_COND_NE;
1346             c->v1 = QREG_CC_X;
1347             goto done;
1349         /* fallthru */
1350     case 8: /* VC (!V) */
1351     case 9: /* VS (V) */
1352         /* Logic operations clear V and C.  */
1353         if (op == CC_OP_LOGIC) {
1354             tcond = TCG_COND_NEVER;
1355             c->v1 = c->v2;
1356             goto done;
1358         break;
1361     /* Otherwise, flush flag state to CC_OP_FLAGS.  */
1362     gen_flush_flags(s);
1364     switch (cond) {
1365     case 0: /* T */
1366     case 1: /* F */
1367     default:
1368         /* Invalid, or handled above.  */
1369         abort();
1370     case 2: /* HI (!C && !Z) -> !(C || Z)*/
1371     case 3: /* LS (C || Z) */
1372         c->v1 = tmp = tcg_temp_new();
1373         c->g1 = 0;
1374         tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1375         tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
1376         tcond = TCG_COND_NE;
1377         break;
1378     case 4: /* CC (!C) */
1379     case 5: /* CS (C) */
1380         c->v1 = QREG_CC_C;
1381         tcond = TCG_COND_NE;
1382         break;
1383     case 6: /* NE (!Z) */
1384     case 7: /* EQ (Z) */
1385         c->v1 = QREG_CC_Z;
1386         tcond = TCG_COND_EQ;
1387         break;
1388     case 8: /* VC (!V) */
1389     case 9: /* VS (V) */
1390         c->v1 = QREG_CC_V;
1391         tcond = TCG_COND_LT;
1392         break;
1393     case 10: /* PL (!N) */
1394     case 11: /* MI (N) */
1395         c->v1 = QREG_CC_N;
1396         tcond = TCG_COND_LT;
1397         break;
1398     case 12: /* GE (!(N ^ V)) */
1399     case 13: /* LT (N ^ V) */
1400         c->v1 = tmp = tcg_temp_new();
1401         c->g1 = 0;
1402         tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
1403         tcond = TCG_COND_LT;
1404         break;
1405     case 14: /* GT (!(Z || (N ^ V))) */
1406     case 15: /* LE (Z || (N ^ V)) */
1407         c->v1 = tmp = tcg_temp_new();
1408         c->g1 = 0;
1409         tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1410         tcg_gen_neg_i32(tmp, tmp);
1411         tmp2 = tcg_temp_new();
1412         tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
1413         tcg_gen_or_i32(tmp, tmp, tmp2);
1414         tcg_temp_free(tmp2);
1415         tcond = TCG_COND_LT;
1416         break;
1419  done:
1420     /* Odd condition numbers are the inverse of the even ones. */
1421     if ((cond & 1) == 0) {
1421         tcond = tcg_invert_cond(tcond);
1423     c->tcond = tcond;
/* Release any non-global temporaries allocated by gen_cc_cond().  */
1426 static void free_cond(DisasCompare *c)
1428     if (!c->g1) {
1429         tcg_temp_free(c->v1);
1431     if (!c->g2) {
1432         tcg_temp_free(c->v2);
/* Emit a conditional branch to label 'l1' taken when condition 'cond' holds. */
1436 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1438     DisasCompare c;
1440     gen_cc_cond(&c, s, cond);
1441     update_cc_op(s);
1442     tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1443     free_cond(&c);
1446 /* Force a TB lookup after an instruction that changes the CPU state.  */
1447 static void gen_exit_tb(DisasContext *s)
1449     update_cc_op(s);
1450     tcg_gen_movi_i32(QREG_PC, s->pc);
1451     s->base.is_jmp = DISAS_EXIT;
/* Load a source operand from the EA encoded in 'insn'; on an invalid EA,
 * raise an address fault and return from the enclosing DISAS_INSN.  */
1454 #define SRC_EA(env, result, opsize, op_sign, addrp) do {        \
1455         result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp, \
1456                         op_sign ? EA_LOADS : EA_LOADU, IS_USER(s));     \
1457         if (IS_NULL_QREG(result)) {                             \
1458             gen_addr_fault(s);                                  \
1459             return;                                             \
1461     } while (0)
/* Store 'val' to the EA encoded in 'insn'; on an invalid EA, raise an
 * address fault and return from the enclosing DISAS_INSN.  */
1463 #define DEST_EA(env, insn, opsize, val, addrp) do {                     \
1464         TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp,       \
1465                                 EA_STORE, IS_USER(s));                  \
1466         if (IS_NULL_QREG(ea_result)) {                                  \
1467             gen_addr_fault(s);                                          \
1468             return;                                                     \
1470     } while (0)
/*
 * True when a direct goto_tb chain to 'dest' is permitted: system mode
 * requires the destination to share a page with the TB start or the
 * current insn; user mode has no such restriction.
 */
1472 static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
1474 #ifndef CONFIG_USER_ONLY
1475     return (s->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)
1476         || (s->base.pc_next & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
1477 #else
1478     return true;
1479 #endif
1482 /* Generate a jump to an immediate address.  */
1483 static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
1485     if (unlikely(s->base.singlestep_enabled)) {
1486         /* Single-stepping: trap to the debugger instead of chaining. */
1486         gen_exception(s, dest, EXCP_DEBUG);
1487     } else if (use_goto_tb(s, dest)) {
1488         tcg_gen_goto_tb(n);
1489         tcg_gen_movi_i32(QREG_PC, dest);
1490         tcg_gen_exit_tb(s->base.tb, n);
1491     } else {
1492         gen_jmp_im(s, dest);
1493         tcg_gen_exit_tb(NULL, 0);
1495     s->base.is_jmp = DISAS_NORETURN;
/* Scc: set destination byte to 0xff if condition holds, else 0x00. */
1498 DISAS_INSN(scc)
1500     DisasCompare c;
1501     int cond;
1502     TCGv tmp;
1504     cond = (insn >> 8) & 0xf;
1505     gen_cc_cond(&c, s, cond);
1507     tmp = tcg_temp_new();
1508     tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
1509     free_cond(&c);
1511     /* Convert the 0/1 setcond result to 0x00/0xff. */
1511     tcg_gen_neg_i32(tmp, tmp);
1512     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
1513     tcg_temp_free(tmp);
/* DBcc: if cond false, decrement Dn.w and branch unless it reaches -1. */
1516 DISAS_INSN(dbcc)
1518     TCGLabel *l1;
1519     TCGv reg;
1520     TCGv tmp;
1521     int16_t offset;
1522     uint32_t base;
1524     reg = DREG(insn, 0);
1525     base = s->pc;
1526     offset = (int16_t)read_im16(env, s);
1527     l1 = gen_new_label();
1528     /* Condition true: fall through to the next instruction. */
1528     gen_jmpcc(s, (insn >> 8) & 0xf, l1);
1530     tmp = tcg_temp_new();
1531     tcg_gen_ext16s_i32(tmp, reg);
1532     tcg_gen_addi_i32(tmp, tmp, -1);
1533     gen_partset_reg(OS_WORD, reg, tmp);
1534     tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
1535     gen_jmp_tb(s, 1, base + offset);
1536     gen_set_label(l1);
1537     gen_jmp_tb(s, 0, s->pc);
/* Undefined MAC instruction: raise a line-A emulator exception. */
1540 DISAS_INSN(undef_mac)
1542     gen_exception(s, s->base.pc_next, EXCP_LINEA);
/* Undefined FPU instruction: raise a line-F emulator exception. */
1545 DISAS_INSN(undef_fpu)
1547     gen_exception(s, s->base.pc_next, EXCP_LINEF);
1550 DISAS_INSN(undef)
1552     /* ??? This is both instructions that are as yet unimplemented
1553        for the 680x0 series, as well as those that are implemented
1554        but actually illegal for CPU32 or pre-68020.  */
1555     qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
1556                   insn, s->base.pc_next);
1557     gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
/* MULS.W / MULU.W: 16x16 -> 32 multiply into Dn; bit 8 selects signed. */
1560 DISAS_INSN(mulw)
1562     TCGv reg;
1563     TCGv tmp;
1564     TCGv src;
1565     int sign;
1567     sign = (insn & 0x100) != 0;
1568     reg = DREG(insn, 9);
1569     tmp = tcg_temp_new();
1570     if (sign)
1571         tcg_gen_ext16s_i32(tmp, reg);
1572     else
1573         tcg_gen_ext16u_i32(tmp, reg);
1574     SRC_EA(env, src, OS_WORD, sign, NULL);
1575     tcg_gen_mul_i32(tmp, tmp, src);
1576     tcg_gen_mov_i32(reg, tmp);
1577     gen_logic_cc(s, tmp, OS_LONG);
1578     tcg_temp_free(tmp);
/* DIVS.W / DIVU.W via helper; helper sets flags and handles div-by-zero. */
1581 DISAS_INSN(divw)
1583     int sign;
1584     TCGv src;
1585     TCGv destr;
1587     /* divX.w <EA>,Dn    32/16 -> 16r:16q */
1589     sign = (insn & 0x100) != 0;
1591     /* dest.l / src.w */
1593     SRC_EA(env, src, OS_WORD, sign, NULL);
1594     destr = tcg_const_i32(REG(insn, 9));
1595     if (sign) {
1596         gen_helper_divsw(cpu_env, destr, src);
1597     } else {
1598         gen_helper_divuw(cpu_env, destr, src);
1600     tcg_temp_free(destr);
1602     set_cc_op(s, CC_OP_FLAGS);
/* DIVS.L / DIVU.L / DIVSL / DIVUL: 32- or 64-bit dividend forms, via
 * helpers.  Ext word bit 11 = signed, bit 10 = 64-bit dividend (requires
 * the QUAD_MULDIV feature, else illegal instruction). */
1605 DISAS_INSN(divl)
1607     TCGv num, reg, den;
1608     int sign;
1609     uint16_t ext;
1611     ext = read_im16(env, s);
1613     sign = (ext & 0x0800) != 0;
1615     if (ext & 0x400) {
1616         if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
1617             gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
1618             return;
1621         /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */
1623         SRC_EA(env, den, OS_LONG, 0, NULL);
1624         num = tcg_const_i32(REG(ext, 12));
1625         reg = tcg_const_i32(REG(ext, 0));
1626         if (sign) {
1627             gen_helper_divsll(cpu_env, num, reg, den);
1628         } else {
1629             gen_helper_divull(cpu_env, num, reg, den);
1631         tcg_temp_free(reg);
1632         tcg_temp_free(num);
1633         set_cc_op(s, CC_OP_FLAGS);
1634         return;
1637     /* divX.l <EA>, Dq        32/32 -> 32q     */
1638     /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */
1640     SRC_EA(env, den, OS_LONG, 0, NULL);
1641     num = tcg_const_i32(REG(ext, 12));
1642     reg = tcg_const_i32(REG(ext, 0));
1643     if (sign) {
1644         gen_helper_divsl(cpu_env, num, reg, den);
1645     } else {
1646         gen_helper_divul(cpu_env, num, reg, den);
1648     tcg_temp_free(reg);
1649     tcg_temp_free(num);
1651     set_cc_op(s, CC_OP_FLAGS);
/*
 * Branch-free two-digit BCD addition: dest10 = dest10 + src10 + X.
 * Adds 0x66 up front so each nibble carries in binary exactly when the
 * decimal digit would carry, then subtracts 0x6 back from the digits
 * that did not carry (detected via the xor trick below).
 */
1654 static void bcd_add(TCGv dest, TCGv src)
1656     TCGv t0, t1;
1658     /* dest10 = dest10 + src10 + X
1660      * t1 = src
1661      * t2 = t1 + 0x066
1662      * t3 = t2 + dest + X
1663      * t4 = t2 ^ dest
1664      * t5 = t3 ^ t4
1665      * t6 = ~t5 & 0x110
1666      * t7 = (t6 >> 2) | (t6 >> 3)
1667      * return t3 - t7
1670     /* t1 = (src + 0x066) + dest + X
1671      *    = result with some possible exceding 0x6
1674     t0 = tcg_const_i32(0x066);
1675     tcg_gen_add_i32(t0, t0, src);
1677     t1 = tcg_temp_new();
1678     tcg_gen_add_i32(t1, t0, dest);
1679     tcg_gen_add_i32(t1, t1, QREG_CC_X);
1681     /* we will remove exceding 0x6 where there is no carry */
1683     /* t0 = (src + 0x0066) ^ dest
1684      *    = t1 without carries
1687     tcg_gen_xor_i32(t0, t0, dest);
1689     /* extract the carries
1690      * t0 = t0 ^ t1
1691      *    = only the carries
1694     tcg_gen_xor_i32(t0, t0, t1);
1696     /* generate 0x1 where there is no carry
1697      * and for each 0x10, generate a 0x6
1700     tcg_gen_shri_i32(t0, t0, 3);
1701     tcg_gen_not_i32(t0, t0);
1702     tcg_gen_andi_i32(t0, t0, 0x22);
1703     tcg_gen_add_i32(dest, t0, t0);
1704     tcg_gen_add_i32(dest, dest, t0);
1705     tcg_temp_free(t0);
1707     /* remove the exceding 0x6
1708      * for digits that have not generated a carry
1711     tcg_gen_sub_i32(dest, t1, dest);
1712     tcg_temp_free(t1);
/*
 * Branch-free two-digit BCD subtraction: dest10 = dest10 - src10 - X,
 * rewritten as the ten's-complement addition bcd_add(dest + 1 - X,
 * 0x199 - src) and carried out inline with the same xor/carry trick
 * as bcd_add().
 */
1715 static void bcd_sub(TCGv dest, TCGv src)
1717     TCGv t0, t1, t2;
1719     /* dest10 = dest10 - src10 - X
1720      *        = bcd_add(dest + 1 - X, 0x199 - src)
1723     /* t0 = 0x066 + (0x199 - src) */
1725     t0 = tcg_temp_new();
1726     tcg_gen_subfi_i32(t0, 0x1ff, src);
1728     /* t1 = t0 + dest + 1 - X*/
1730     t1 = tcg_temp_new();
1731     tcg_gen_add_i32(t1, t0, dest);
1732     tcg_gen_addi_i32(t1, t1, 1);
1733     tcg_gen_sub_i32(t1, t1, QREG_CC_X);
1735     /* t2 = t0 ^ dest */
1737     t2 = tcg_temp_new();
1738     tcg_gen_xor_i32(t2, t0, dest);
1740     /* t0 = t1 ^ t2 */
1742     tcg_gen_xor_i32(t0, t1, t2);
1744     /* t2 = ~t0 & 0x110
1745      * t0 = (t2 >> 2) | (t2 >> 3)
1747      * to fit on 8bit operands, changed in:
1749      * t2 = ~(t0 >> 3) & 0x22
1750      * t0 = t2 + t2
1751      * t0 = t0 + t2
1754     tcg_gen_shri_i32(t2, t0, 3);
1755     tcg_gen_not_i32(t2, t2);
1756     tcg_gen_andi_i32(t2, t2, 0x22);
1757     tcg_gen_add_i32(t0, t2, t2);
1758     tcg_gen_add_i32(t0, t0, t2);
1759     tcg_temp_free(t2);
1761     /* return t1 - t0 */
1763     tcg_gen_sub_i32(dest, t1, t0);
1764     tcg_temp_free(t0);
1765     tcg_temp_free(t1);
/* Update C, X and sticky-Z from a 9-bit BCD result: Z ORs in the low
 * byte (cleared only if the result is zero), C/X come from bit 8. */
1768 static void bcd_flags(TCGv val)
1770     tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
1771     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);
1773     tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);
1775     tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
/* ABCD Dy,Dx: BCD add of data-register bytes. */
1778 DISAS_INSN(abcd_reg)
1780     TCGv src;
1781     TCGv dest;
1783     gen_flush_flags(s); /* !Z is sticky */
1785     src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1786     dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1787     bcd_add(dest, src);
1788     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1790     bcd_flags(dest);
/* ABCD -(Ay),-(Ax): BCD add of memory bytes with pre-decrement. */
1793 DISAS_INSN(abcd_mem)
1795     TCGv src, dest, addr;
1797     gen_flush_flags(s); /* !Z is sticky */
1799     /* Indirect pre-decrement load (mode 4) */
1801     src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1802                       NULL_QREG, NULL, EA_LOADU, IS_USER(s));
1803     dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1804                        NULL_QREG, &addr, EA_LOADU, IS_USER(s));
1806     bcd_add(dest, src);
1808     gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
1809                 EA_STORE, IS_USER(s));
1811     bcd_flags(dest);
/* SBCD Dy,Dx: BCD subtract of data-register bytes. */
1814 DISAS_INSN(sbcd_reg)
1816     TCGv src, dest;
1818     gen_flush_flags(s); /* !Z is sticky */
1820     src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1821     dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1823     bcd_sub(dest, src);
1825     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1827     bcd_flags(dest);
/* SBCD -(Ay),-(Ax): BCD subtract of memory bytes with pre-decrement. */
1830 DISAS_INSN(sbcd_mem)
1832     TCGv src, dest, addr;
1834     gen_flush_flags(s); /* !Z is sticky */
1836     /* Indirect pre-decrement load (mode 4) */
1838     src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1839                       NULL_QREG, NULL, EA_LOADU, IS_USER(s));
1840     dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1841                        NULL_QREG, &addr, EA_LOADU, IS_USER(s));
1843     bcd_sub(dest, src);
1845     gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
1846                 EA_STORE, IS_USER(s));
1848     bcd_flags(dest);
/* NBCD <EA>: BCD negate, implemented as 0 - <EA> - X via bcd_sub(). */
1851 DISAS_INSN(nbcd)
1853     TCGv src, dest;
1854     TCGv addr;
1856     gen_flush_flags(s); /* !Z is sticky */
1858     SRC_EA(env, src, OS_BYTE, 0, &addr);
1860     dest = tcg_const_i32(0);
1861     bcd_sub(dest, src);
1863     DEST_EA(env, insn, OS_BYTE, dest, &addr);
1865     bcd_flags(dest);
1867     tcg_temp_free(dest);
/*
 * ADD/SUB <EA>,Dn and Dn,<EA>.  Bit 14 selects add vs sub; bit 8 selects
 * the EA-destination form.  X is computed via unsigned-compare and the
 * lazy cc op is set to CC_OP_ADD*/CC_OP_SUB* for the operand size.
 */
1870 DISAS_INSN(addsub)
1872     TCGv reg;
1873     TCGv dest;
1874     TCGv src;
1875     TCGv tmp;
1876     TCGv addr;
1877     int add;
1878     int opsize;
1880     add = (insn & 0x4000) != 0;
1881     opsize = insn_opsize(insn);
1882     reg = gen_extend(s, DREG(insn, 9), opsize, 1);
1883     dest = tcg_temp_new();
1884     if (insn & 0x100) {
1885         SRC_EA(env, tmp, opsize, 1, &addr);
1886         src = reg;
1887     } else {
1888         tmp = reg;
1889         SRC_EA(env, src, opsize, 1, NULL);
1891     if (add) {
1892         tcg_gen_add_i32(dest, tmp, src);
1893         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
1894         set_cc_op(s, CC_OP_ADDB + opsize);
1895     } else {
1896         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
1897         tcg_gen_sub_i32(dest, tmp, src);
1898         set_cc_op(s, CC_OP_SUBB + opsize);
1900     gen_update_cc_add(dest, src, opsize);
1901     if (insn & 0x100) {
1902         DEST_EA(env, insn, opsize, dest, &addr);
1903     } else {
1904         gen_partset_reg(opsize, DREG(insn, 9), dest);
1906     tcg_temp_free(dest);
1909 /* Reverse the order of the bits in REG.  */
1910 DISAS_INSN(bitrev)
1912     TCGv reg;
1913     reg = DREG(insn, 0);
1914     gen_helper_bitrev(reg, reg);
/*
 * BTST/BCHG/BCLR/BSET with a bit number in a data register.  Memory
 * operands are byte-sized (bit number mod 8); register operands are
 * long (mod 32).  Z is set from the tested bit before any modification.
 */
1917 DISAS_INSN(bitop_reg)
1919     int opsize;
1920     int op;
1921     TCGv src1;
1922     TCGv src2;
1923     TCGv tmp;
1924     TCGv addr;
1925     TCGv dest;
1927     if ((insn & 0x38) != 0)
1928         opsize = OS_BYTE;
1929     else
1930         opsize = OS_LONG;
1931     op = (insn >> 6) & 3;
1932     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1934     gen_flush_flags(s);
1935     src2 = tcg_temp_new();
1936     if (opsize == OS_BYTE)
1937         tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1938     else
1939         tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1941     tmp = tcg_const_i32(1);
1942     tcg_gen_shl_i32(tmp, tmp, src2);
1943     tcg_temp_free(src2);
1945     tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1947     dest = tcg_temp_new();
1948     switch (op) {
1949     case 1: /* bchg */
1950         tcg_gen_xor_i32(dest, src1, tmp);
1951         break;
1952     case 2: /* bclr */
1953         tcg_gen_andc_i32(dest, src1, tmp);
1954         break;
1955     case 3: /* bset */
1956         tcg_gen_or_i32(dest, src1, tmp);
1957         break;
1958     default: /* btst */
1959         break;
1961     tcg_temp_free(tmp);
1962     if (op) {
1963         DEST_EA(env, insn, opsize, dest, &addr);
1965     tcg_temp_free(dest);
/* SATS (ColdFire): saturate Dn on signed overflow (V flag), via helper. */
1968 DISAS_INSN(sats)
1970     TCGv reg;
1971     reg = DREG(insn, 0);
1972     gen_flush_flags(s);
1973     gen_helper_sats(reg, reg, QREG_CC_V);
1974     gen_logic_cc(s, reg, OS_LONG);
/* Push a 32-bit value onto the stack: SP -= 4; *SP = val. */
1977 static void gen_push(DisasContext *s, TCGv val)
1979     TCGv tmp;
1981     tmp = tcg_temp_new();
1982     tcg_gen_subi_i32(tmp, QREG_SP, 4);
1983     gen_store(s, OS_LONG, tmp, val, IS_USER(s));
1984     tcg_gen_mov_i32(QREG_SP, tmp);
1985     tcg_temp_free(tmp);
/* Map a MOVEM mask index (0-7 = D0-D7, 8-15 = A0-A7) to its register. */
1988 static TCGv mreg(int reg)
1990     if (reg < 8) {
1991         /* Dx */
1992         return cpu_dregs[reg];
1994     /* Ax */
1995     return cpu_aregs[reg & 7];
/*
 * MOVEM: move multiple registers to/from memory, per the 16-bit mask.
 * Loads read all values before writing any register (so a load that
 * includes the base address register behaves correctly).  Pre-decrement
 * stores walk the mask in reverse and handle the M68020+ quirk of storing
 * the base register's pre-decremented value.
 */
1998 DISAS_INSN(movem)
2000     TCGv addr, incr, tmp, r[16];
2001     int is_load = (insn & 0x0400) != 0;
2002     int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
2003     uint16_t mask = read_im16(env, s);
2004     int mode = extract32(insn, 3, 3);
2005     int reg0 = REG(insn, 0);
2006     int i;
2008     tmp = cpu_aregs[reg0];
2010     switch (mode) {
2011     case 0: /* data register direct */
2012     case 1: /* addr register direct */
2013     do_addr_fault:
2014         gen_addr_fault(s);
2015         return;
2017     case 2: /* indirect */
2018         break;
2020     case 3: /* indirect post-increment */
2021         if (!is_load) {
2022             /* post-increment is not allowed */
2023             goto do_addr_fault;
2025         break;
2027     case 4: /* indirect pre-decrement */
2028         if (is_load) {
2029             /* pre-decrement is not allowed */
2030             goto do_addr_fault;
2032         /* We want a bare copy of the address reg, without any pre-decrement
2033            adjustment, as gen_lea would provide.  */
2034         break;
2036     default:
2037         tmp = gen_lea_mode(env, s, mode, reg0, opsize);
2038         if (IS_NULL_QREG(tmp)) {
2039             goto do_addr_fault;
2041         break;
2044     addr = tcg_temp_new();
2045     tcg_gen_mov_i32(addr, tmp);
2046     incr = tcg_const_i32(opsize_bytes(opsize));
2048     if (is_load) {
2049         /* memory to register */
2050         for (i = 0; i < 16; i++) {
2051             if (mask & (1 << i)) {
2052                 r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
2053                 tcg_gen_add_i32(addr, addr, incr);
2056         for (i = 0; i < 16; i++) {
2057             if (mask & (1 << i)) {
2058                 tcg_gen_mov_i32(mreg(i), r[i]);
2059                 tcg_temp_free(r[i]);
2062         if (mode == 3) {
2063             /* post-increment: movem (An)+,X */
2064             tcg_gen_mov_i32(cpu_aregs[reg0], addr);
2066     } else {
2067         /* register to memory */
2068         if (mode == 4) {
2069             /* pre-decrement: movem X,-(An) */
2070             for (i = 15; i >= 0; i--) {
2071                 if ((mask << i) & 0x8000) {
2072                     tcg_gen_sub_i32(addr, addr, incr);
2073                     if (reg0 + 8 == i &&
2074                         m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
2075                         /* M68020+: if the addressing register is the
2076                          * register moved to memory, the value written
2077                          * is the initial value decremented by the size of
2078                          * the operation, regardless of how many actual
2079                          * stores have been performed until this point.
2080                          * M68000/M68010: the value is the initial value.
2082                         tmp = tcg_temp_new();
2083                         tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
2084                         gen_store(s, opsize, addr, tmp, IS_USER(s));
2085                         tcg_temp_free(tmp);
2086                     } else {
2087                         gen_store(s, opsize, addr, mreg(i), IS_USER(s));
2091             tcg_gen_mov_i32(cpu_aregs[reg0], addr);
2092         } else {
2093             for (i = 0; i < 16; i++) {
2094                 if (mask & (1 << i)) {
2095                     gen_store(s, opsize, addr, mreg(i), IS_USER(s));
2096                     tcg_gen_add_i32(addr, addr, incr);
2102     tcg_temp_free(incr);
2103     tcg_temp_free(addr);
/*
 * MOVEP: transfer 2 or 4 bytes between a data register and alternate
 * memory bytes at (An)+d16, stepping the address by 2 each byte.
 * Bit 6 selects long vs word; bit 7 selects register-to-memory.
 */
2106 DISAS_INSN(movep)
2108     uint8_t i;
2109     int16_t displ;
2110     TCGv reg;
2111     TCGv addr;
2112     TCGv abuf;
2113     TCGv dbuf;
2115     displ = read_im16(env, s);
2117     addr = AREG(insn, 0);
2118     reg = DREG(insn, 9);
2120     abuf = tcg_temp_new();
2121     tcg_gen_addi_i32(abuf, addr, displ);
2122     dbuf = tcg_temp_new();
2124     if (insn & 0x40) {
2125         i = 4;
2126     } else {
2127         i = 2;
2130     if (insn & 0x80) {
2131         /* Register to memory: most-significant byte first. */
2131         for ( ; i > 0 ; i--) {
2132             tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
2133             tcg_gen_qemu_st8(dbuf, abuf, IS_USER(s));
2134             if (i > 1) {
2135                 tcg_gen_addi_i32(abuf, abuf, 2);
2138     } else {
2139         /* Memory to register: deposit each byte into place. */
2139         for ( ; i > 0 ; i--) {
2140             tcg_gen_qemu_ld8u(dbuf, abuf, IS_USER(s));
2141             tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
2142             if (i > 1) {
2143                 tcg_gen_addi_i32(abuf, abuf, 2);
2147     tcg_temp_free(abuf);
2148     tcg_temp_free(dbuf);
/*
 * BTST/BCHG/BCLR/BSET with an immediate bit number (extension word).
 * The acceptable range of the bit-number word depends on the CPU family;
 * out-of-range encodings decode as undefined.  As with bitop_reg, Z is
 * set from the tested bit before any modification.
 */
2151 DISAS_INSN(bitop_im)
2153     int opsize;
2154     int op;
2155     TCGv src1;
2156     uint32_t mask;
2157     int bitnum;
2158     TCGv tmp;
2159     TCGv addr;
2161     if ((insn & 0x38) != 0)
2162         opsize = OS_BYTE;
2163     else
2164         opsize = OS_LONG;
2165     op = (insn >> 6) & 3;
2167     bitnum = read_im16(env, s);
2168     if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2169         if (bitnum & 0xfe00) {
2170             disas_undef(env, s, insn);
2171             return;
2173     } else {
2174         if (bitnum & 0xff00) {
2175             disas_undef(env, s, insn);
2176             return;
2180     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2182     gen_flush_flags(s);
2183     if (opsize == OS_BYTE)
2184         bitnum &= 7;
2185     else
2186         bitnum &= 31;
2187     mask = 1 << bitnum;
2189    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2191     if (op) {
2192         tmp = tcg_temp_new();
2193         switch (op) {
2194         case 1: /* bchg */
2195             tcg_gen_xori_i32(tmp, src1, mask);
2196             break;
2197         case 2: /* bclr */
2198             tcg_gen_andi_i32(tmp, src1, ~mask);
2199             break;
2200         case 3: /* bset */
2201             tcg_gen_ori_i32(tmp, src1, mask);
2202             break;
2203         default: /* btst */
2204             break;
2206         DEST_EA(env, insn, opsize, tmp, &addr);
2207         tcg_temp_free(tmp);
/* Materialize the CCR into a fresh temp; flushes lazy flags first. */
2211 static TCGv gen_get_ccr(DisasContext *s)
2213     TCGv dest;
2215     update_cc_op(s);
2216     dest = tcg_temp_new();
2217     gen_helper_get_ccr(dest, cpu_env);
2218     return dest;
/* Materialize the full SR: supervisor bits from QREG_SR ORed with CCR. */
2221 static TCGv gen_get_sr(DisasContext *s)
2223     TCGv ccr;
2224     TCGv sr;
2226     ccr = gen_get_ccr(s);
2227     sr = tcg_temp_new();
2228     tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
2229     tcg_gen_or_i32(sr, sr, ccr);
2230     tcg_temp_free(ccr);
2231     return sr;
/* Load an immediate into CCR (setting the flag regs directly) or into
 * the full SR (via helper).  Leaves cc state in CC_OP_FLAGS. */
2234 static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
2236     if (ccr_only) {
2237         tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
2238         tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
2239         tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
2240         tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
2241         tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
2242     } else {
2243         TCGv sr = tcg_const_i32(val);
2244         gen_helper_set_sr(cpu_env, sr);
2245         tcg_temp_free(sr);
2247     set_cc_op(s, CC_OP_FLAGS);
/* Store a runtime value into CCR or the full SR via the helpers. */
2250 static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
2252     if (ccr_only) {
2253         gen_helper_set_ccr(cpu_env, val);
2254     } else {
2255         gen_helper_set_sr(cpu_env, val);
2257     set_cc_op(s, CC_OP_FLAGS);
/* MOVE <EA>,SR / MOVE <EA>,CCR: EA 0x3c is the immediate form. */
2260 static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
2261                            bool ccr_only)
2263     if ((insn & 0x3f) == 0x3c) {
2264         uint16_t val;
2265         val = read_im16(env, s);
2266         gen_set_sr_im(s, val, ccr_only);
2267     } else {
2268         TCGv src;
2269         SRC_EA(env, src, OS_WORD, 0, NULL);
2270         gen_set_sr(s, src, ccr_only);
/*
 * Immediate arithmetic/logic group: ORI, ANDI, SUBI, ADDI, EORI, CMPI.
 * EA 0x3c targets CCR (byte size) or SR (word size, supervisor only);
 * only the logical ops (ORI/ANDI/EORI) accept that form.
 */
2274 DISAS_INSN(arith_im)
2276     int op;
2277     TCGv im;
2278     TCGv src1;
2279     TCGv dest;
2280     TCGv addr;
2281     int opsize;
2282     bool with_SR = ((insn & 0x3f) == 0x3c);
2284     op = (insn >> 9) & 7;
2285     opsize = insn_opsize(insn);
2286     switch (opsize) {
2287     case OS_BYTE:
2288         im = tcg_const_i32((int8_t)read_im8(env, s));
2289         break;
2290     case OS_WORD:
2291         im = tcg_const_i32((int16_t)read_im16(env, s));
2292         break;
2293     case OS_LONG:
2294         im = tcg_const_i32(read_im32(env, s));
2295         break;
2296     default:
2297         g_assert_not_reached();
2300     if (with_SR) {
2301         /* SR/CCR can only be used with andi/eori/ori */
2302         if (op == 2 || op == 3 || op == 6) {
2303             disas_undef(env, s, insn);
2304             return;
2306         switch (opsize) {
2307         case OS_BYTE:
2308             src1 = gen_get_ccr(s);
2309             break;
2310         case OS_WORD:
2311             if (IS_USER(s)) {
2312                 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
2313                 return;
2315             src1 = gen_get_sr(s);
2316             break;
2317         default:
2318             /* OS_LONG; others already g_assert_not_reached.  */
2319             disas_undef(env, s, insn);
2320             return;
2322     } else {
2323         SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
2325     dest = tcg_temp_new();
2326     switch (op) {
2327     case 0: /* ori */
2328         tcg_gen_or_i32(dest, src1, im);
2329         if (with_SR) {
2330             gen_set_sr(s, dest, opsize == OS_BYTE);
2331         } else {
2332             DEST_EA(env, insn, opsize, dest, &addr);
2333             gen_logic_cc(s, dest, opsize);
2335         break;
2336     case 1: /* andi */
2337         tcg_gen_and_i32(dest, src1, im);
2338         if (with_SR) {
2339             gen_set_sr(s, dest, opsize == OS_BYTE);
2340         } else {
2341             DEST_EA(env, insn, opsize, dest, &addr);
2342             gen_logic_cc(s, dest, opsize);
2344         break;
2345     case 2: /* subi */
2346         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
2347         tcg_gen_sub_i32(dest, src1, im);
2348         gen_update_cc_add(dest, im, opsize);
2349         set_cc_op(s, CC_OP_SUBB + opsize);
2350         DEST_EA(env, insn, opsize, dest, &addr);
2351         break;
2352     case 3: /* addi */
2353         tcg_gen_add_i32(dest, src1, im);
2354         gen_update_cc_add(dest, im, opsize);
2355         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
2356         set_cc_op(s, CC_OP_ADDB + opsize);
2357         DEST_EA(env, insn, opsize, dest, &addr);
2358         break;
2359     case 5: /* eori */
2360         tcg_gen_xor_i32(dest, src1, im);
2361         if (with_SR) {
2362             gen_set_sr(s, dest, opsize == OS_BYTE);
2363         } else {
2364             DEST_EA(env, insn, opsize, dest, &addr);
2365             gen_logic_cc(s, dest, opsize);
2367         break;
2368     case 6: /* cmpi */
2369         gen_update_cc_cmp(s, src1, im, opsize);
2370         break;
2371     default:
2372         abort();
2374     tcg_temp_free(im);
2375     tcg_temp_free(dest);
/*
 * CAS Dc,Du,<EA>: atomic compare-and-swap via tcg_gen_atomic_cmpxchg_i32.
 * Flags are the result of comparing the loaded value against Dc, and Dc
 * receives the loaded value.  Post-increment/pre-decrement EA modes
 * update the address register afterwards.
 */
2378 DISAS_INSN(cas)
2380     int opsize;
2381     TCGv addr;
2382     uint16_t ext;
2383     TCGv load;
2384     TCGv cmp;
2385     TCGMemOp opc;
2387     switch ((insn >> 9) & 3) {
2388     case 1:
2389         opsize = OS_BYTE;
2390         opc = MO_SB;
2391         break;
2392     case 2:
2393         opsize = OS_WORD;
2394         opc = MO_TESW;
2395         break;
2396     case 3:
2397         opsize = OS_LONG;
2398         opc = MO_TESL;
2399         break;
2400     default:
2401         g_assert_not_reached();
2404     ext = read_im16(env, s);
2406     /* cas Dc,Du,<EA> */
2408     addr = gen_lea(env, s, insn, opsize);
2409     if (IS_NULL_QREG(addr)) {
2410         gen_addr_fault(s);
2411         return;
2414     cmp = gen_extend(s, DREG(ext, 0), opsize, 1);
2416     /* if  <EA> == Dc then
2417      *     <EA> = Du
2418      *     Dc = <EA> (because <EA> == Dc)
2419      * else
2420      *     Dc = <EA>
2423     load = tcg_temp_new();
2424     tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
2425                                IS_USER(s), opc);
2426     /* update flags before setting cmp to load */
2427     gen_update_cc_cmp(s, load, cmp, opsize);
2428     gen_partset_reg(opsize, DREG(ext, 0), load);
2430     tcg_temp_free(load);
2432     switch (extract32(insn, 3, 3)) {
2433     case 3: /* Indirect postincrement.  */
2434         tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
2435         break;
2436     case 4: /* Indirect predecrememnt.  */
2437         tcg_gen_mov_i32(AREG(insn, 0), addr);
2438         break;
/*
 * CAS2.W: double word-sized compare-and-swap via helper.  Register
 * numbers are packed into one constant.  A true dual-CAS cannot be
 * emulated atomically, so under CF_PARALLEL we punt to exit_atomic.
 */
2442 DISAS_INSN(cas2w)
2444     uint16_t ext1, ext2;
2445     TCGv addr1, addr2;
2446     TCGv regs;
2448     /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2450     ext1 = read_im16(env, s);
2452     if (ext1 & 0x8000) {
2453         /* Address Register */
2454         addr1 = AREG(ext1, 12);
2455     } else {
2456         /* Data Register */
2457         addr1 = DREG(ext1, 12);
2460     ext2 = read_im16(env, s);
2461     if (ext2 & 0x8000) {
2462         /* Address Register */
2463         addr2 = AREG(ext2, 12);
2464     } else {
2465         /* Data Register */
2466         addr2 = DREG(ext2, 12);
2469     /* if (R1) == Dc1 && (R2) == Dc2 then
2470      *     (R1) = Du1
2471      *     (R2) = Du2
2472      * else
2473      *     Dc1 = (R1)
2474      *     Dc2 = (R2)
2477     regs = tcg_const_i32(REG(ext2, 6) |
2478                          (REG(ext1, 6) << 3) |
2479                          (REG(ext2, 0) << 6) |
2480                          (REG(ext1, 0) << 9));
2481     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2482         gen_helper_exit_atomic(cpu_env);
2483     } else {
2484         gen_helper_cas2w(cpu_env, regs, addr1, addr2);
2486     tcg_temp_free(regs);
2488     /* Note that cas2w also assigned to env->cc_op.  */
2489     s->cc_op = CC_OP_CMPW;
2490     s->cc_op_synced = 1;
/*
 * CAS2.L: double long-sized compare-and-swap via helper.  Unlike cas2w,
 * a parallel-capable helper variant exists for CF_PARALLEL mode.
 */
2493 DISAS_INSN(cas2l)
2495     uint16_t ext1, ext2;
2496     TCGv addr1, addr2, regs;
2498     /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2500     ext1 = read_im16(env, s);
2502     if (ext1 & 0x8000) {
2503         /* Address Register */
2504         addr1 = AREG(ext1, 12);
2505     } else {
2506         /* Data Register */
2507         addr1 = DREG(ext1, 12);
2510     ext2 = read_im16(env, s);
2511     if (ext2 & 0x8000) {
2512         /* Address Register */
2513         addr2 = AREG(ext2, 12);
2514     } else {
2515         /* Data Register */
2516         addr2 = DREG(ext2, 12);
2519     /* if (R1) == Dc1 && (R2) == Dc2 then
2520      *     (R1) = Du1
2521      *     (R2) = Du2
2522      * else
2523      *     Dc1 = (R1)
2524      *     Dc2 = (R2)
2527     regs = tcg_const_i32(REG(ext2, 6) |
2528                          (REG(ext1, 6) << 3) |
2529                          (REG(ext2, 0) << 6) |
2530                          (REG(ext1, 0) << 9));
2531     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2532         gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
2533     } else {
2534         gen_helper_cas2l(cpu_env, regs, addr1, addr2);
2536     tcg_temp_free(regs);
2538     /* Note that cas2l also assigned to env->cc_op.  */
2539     s->cc_op = CC_OP_CMPL;
2540     s->cc_op_synced = 1;
/* BYTEREV (ColdFire): byte-swap the 32-bit data register. */
2543 DISAS_INSN(byterev)
2545     TCGv reg;
2547     reg = DREG(insn, 0);
2548     tcg_gen_bswap32_i32(reg, reg);
/* MOVE / MOVEA: top opcode nibble selects size; a destination mode of 1
 * is MOVEA (no flags), otherwise flags are set as a logic op. */
2551 DISAS_INSN(move)
2553     TCGv src;
2554     TCGv dest;
2555     int op;
2556     int opsize;
2558     switch (insn >> 12) {
2559     case 1: /* move.b */
2560         opsize = OS_BYTE;
2561         break;
2562     case 2: /* move.l */
2563         opsize = OS_LONG;
2564         break;
2565     case 3: /* move.w */
2566         opsize = OS_WORD;
2567         break;
2568     default:
2569         abort();
2571     SRC_EA(env, src, opsize, 1, NULL);
2572     op = (insn >> 6) & 7;
2573     if (op == 1) {
2574         /* movea */
2575         /* The value will already have been sign extended.  */
2576         dest = AREG(insn, 9);
2577         tcg_gen_mov_i32(dest, src);
2578     } else {
2579         /* normal move */
2580         uint16_t dest_ea;
2581         dest_ea = ((insn >> 9) & 7) | (op << 3);
2582         DEST_EA(env, dest_ea, opsize, src, NULL);
2583         /* This will be correct because loads sign extend.  */
2584         gen_logic_cc(s, src, opsize);
/* NEGX <EA>: negate with extend, result = -(src + X); Z is sticky. */
2588 DISAS_INSN(negx)
2590     TCGv z;
2591     TCGv src;
2592     TCGv addr;
2593     int opsize;
2595     opsize = insn_opsize(insn);
2596     SRC_EA(env, src, opsize, 1, &addr);
2598     gen_flush_flags(s); /* compute old Z */
2600     /* Perform substract with borrow.
2601      * (X, N) =  -(src + X);
2604     z = tcg_const_i32(0);
2605     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
2606     tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
2607     tcg_temp_free(z);
2608     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2610     tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
2612     /* Compute signed-overflow for negation.  The normal formula for
2613      * subtraction is (res ^ src) & (src ^ dest), but with dest==0
2614      * this simplies to res & src.
2617     tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);
2619     /* Copy the rest of the results into place.  */
2620     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2621     tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2623     set_cc_op(s, CC_OP_FLAGS);
2625     /* result is in QREG_CC_N */
2627     DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
/* LEA <EA>,An: load the effective address into the address register. */
2630 DISAS_INSN(lea)
2632     TCGv reg;
2633     TCGv tmp;
2635     reg = AREG(insn, 9);
2636     tmp = gen_lea(env, s, insn, OS_LONG);
2637     if (IS_NULL_QREG(tmp)) {
2638         gen_addr_fault(s);
2639         return;
2641     tcg_gen_mov_i32(reg, tmp);
/* CLR <EA>: store zero and set flags accordingly (N=0, Z=1, V=C=0). */
2644 DISAS_INSN(clr)
2646     int opsize;
2647     TCGv zero;
2649     zero = tcg_const_i32(0);
2651     opsize = insn_opsize(insn);
2652     DEST_EA(env, insn, opsize, zero, NULL);
2653     gen_logic_cc(s, zero, opsize);
2654     tcg_temp_free(zero);
/* MOVE CCR,<EA>: store the condition code register as a word. */
2657 DISAS_INSN(move_from_ccr)
2659     TCGv ccr;
2661     ccr = gen_get_ccr(s);
2662     DEST_EA(env, insn, OS_WORD, ccr, NULL);
/* NEG <EA>: two's-complement negate; X set when the result is nonzero. */
2665 DISAS_INSN(neg)
2667     TCGv src1;
2668     TCGv dest;
2669     TCGv addr;
2670     int opsize;
2672     opsize = insn_opsize(insn);
2673     SRC_EA(env, src1, opsize, 1, &addr);
2674     dest = tcg_temp_new();
2675     tcg_gen_neg_i32(dest, src1);
2676     set_cc_op(s, CC_OP_SUBB + opsize);
2677     gen_update_cc_add(dest, src1, opsize);
2678     tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
2679     DEST_EA(env, insn, opsize, dest, &addr);
2680     tcg_temp_free(dest);
/* MOVE <EA>,CCR: delegate to gen_move_to_sr with ccr_only = true. */
2683 DISAS_INSN(move_to_ccr)
2685     gen_move_to_sr(env, s, insn, true);
/* NOT <EA>: bitwise complement, flags set as a logic op. */
2688 DISAS_INSN(not)
2690     TCGv src1;
2691     TCGv dest;
2692     TCGv addr;
2693     int opsize;
2695     opsize = insn_opsize(insn);
2696     SRC_EA(env, src1, opsize, 1, &addr);
2697     dest = tcg_temp_new();
2698     tcg_gen_not_i32(dest, src1);
2699     DEST_EA(env, insn, opsize, dest, &addr);
2700     gen_logic_cc(s, dest, opsize);
2703 DISAS_INSN(swap)
2705 TCGv src1;
2706 TCGv src2;
2707 TCGv reg;
2709 src1 = tcg_temp_new();
2710 src2 = tcg_temp_new();
2711 reg = DREG(insn, 0);
2712 tcg_gen_shli_i32(src1, reg, 16);
2713 tcg_gen_shri_i32(src2, reg, 16);
2714 tcg_gen_or_i32(reg, src1, src2);
2715 tcg_temp_free(src2);
2716 tcg_temp_free(src1);
2717 gen_logic_cc(s, reg, OS_LONG);
/* BKPT: raise a debug exception at the current instruction. */
DISAS_INSN(bkpt)
{
    gen_exception(s, s->base.pc_next, EXCP_DEBUG);
}
/* PEA: push the effective address (not the operand) onto the stack. */
DISAS_INSN(pea)
{
    TCGv tmp;

    tmp = gen_lea(env, s, insn, OS_LONG);
    /* NULL_QREG signals an invalid addressing mode for this insn. */
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    gen_push(s, tmp);
}
2737 DISAS_INSN(ext)
2739 int op;
2740 TCGv reg;
2741 TCGv tmp;
2743 reg = DREG(insn, 0);
2744 op = (insn >> 6) & 7;
2745 tmp = tcg_temp_new();
2746 if (op == 3)
2747 tcg_gen_ext16s_i32(tmp, reg);
2748 else
2749 tcg_gen_ext8s_i32(tmp, reg);
2750 if (op == 2)
2751 gen_partset_reg(OS_WORD, reg, tmp);
2752 else
2753 tcg_gen_mov_i32(reg, tmp);
2754 gen_logic_cc(s, tmp, OS_LONG);
2755 tcg_temp_free(tmp);
/* TST: read the EA operand and set the condition codes from it;
 * nothing is written back. */
DISAS_INSN(tst)
{
    int opsize;
    TCGv tmp;

    opsize = insn_opsize(insn);
    SRC_EA(env, tmp, opsize, 1, NULL);
    gen_logic_cc(s, tmp, opsize);
}
/* PULSE (ColdFire debug aid): no architectural effect. */
DISAS_INSN(pulse)
{
    /* Implemented as a NOP. */
}

/* Reserved/illegal opcode: raise the illegal-instruction exception. */
DISAS_INSN(illegal)
{
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
/* TAS: test the byte operand, set flags from it, then write it back
 * with bit 7 set.
 * ??? This should be atomic: real hardware performs an indivisible
 * read-modify-write cycle, but this translation does a separate
 * load and store. */
DISAS_INSN(tas)
{
    TCGv dest;
    TCGv src1;
    TCGv addr;

    dest = tcg_temp_new();
    SRC_EA(env, src1, OS_BYTE, 1, &addr);
    gen_logic_cc(s, src1, OS_BYTE);
    tcg_gen_ori_i32(dest, src1, 0x80);   /* set the msb of the byte */
    DEST_EA(env, insn, OS_BYTE, dest, &addr);
    tcg_temp_free(dest);
}
/*
 * MULS.L/MULU.L: 32x32 multiply.  The extension word selects
 * signed/unsigned (bit 11), 64-bit result (bit 10, requires the
 * QUAD_MULDIV feature), the destination register Dl (bits 12-14)
 * and, for 64-bit, the high register Dh (bits 0-2).
 */
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv src1;
    int sign;

    ext = read_im16(env, s);

    sign = ext & 0x800;

    if (ext & 0x400) {
        /* 64-bit result form. */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        SRC_EA(env, src1, OS_LONG, 0, NULL);

        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        } else {
            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        }
        /* if Dl == Dh, 68040 returns low word */
        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);   /* Dh = high half */
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);  /* Dl = low half */
        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* Z over 64 bits */

        tcg_gen_movi_i32(QREG_CC_V, 0);
        tcg_gen_movi_i32(QREG_CC_C, 0);

        set_cc_op(s, CC_OP_FLAGS);
        return;
    }
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
        /* 32-bit result with proper V flag (overflow of the 64-bit
           product into the upper half). */
        tcg_gen_movi_i32(QREG_CC_C, 0);
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
        } else {
            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
        }
        tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);

        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

        set_cc_op(s, CC_OP_FLAGS);
    } else {
        /* The upper 32 bits of the product are discarded, so
           muls.l and mulu.l are functionally equivalent. */
        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
    }
}
/*
 * Common body for LINK/LINKL: push An, load An with the updated stack
 * pointer, then add the (sign-extended) displacement to SP.
 */
static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 0);
    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
    /* link a7,#imm pushes the *old* SP value; don't clobber A7 with
       the pre-decremented address in that case. */
    if ((insn & 7) != 7) {
        tcg_gen_mov_i32(reg, tmp);
    }
    tcg_gen_addi_i32(QREG_SP, tmp, offset);
    tcg_temp_free(tmp);
}
/* LINK with a 16-bit displacement. */
DISAS_INSN(link)
{
    int16_t offset;

    offset = read_im16(env, s);
    gen_link(s, insn, offset);
}

/* LINK with a 32-bit displacement. */
DISAS_INSN(linkl)
{
    int32_t offset;

    offset = read_im32(env, s);
    gen_link(s, insn, offset);
}
/*
 * UNLK: SP = An; An = (SP); SP += 4.  The frame pointer is copied to
 * a temp first so the sequence works even when An is A7 itself.
 */
DISAS_INSN(unlk)
{
    TCGv src;
    TCGv reg;
    TCGv tmp;

    src = tcg_temp_new();
    reg = AREG(insn, 0);
    tcg_gen_mov_i32(src, reg);
    tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
    tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, src, 4);
    tcg_temp_free(src);
    tcg_temp_free(tmp);
}
#if defined(CONFIG_SOFTMMU)
/* RESET: supervisor-only; assert the reset line via a helper. */
DISAS_INSN(reset)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_helper_reset(cpu_env);
}
#endif

/* NOP: nothing to emit. */
DISAS_INSN(nop)
{
}

/* RTD: pop the return address, then discard <offset> extra bytes. */
DISAS_INSN(rtd)
{
    TCGv tmp;
    int16_t offset = read_im16(env, s);

    tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
    tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
    gen_jmp(s, tmp);
}

/* RTS: pop the return address and jump to it. */
DISAS_INSN(rts)
{
    TCGv tmp;

    tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
    tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
    gen_jmp(s, tmp);
}
2937 DISAS_INSN(jump)
2939 TCGv tmp;
2941 /* Load the target address first to ensure correct exception
2942 behavior. */
2943 tmp = gen_lea(env, s, insn, OS_LONG);
2944 if (IS_NULL_QREG(tmp)) {
2945 gen_addr_fault(s);
2946 return;
2948 if ((insn & 0x40) == 0) {
2949 /* jsr */
2950 gen_push(s, tcg_const_i32(s->pc));
2952 gen_jmp(s, tmp);
/*
 * ADDQ/SUBQ: add or subtract a quick immediate (1..8; encoded 0 means
 * 8) to the EA operand.  Bit 8 selects subtract.  When the destination
 * is an address register the operation is always long-sized and the
 * condition codes are left untouched.
 */
DISAS_INSN(addsubq)
{
    TCGv src;
    TCGv dest;
    TCGv val;
    int imm;
    TCGv addr;
    int opsize;

    if ((insn & 070) == 010) {
        /* Operation on address register is always long. */
        opsize = OS_LONG;
    } else {
        opsize = insn_opsize(insn);
    }
    SRC_EA(env, src, opsize, 1, &addr);
    imm = (insn >> 9) & 7;
    if (imm == 0) {
        imm = 8;    /* encoding 0 means 8 */
    }

    val = tcg_const_i32(imm);
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src);
    if ((insn & 0x38) == 0x08) {
        /* Don't update condition codes if the destination is an
           address register. */
        if (insn & 0x0100) {
            tcg_gen_sub_i32(dest, dest, val);
        } else {
            tcg_gen_add_i32(dest, dest, val);
        }
    } else {
        if (insn & 0x0100) {
            /* X = borrow out of the subtraction. */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            tcg_gen_sub_i32(dest, dest, val);
            set_cc_op(s, CC_OP_SUBB + opsize);
        } else {
            tcg_gen_add_i32(dest, dest, val);
            /* X = carry out of the addition (result < addend). */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            set_cc_op(s, CC_OP_ADDB + opsize);
        }
        gen_update_cc_add(dest, val, opsize);
    }
    tcg_temp_free(val);
    DEST_EA(env, insn, opsize, dest, &addr);
    tcg_temp_free(dest);
}
/*
 * TPF (ColdFire trap-false): effectively a NOP that skips over its
 * encoded extension words.
 */
DISAS_INSN(tpf)
{
    switch (insn & 7) {
    case 2: /* One extension word. */
        s->pc += 2;
        break;
    case 3: /* Two extension words. */
        s->pc += 4;
        break;
    case 4: /* No extension words. */
        break;
    default:
        disas_undef(env, s, insn);
    }
}
3019 DISAS_INSN(branch)
3021 int32_t offset;
3022 uint32_t base;
3023 int op;
3025 base = s->pc;
3026 op = (insn >> 8) & 0xf;
3027 offset = (int8_t)insn;
3028 if (offset == 0) {
3029 offset = (int16_t)read_im16(env, s);
3030 } else if (offset == -1) {
3031 offset = read_im32(env, s);
3033 if (op == 1) {
3034 /* bsr */
3035 gen_push(s, tcg_const_i32(s->pc));
3037 if (op > 1) {
3038 /* Bcc */
3039 TCGLabel *l1 = gen_new_label();
3040 gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
3041 gen_jmp_tb(s, 1, base + offset);
3042 gen_set_label(l1);
3043 gen_jmp_tb(s, 0, s->pc);
3044 } else {
3045 /* Unconditional branch. */
3046 update_cc_op(s);
3047 gen_jmp_tb(s, 0, base + offset);
3051 DISAS_INSN(moveq)
3053 tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
3054 gen_logic_cc(s, DREG(insn, 9), OS_LONG);
/*
 * MVZ/MVS (ColdFire): move a byte or word to Dn with zero (bit 7
 * clear requests sign) extension done by the EA load itself.
 */
DISAS_INSN(mvzs)
{
    int opsize;
    TCGv src;
    TCGv reg;

    if (insn & 0x40)
        opsize = OS_WORD;
    else
        opsize = OS_BYTE;
    /* (insn & 0x80) == 0 selects sign extension in the EA load. */
    SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
    reg = DREG(insn, 9);
    tcg_gen_mov_i32(reg, src);
    gen_logic_cc(s, src, opsize);
}
/*
 * OR: bit 8 selects direction — set: Dn is the source and the EA is
 * read-modify-written; clear: the EA is the source and only the
 * sized part of Dn is updated.
 */
DISAS_INSN(or)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 0);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_or_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_or_i32(dest, src, reg);
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
    gen_logic_cc(s, dest, opsize);
    tcg_temp_free(dest);
}
3097 DISAS_INSN(suba)
3099 TCGv src;
3100 TCGv reg;
3102 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3103 reg = AREG(insn, 9);
3104 tcg_gen_sub_i32(reg, reg, src);
/*
 * Common body for SUBX: dest - src - X, with all flags computed
 * explicitly.  The result is left in QREG_CC_N for the caller to
 * write back.
 */
static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp;

    gen_flush_flags(s); /* compute old Z */

    /* Perform subtract with borrow.
     * (X, N) = dest - (src + X);
     */

    tmp = tcg_const_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for subtract:
     * (res ^ dest) & (dest ^ src). */

    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
    tcg_temp_free(tmp);

    /* Copy the rest of the results into place. */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
/* SUBX Dy,Dx: register-to-register form; write back only the sized
 * part of the destination register. */
DISAS_INSN(subx_reg)
{
    TCGv dest;
    TCGv src;
    int opsize;

    opsize = insn_opsize(insn);

    src = gen_extend(s, DREG(insn, 0), opsize, 1);
    dest = gen_extend(s, DREG(insn, 9), opsize, 1);

    gen_subx(s, src, dest, opsize);

    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
}
/* SUBX -(Ay),-(Ax): memory form with predecrement on both operands. */
DISAS_INSN(subx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_subx(s, src, dest, opsize);

    /* gen_subx leaves the result in QREG_CC_N. */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));

    tcg_temp_free(dest);
    tcg_temp_free(src);
}
/*
 * MOV3Q (ColdFire): store a 3-bit immediate (encoding 0 means -1) to
 * the EA, setting condition codes from the value.
 */
DISAS_INSN(mov3q)
{
    TCGv src;
    int val;

    val = (insn >> 9) & 7;
    if (val == 0)
        val = -1;   /* encoding 0 means -1 */
    src = tcg_const_i32(val);
    gen_logic_cc(s, src, OS_LONG);
    DEST_EA(env, insn, OS_LONG, src, NULL);
    tcg_temp_free(src);
}
/* CMP: compare Dn with the EA operand; only condition codes change. */
DISAS_INSN(cmp)
{
    TCGv src;
    TCGv reg;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    gen_update_cc_cmp(s, reg, src, opsize);
}
/*
 * CMPA: compare An with the EA operand.  The source is sign-extended
 * from word or long, but the comparison itself is always long-sized.
 */
DISAS_INSN(cmpa)
{
    int opsize;
    TCGv src;
    TCGv reg;

    if (insn & 0x100) {
        opsize = OS_LONG;
    } else {
        opsize = OS_WORD;
    }

    SRC_EA(env, src, opsize, 1, NULL);
    reg = AREG(insn, 9);
    gen_update_cc_cmp(s, reg, src, OS_LONG);
}
/* CMPM (Ay)+,(Ax)+: compare memory to memory with postincrement. */
DISAS_INSN(cmpm)
{
    int opsize = insn_opsize(insn);
    TCGv src, dst;

    /* Post-increment load (mode 3) from Ay. */
    src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));
    /* Post-increment load (mode 3) from Ax. */
    dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));

    gen_update_cc_cmp(s, dst, src, opsize);
}
/* EOR: Dn ^ EA, written back to the EA; flags from the result. */
DISAS_INSN(eor)
{
    TCGv src;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);

    SRC_EA(env, src, opsize, 0, &addr);
    dest = tcg_temp_new();
    tcg_gen_xor_i32(dest, src, DREG(insn, 9));
    gen_logic_cc(s, dest, opsize);
    DEST_EA(env, insn, opsize, dest, &addr);
    tcg_temp_free(dest);
}
3255 static void do_exg(TCGv reg1, TCGv reg2)
3257 TCGv temp = tcg_temp_new();
3258 tcg_gen_mov_i32(temp, reg1);
3259 tcg_gen_mov_i32(reg1, reg2);
3260 tcg_gen_mov_i32(reg2, temp);
3261 tcg_temp_free(temp);
/* EXG Dx,Dy. */
DISAS_INSN(exg_dd)
{
    /* exchange Dx and Dy */
    do_exg(DREG(insn, 9), DREG(insn, 0));
}

/* EXG Ax,Ay. */
DISAS_INSN(exg_aa)
{
    /* exchange Ax and Ay */
    do_exg(AREG(insn, 9), AREG(insn, 0));
}

/* EXG Dx,Ay. */
DISAS_INSN(exg_da)
{
    /* exchange Dx and Ay */
    do_exg(DREG(insn, 9), AREG(insn, 0));
}
/*
 * AND: bit 8 selects direction — set: Dn is the source and the EA is
 * read-modify-written; clear: the EA is the source and only the
 * sized part of Dn is updated.
 */
DISAS_INSN(and)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;
    int opsize;

    dest = tcg_temp_new();

    opsize = insn_opsize(insn);
    reg = DREG(insn, 9);
    if (insn & 0x100) {
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        gen_partset_reg(opsize, reg, dest);
    }
    gen_logic_cc(s, dest, opsize);
    tcg_temp_free(dest);
}
3307 DISAS_INSN(adda)
3309 TCGv src;
3310 TCGv reg;
3312 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3313 reg = AREG(insn, 9);
3314 tcg_gen_add_i32(reg, reg, src);
/*
 * Common body for ADDX: src + dest + X, with all flags computed
 * explicitly.  The result is left in QREG_CC_N for the caller to
 * write back.
 */
static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp;

    gen_flush_flags(s); /* compute old Z */

    /* Perform addition with carry.
     * (X, N) = src + dest + X;
     */

    tmp = tcg_const_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Compute signed-overflow for addition:
     * (res ^ src) & ~(dest ^ src). */

    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
    tcg_temp_free(tmp);

    /* Copy the rest of the results into place. */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
/* ADDX Dy,Dx: register-to-register form; write back only the sized
 * part of the destination register. */
DISAS_INSN(addx_reg)
{
    TCGv dest;
    TCGv src;
    int opsize;

    opsize = insn_opsize(insn);

    dest = gen_extend(s, DREG(insn, 9), opsize, 1);
    src = gen_extend(s, DREG(insn, 0), opsize, 1);

    gen_addx(s, src, dest, opsize);

    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
}
/* ADDX -(Ay),-(Ax): memory form with predecrement on both operands. */
DISAS_INSN(addx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_addx(s, src, dest, opsize);

    /* gen_addx leaves the result in QREG_CC_N. */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));

    tcg_temp_free(dest);
    tcg_temp_free(src);
}
/*
 * Immediate-count shifts (ASL/ASR/LSL/LSR #imm,Dn).  count is 1..8
 * (encoding 0 means 8); bit 3 selects logical vs arithmetic, bit 8
 * selects left vs right.  All flags are computed inline.
 */
static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
{
    int count = (insn >> 9) & 7;
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);

    if (count == 0) {
        count = 8;      /* encoding 0 means 8 */
    }

    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C/X = last bit shifted out. */
        tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
        tcg_gen_shli_i32(QREG_CC_N, reg, count);

        /* Note that ColdFire always clears V (done above),
           while M68000 sets if the most significant bit is changed at
           any time during the shift operation */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            /* if shift count >= bits, V is (reg != 0) */
            if (count >= bits) {
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
            } else {
                TCGv t0 = tcg_temp_new();
                tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
                tcg_gen_sari_i32(t0, reg, bits - count - 1);
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
                tcg_temp_free(t0);
            }
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, reg, count);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, reg, count);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
/*
 * Register-count shifts (ASL/ASR/LSL/LSR Dx,Dy).  The count comes
 * from Dx modulo 64; the shift is performed in 64 bits so the last
 * bit shifted out (carry) is easy to recover.
 */
static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
    TCGv s32;
    TCGv_i64 t64, s64;

    t64 = tcg_temp_new_i64();
    s64 = tcg_temp_new_i64();
    s32 = tcg_temp_new();

    /* Note that m68k truncates the shift count modulo 64, not 32.
       In addition, a 64-bit shift makes it easy to find "the last
       bit shifted out", for the carry flag. */
    tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
    tcg_gen_extu_i32_i64(s64, s32);
    tcg_gen_extu_i32_i64(t64, reg);

    /* Optimistically set V=0.  Also used as a zero source below. */
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shl_i64(t64, t64, s64);

        if (opsize == OS_LONG) {
            tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
            /* Note that C=0 if shift count is 0, and we get that for free. */
        } else {
            TCGv zero = tcg_const_i32(0);
            tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
            tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
            /* For sub-long sizes, force C=0 when the count is zero. */
            tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                                s32, zero, zero, QREG_CC_C);
            tcg_temp_free(zero);
        }
        tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);

        /* X = C, but only if the shift count was non-zero. */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);

        /* M68000 sets V if the most significant bit is changed at
         * any time during the shift operation.  Do this via creating
         * an extension of the sign bit, comparing, and discarding
         * the bits below the sign bit.  I.e.
         *     int64_t s = (intN_t)reg;
         *     int64_t t = (int64_t)(intN_t)reg << count;
         *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            TCGv_i64 tt = tcg_const_i64(32);
            /* if shift is greater than 32, use 32 */
            tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
            tcg_temp_free_i64(tt);
            /* Sign extend the input to 64 bits; re-do the shift. */
            tcg_gen_ext_i32_i64(t64, reg);
            tcg_gen_shl_i64(s64, t64, s64);
            /* Clear all bits that are unchanged. */
            tcg_gen_xor_i64(t64, t64, s64);
            /* Ignore the bits below the sign bit. */
            tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
            /* If any bits remain set, we have overflow. */
            tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
            tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* Right shift: place the value in the high half so the last
           bit shifted out lands in the low half. */
        tcg_gen_shli_i64(t64, t64, 32);
        if (logical) {
            tcg_gen_shr_i64(t64, t64, s64);
        } else {
            tcg_gen_sar_i64(t64, t64, s64);
        }
        tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);

        /* Note that C=0 if shift count is 0, and we get that for free. */
        tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);

        /* X = C, but only if the shift count was non-zero. */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);
    }
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

    tcg_temp_free(s32);
    tcg_temp_free_i64(s64);
    tcg_temp_free_i64(t64);

    /* Write back the result. */
    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
/* Size-specific wrappers for the immediate- and register-count shifts. */
DISAS_INSN(shift8_im)
{
    shift_im(s, insn, OS_BYTE);
}

DISAS_INSN(shift16_im)
{
    shift_im(s, insn, OS_WORD);
}

DISAS_INSN(shift_im)
{
    shift_im(s, insn, OS_LONG);
}

DISAS_INSN(shift8_reg)
{
    shift_reg(s, insn, OS_BYTE);
}

DISAS_INSN(shift16_reg)
{
    shift_reg(s, insn, OS_WORD);
}

DISAS_INSN(shift_reg)
{
    shift_reg(s, insn, OS_LONG);
}
/*
 * Memory shifts: always word-sized, always a shift count of one.
 * Bit 3 selects logical vs arithmetic, bit 8 selects left vs right.
 */
DISAS_INSN(shift_mem)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    TCGv src;
    TCGv addr;

    SRC_EA(env, src, OS_WORD, !logical, &addr);
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shri_i32(QREG_CC_C, src, 15);   /* bit shifted out */
        tcg_gen_shli_i32(QREG_CC_N, src, 1);

        /* Note that ColdFire always clears V,
           while M68000 sets if the most significant bit is changed at
           any time during the shift operation */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            src = gen_extend(s, src, OS_WORD, 1);
            /* V set iff the sign bit changed (old ^ new sign). */
            tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
        }
    } else {
        tcg_gen_mov_i32(QREG_CC_C, src);    /* bit 0 shifted out */
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, src, 1);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, src, 1);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
/*
 * ROL/ROR (rotate without extend) of 8, 16 or 32 bits, updating reg
 * in place and computing NZVC.  Sub-long sizes are replicated across
 * the 32-bit register so a plain 32-bit rotate produces the right
 * result in the low bits.
 */
static void rotate(TCGv reg, TCGv shift, int left, int size)
{
    switch (size) {
    case 8:
        /* Replicate the 8-bit input so that a 32-bit rotate works. */
        tcg_gen_ext8u_i32(reg, reg);
        tcg_gen_muli_i32(reg, reg, 0x01010101);
        goto do_long;
    case 16:
        /* Replicate the 16-bit input so that a 32-bit rotate works. */
        tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
        goto do_long;
    do_long:
    default:
        if (left) {
            tcg_gen_rotl_i32(reg, reg, shift);
        } else {
            tcg_gen_rotr_i32(reg, reg, shift);
        }
    }

    /* compute flags */

    /* Re-narrow (sign-extend) the replicated result. */
    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }

    /* QREG_CC_X is not affected */

    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);

    /* C = the bit rotated into the other end. */
    if (left) {
        tcg_gen_andi_i32(QREG_CC_C, reg, 1);
    } else {
        tcg_gen_shri_i32(QREG_CC_C, reg, 31);
    }

    tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
}
/*
 * Set NZXC (and clear V) after a rotate-through-X; reg is narrowed by
 * sign extension first, and X is the caller-supplied extend bit.
 */
static void rotate_x_flags(TCGv reg, TCGv X, int size)
{
    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }
    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);
    tcg_gen_mov_i32(QREG_CC_X, X);
    tcg_gen_mov_i32(QREG_CC_C, X);  /* ROXL/ROXR: C is a copy of X */
    tcg_gen_movi_i32(QREG_CC_V, 0);
}
/* Result of rotate_x() is valid if 0 <= shift <= size */
/*
 * ROXL/ROXR for 8- and 16-bit operands: rotate reg through the X bit.
 * Implemented as (reg << shl) | (reg >> shr) | (X << shx) with the
 * three shift amounts chosen per direction.  Returns the new X value
 * as a fresh temp (caller frees).
 */
static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
{
    TCGv X, shl, shr, shx, sz, zero;

    sz = tcg_const_i32(size);

    shr = tcg_temp_new();
    shl = tcg_temp_new();
    shx = tcg_temp_new();
    if (left) {
        tcg_gen_mov_i32(shl, shift);      /* shl = shift */
        tcg_gen_movi_i32(shr, size + 1);
        tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
        tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
        /* shx = shx < 0 ? size : shx; */
        zero = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
        tcg_temp_free(zero);
    } else {
        tcg_gen_mov_i32(shr, shift);      /* shr = shift */
        tcg_gen_movi_i32(shl, size + 1);
        tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
        tcg_gen_sub_i32(shx, sz, shift);  /* shx = size - shift */
    }
    tcg_temp_free_i32(sz);

    /* reg = (reg << shl) | (reg >> shr) | (x << shx); */

    tcg_gen_shl_i32(shl, reg, shl);
    tcg_gen_shr_i32(shr, reg, shr);
    tcg_gen_or_i32(reg, shl, shr);
    tcg_temp_free(shl);
    tcg_temp_free(shr);
    tcg_gen_shl_i32(shx, QREG_CC_X, shx);
    tcg_gen_or_i32(reg, reg, shx);
    tcg_temp_free(shx);

    /* X = (reg >> size) & 1 */

    X = tcg_temp_new();
    tcg_gen_extract_i32(X, reg, size, 1);

    return X;
}
/* Result of rotate32_x() is valid if 0 <= shift < 33 */
/*
 * ROXL/ROXR for 32-bit operands: build a 33-bit value [reg:X] in a
 * 64-bit temp, rotate it, then split the result back into reg and a
 * new X value (returned as a fresh temp; caller frees).  If the shift
 * count is zero, both reg and X are left unchanged.
 */
static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
{
    TCGv_i64 t0, shift64;
    TCGv X, lo, hi, zero;

    shift64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(shift64, shift);

    t0 = tcg_temp_new_i64();

    X = tcg_temp_new();
    lo = tcg_temp_new();
    hi = tcg_temp_new();

    if (left) {
        /* create [reg:X:..] */

        tcg_gen_shli_i32(lo, QREG_CC_X, 31);
        tcg_gen_concat_i32_i64(t0, lo, reg);

        /* rotate */

        tcg_gen_rotl_i64(t0, t0, shift64);
        tcg_temp_free_i64(shift64);

        /* result is [reg:..:reg:X] */

        tcg_gen_extr_i64_i32(lo, hi, t0);
        tcg_gen_andi_i32(X, lo, 1);

        tcg_gen_shri_i32(lo, lo, 1);
    } else {
        /* create [..:X:reg] */

        tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);

        tcg_gen_rotr_i64(t0, t0, shift64);
        tcg_temp_free_i64(shift64);

        /* result is value: [X:reg:..:reg] */

        tcg_gen_extr_i64_i32(lo, hi, t0);

        /* extract X */

        tcg_gen_shri_i32(X, hi, 31);

        /* extract result */

        tcg_gen_shli_i32(hi, hi, 1);
    }
    tcg_temp_free_i64(t0);
    tcg_gen_or_i32(lo, lo, hi);
    tcg_temp_free(hi);

    /* if shift == 0, register and X are not affected */

    zero = tcg_const_i32(0);
    tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
    tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
    tcg_temp_free(zero);
    tcg_temp_free(lo);

    return X;
}
/*
 * Long-sized rotate with immediate count 1..8 (encoding 0 means 8).
 * Bit 3 selects plain rotate vs rotate-through-X.
 */
DISAS_INSN(rotate_im)
{
    TCGv shift;
    int tmp;
    int left = (insn & 0x100);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;    /* encoding 0 means 8 */
    }

    shift = tcg_const_i32(tmp);
    if (insn & 8) {
        rotate(DREG(insn, 0), shift, left, 32);
    } else {
        TCGv X = rotate32_x(DREG(insn, 0), shift, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);

    set_cc_op(s, CC_OP_FLAGS);
}
/* Byte-sized rotate with immediate count (see rotate_im). */
DISAS_INSN(rotate8_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;    /* encoding 0 means 8 */
    }

    shift = tcg_const_i32(tmp);
    if (insn & 8) {
        rotate(reg, shift, left, 8);
    } else {
        TCGv X = rotate_x(reg, shift, left, 8);
        rotate_x_flags(reg, X, 8);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
/* Word-sized rotate with immediate count (see rotate_im). */
DISAS_INSN(rotate16_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;    /* encoding 0 means 8 */
    }

    shift = tcg_const_i32(tmp);
    if (insn & 8) {
        rotate(reg, shift, left, 16);
    } else {
        TCGv X = rotate_x(reg, shift, left, 16);
        rotate_x_flags(reg, X, 16);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
/*
 * Long-sized rotate with the count taken from Dx (masked to 0..63).
 * Plain rotates use count mod 32; rotate-through-X uses count mod 33.
 */
DISAS_INSN(rotate_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = DREG(insn, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 31);
        rotate(reg, t1, left, 32);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 33 */
        tcg_gen_movi_i32(t1, 33);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate32_x(DREG(insn, 0), t1, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    set_cc_op(s, CC_OP_FLAGS);
}
/* Byte-sized rotate with register count; ROXL/ROXR uses count mod 9. */
DISAS_INSN(rotate8_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 7);
        rotate(reg, t1, left, 8);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 9 */
        tcg_gen_movi_i32(t1, 9);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 8);
        rotate_x_flags(reg, X, 8);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
/* Word-sized rotate with register count; ROXL/ROXR uses count mod 17. */
DISAS_INSN(rotate16_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 15);
        rotate(reg, t1, left, 16);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 17 */
        tcg_gen_movi_i32(t1, 17);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 16);
        rotate_x_flags(reg, X, 16);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
/*
 * Memory rotates: always word-sized, always a count of one.
 * Bit 9 selects plain rotate vs rotate-through-X.
 */
DISAS_INSN(rotate_mem)
{
    TCGv src;
    TCGv addr;
    TCGv shift;
    int left = (insn & 0x100);

    SRC_EA(env, src, OS_WORD, 0, &addr);

    shift = tcg_const_i32(1);
    if (insn & 0x0200) {
        rotate(src, shift, left, 16);
    } else {
        TCGv X = rotate_x(src, shift, left, 16);
        rotate_x_flags(src, X, 16);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);
    DEST_EA(env, insn, OS_WORD, src, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
/*
 * BFEXTU/BFEXTS with a register operand: extract a bit-field of
 * <len> bits at big-endian bit offset <ofs> from Dn into Dl.  Width
 * and offset each come from the extension word, either as immediates
 * or from data registers (bits 5 and 11 select which).  N is set
 * from the sign-extended field.
 */
DISAS_INSN(bfext_reg)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv src = DREG(insn, 0);
    TCGv dst = DREG(ext, 12);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp = tcg_temp_new();
    TCGv shift;

    /* In general, we're going to rotate the field so that it's at the
       top of the word and then right-shift by the complement of the
       width to extend the field. */
    if (ext & 0x20) {
        /* Variable width. */
        if (ext & 0x800) {
            /* Variable offset. */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
        } else {
            tcg_gen_rotli_i32(tmp, src, ofs);
        }

        /* shift = (32 - width) mod 32; an encoded width of 0 means 32
           and produces a shift of 0. */
        shift = tcg_temp_new();
        tcg_gen_neg_i32(shift, DREG(ext, 0));
        tcg_gen_andi_i32(shift, shift, 31);
        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_shr_i32(dst, tmp, shift);
        }
        tcg_temp_free(shift);
    } else {
        /* Immediate width. */
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
            src = tmp;
            pos = 32 - len;
        } else {
            /* Immediate offset.  If the field doesn't wrap around the
               end of the word, rely on (s)extract completely. */
            if (pos < 0) {
                tcg_gen_rotli_i32(tmp, src, ofs);
                src = tmp;
                pos = 32 - len;
            }
        }

        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_extract_i32(dst, src, pos, len);
        }
    }

    tcg_temp_free(tmp);
    set_cc_op(s, CC_OP_LOGIC);
}
4052 DISAS_INSN(bfext_mem)
4054 int ext = read_im16(env, s);
4055 int is_sign = insn & 0x200;
4056 TCGv dest = DREG(ext, 12);
4057 TCGv addr, len, ofs;
4059 addr = gen_lea(env, s, insn, OS_UNSIZED);
4060 if (IS_NULL_QREG(addr)) {
4061 gen_addr_fault(s);
4062 return;
4065 if (ext & 0x20) {
4066 len = DREG(ext, 0);
4067 } else {
4068 len = tcg_const_i32(extract32(ext, 0, 5));
4070 if (ext & 0x800) {
4071 ofs = DREG(ext, 6);
4072 } else {
4073 ofs = tcg_const_i32(extract32(ext, 6, 5));
4076 if (is_sign) {
4077 gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
4078 tcg_gen_mov_i32(QREG_CC_N, dest);
4079 } else {
4080 TCGv_i64 tmp = tcg_temp_new_i64();
4081 gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
4082 tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
4083 tcg_temp_free_i64(tmp);
4085 set_cc_op(s, CC_OP_LOGIC);
4087 if (!(ext & 0x20)) {
4088 tcg_temp_free(len);
4090 if (!(ext & 0x800)) {
4091 tcg_temp_free(ofs);
4095 DISAS_INSN(bfop_reg)
4097 int ext = read_im16(env, s);
4098 TCGv src = DREG(insn, 0);
4099 int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
4100 int ofs = extract32(ext, 6, 5); /* big bit-endian */
4101 TCGv mask, tofs, tlen;
4103 tofs = NULL;
4104 tlen = NULL;
4105 if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
4106 tofs = tcg_temp_new();
4107 tlen = tcg_temp_new();
4110 if ((ext & 0x820) == 0) {
4111 /* Immediate width and offset. */
4112 uint32_t maski = 0x7fffffffu >> (len - 1);
4113 if (ofs + len <= 32) {
4114 tcg_gen_shli_i32(QREG_CC_N, src, ofs);
4115 } else {
4116 tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
4118 tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
4119 mask = tcg_const_i32(ror32(maski, ofs));
4120 if (tofs) {
4121 tcg_gen_movi_i32(tofs, ofs);
4122 tcg_gen_movi_i32(tlen, len);
4124 } else {
4125 TCGv tmp = tcg_temp_new();
4126 if (ext & 0x20) {
4127 /* Variable width */
4128 tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
4129 tcg_gen_andi_i32(tmp, tmp, 31);
4130 mask = tcg_const_i32(0x7fffffffu);
4131 tcg_gen_shr_i32(mask, mask, tmp);
4132 if (tlen) {
4133 tcg_gen_addi_i32(tlen, tmp, 1);
4135 } else {
4136 /* Immediate width */
4137 mask = tcg_const_i32(0x7fffffffu >> (len - 1));
4138 if (tlen) {
4139 tcg_gen_movi_i32(tlen, len);
4142 if (ext & 0x800) {
4143 /* Variable offset */
4144 tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
4145 tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
4146 tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
4147 tcg_gen_rotr_i32(mask, mask, tmp);
4148 if (tofs) {
4149 tcg_gen_mov_i32(tofs, tmp);
4151 } else {
4152 /* Immediate offset (and variable width) */
4153 tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
4154 tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
4155 tcg_gen_rotri_i32(mask, mask, ofs);
4156 if (tofs) {
4157 tcg_gen_movi_i32(tofs, ofs);
4160 tcg_temp_free(tmp);
4162 set_cc_op(s, CC_OP_LOGIC);
4164 switch (insn & 0x0f00) {
4165 case 0x0a00: /* bfchg */
4166 tcg_gen_eqv_i32(src, src, mask);
4167 break;
4168 case 0x0c00: /* bfclr */
4169 tcg_gen_and_i32(src, src, mask);
4170 break;
4171 case 0x0d00: /* bfffo */
4172 gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
4173 tcg_temp_free(tlen);
4174 tcg_temp_free(tofs);
4175 break;
4176 case 0x0e00: /* bfset */
4177 tcg_gen_orc_i32(src, src, mask);
4178 break;
4179 case 0x0800: /* bftst */
4180 /* flags already set; no other work to do. */
4181 break;
4182 default:
4183 g_assert_not_reached();
4185 tcg_temp_free(mask);
4188 DISAS_INSN(bfop_mem)
4190 int ext = read_im16(env, s);
4191 TCGv addr, len, ofs;
4192 TCGv_i64 t64;
4194 addr = gen_lea(env, s, insn, OS_UNSIZED);
4195 if (IS_NULL_QREG(addr)) {
4196 gen_addr_fault(s);
4197 return;
4200 if (ext & 0x20) {
4201 len = DREG(ext, 0);
4202 } else {
4203 len = tcg_const_i32(extract32(ext, 0, 5));
4205 if (ext & 0x800) {
4206 ofs = DREG(ext, 6);
4207 } else {
4208 ofs = tcg_const_i32(extract32(ext, 6, 5));
4211 switch (insn & 0x0f00) {
4212 case 0x0a00: /* bfchg */
4213 gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4214 break;
4215 case 0x0c00: /* bfclr */
4216 gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4217 break;
4218 case 0x0d00: /* bfffo */
4219 t64 = tcg_temp_new_i64();
4220 gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
4221 tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
4222 tcg_temp_free_i64(t64);
4223 break;
4224 case 0x0e00: /* bfset */
4225 gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4226 break;
4227 case 0x0800: /* bftst */
4228 gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4229 break;
4230 default:
4231 g_assert_not_reached();
4233 set_cc_op(s, CC_OP_LOGIC);
4235 if (!(ext & 0x20)) {
4236 tcg_temp_free(len);
4238 if (!(ext & 0x800)) {
4239 tcg_temp_free(ofs);
4243 DISAS_INSN(bfins_reg)
4245 int ext = read_im16(env, s);
4246 TCGv dst = DREG(insn, 0);
4247 TCGv src = DREG(ext, 12);
4248 int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
4249 int ofs = extract32(ext, 6, 5); /* big bit-endian */
4250 int pos = 32 - ofs - len; /* little bit-endian */
4251 TCGv tmp;
4253 tmp = tcg_temp_new();
4255 if (ext & 0x20) {
4256 /* Variable width */
4257 tcg_gen_neg_i32(tmp, DREG(ext, 0));
4258 tcg_gen_andi_i32(tmp, tmp, 31);
4259 tcg_gen_shl_i32(QREG_CC_N, src, tmp);
4260 } else {
4261 /* Immediate width */
4262 tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
4264 set_cc_op(s, CC_OP_LOGIC);
4266 /* Immediate width and offset */
4267 if ((ext & 0x820) == 0) {
4268 /* Check for suitability for deposit. */
4269 if (pos >= 0) {
4270 tcg_gen_deposit_i32(dst, dst, src, pos, len);
4271 } else {
4272 uint32_t maski = -2U << (len - 1);
4273 uint32_t roti = (ofs + len) & 31;
4274 tcg_gen_andi_i32(tmp, src, ~maski);
4275 tcg_gen_rotri_i32(tmp, tmp, roti);
4276 tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
4277 tcg_gen_or_i32(dst, dst, tmp);
4279 } else {
4280 TCGv mask = tcg_temp_new();
4281 TCGv rot = tcg_temp_new();
4283 if (ext & 0x20) {
4284 /* Variable width */
4285 tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
4286 tcg_gen_andi_i32(rot, rot, 31);
4287 tcg_gen_movi_i32(mask, -2);
4288 tcg_gen_shl_i32(mask, mask, rot);
4289 tcg_gen_mov_i32(rot, DREG(ext, 0));
4290 tcg_gen_andc_i32(tmp, src, mask);
4291 } else {
4292 /* Immediate width (variable offset) */
4293 uint32_t maski = -2U << (len - 1);
4294 tcg_gen_andi_i32(tmp, src, ~maski);
4295 tcg_gen_movi_i32(mask, maski);
4296 tcg_gen_movi_i32(rot, len & 31);
4298 if (ext & 0x800) {
4299 /* Variable offset */
4300 tcg_gen_add_i32(rot, rot, DREG(ext, 6));
4301 } else {
4302 /* Immediate offset (variable width) */
4303 tcg_gen_addi_i32(rot, rot, ofs);
4305 tcg_gen_andi_i32(rot, rot, 31);
4306 tcg_gen_rotr_i32(mask, mask, rot);
4307 tcg_gen_rotr_i32(tmp, tmp, rot);
4308 tcg_gen_and_i32(dst, dst, mask);
4309 tcg_gen_or_i32(dst, dst, tmp);
4311 tcg_temp_free(rot);
4312 tcg_temp_free(mask);
4314 tcg_temp_free(tmp);
4317 DISAS_INSN(bfins_mem)
4319 int ext = read_im16(env, s);
4320 TCGv src = DREG(ext, 12);
4321 TCGv addr, len, ofs;
4323 addr = gen_lea(env, s, insn, OS_UNSIZED);
4324 if (IS_NULL_QREG(addr)) {
4325 gen_addr_fault(s);
4326 return;
4329 if (ext & 0x20) {
4330 len = DREG(ext, 0);
4331 } else {
4332 len = tcg_const_i32(extract32(ext, 0, 5));
4334 if (ext & 0x800) {
4335 ofs = DREG(ext, 6);
4336 } else {
4337 ofs = tcg_const_i32(extract32(ext, 6, 5));
4340 gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
4341 set_cc_op(s, CC_OP_LOGIC);
4343 if (!(ext & 0x20)) {
4344 tcg_temp_free(len);
4346 if (!(ext & 0x800)) {
4347 tcg_temp_free(ofs);
4351 DISAS_INSN(ff1)
4353 TCGv reg;
4354 reg = DREG(insn, 0);
4355 gen_logic_cc(s, reg, OS_LONG);
4356 gen_helper_ff1(reg, reg);
4359 DISAS_INSN(chk)
4361 TCGv src, reg;
4362 int opsize;
4364 switch ((insn >> 7) & 3) {
4365 case 3:
4366 opsize = OS_WORD;
4367 break;
4368 case 2:
4369 if (m68k_feature(env, M68K_FEATURE_CHK2)) {
4370 opsize = OS_LONG;
4371 break;
4373 /* fallthru */
4374 default:
4375 gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4376 return;
4378 SRC_EA(env, src, opsize, 1, NULL);
4379 reg = gen_extend(s, DREG(insn, 9), opsize, 1);
4381 gen_flush_flags(s);
4382 gen_helper_chk(cpu_env, reg, src);
4385 DISAS_INSN(chk2)
4387 uint16_t ext;
4388 TCGv addr1, addr2, bound1, bound2, reg;
4389 int opsize;
4391 switch ((insn >> 9) & 3) {
4392 case 0:
4393 opsize = OS_BYTE;
4394 break;
4395 case 1:
4396 opsize = OS_WORD;
4397 break;
4398 case 2:
4399 opsize = OS_LONG;
4400 break;
4401 default:
4402 gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4403 return;
4406 ext = read_im16(env, s);
4407 if ((ext & 0x0800) == 0) {
4408 gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4409 return;
4412 addr1 = gen_lea(env, s, insn, OS_UNSIZED);
4413 addr2 = tcg_temp_new();
4414 tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));
4416 bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
4417 tcg_temp_free(addr1);
4418 bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));
4419 tcg_temp_free(addr2);
4421 reg = tcg_temp_new();
4422 if (ext & 0x8000) {
4423 tcg_gen_mov_i32(reg, AREG(ext, 12));
4424 } else {
4425 gen_ext(reg, DREG(ext, 12), opsize, 1);
4428 gen_flush_flags(s);
4429 gen_helper_chk2(cpu_env, reg, bound1, bound2);
4430 tcg_temp_free(reg);
4431 tcg_temp_free(bound1);
4432 tcg_temp_free(bound2);
4435 static void m68k_copy_line(TCGv dst, TCGv src, int index)
4437 TCGv addr;
4438 TCGv_i64 t0, t1;
4440 addr = tcg_temp_new();
4442 t0 = tcg_temp_new_i64();
4443 t1 = tcg_temp_new_i64();
4445 tcg_gen_andi_i32(addr, src, ~15);
4446 tcg_gen_qemu_ld64(t0, addr, index);
4447 tcg_gen_addi_i32(addr, addr, 8);
4448 tcg_gen_qemu_ld64(t1, addr, index);
4450 tcg_gen_andi_i32(addr, dst, ~15);
4451 tcg_gen_qemu_st64(t0, addr, index);
4452 tcg_gen_addi_i32(addr, addr, 8);
4453 tcg_gen_qemu_st64(t1, addr, index);
4455 tcg_temp_free_i64(t0);
4456 tcg_temp_free_i64(t1);
4457 tcg_temp_free(addr);
4460 DISAS_INSN(move16_reg)
4462 int index = IS_USER(s);
4463 TCGv tmp;
4464 uint16_t ext;
4466 ext = read_im16(env, s);
4467 if ((ext & (1 << 15)) == 0) {
4468 gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4471 m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4473 /* Ax can be Ay, so save Ay before incrementing Ax */
4474 tmp = tcg_temp_new();
4475 tcg_gen_mov_i32(tmp, AREG(ext, 12));
4476 tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4477 tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4478 tcg_temp_free(tmp);
4481 DISAS_INSN(move16_mem)
4483 int index = IS_USER(s);
4484 TCGv reg, addr;
4486 reg = AREG(insn, 0);
4487 addr = tcg_const_i32(read_im32(env, s));
4489 if ((insn >> 3) & 1) {
4490 /* MOVE16 (xxx).L, (Ay) */
4491 m68k_copy_line(reg, addr, index);
4492 } else {
4493 /* MOVE16 (Ay), (xxx).L */
4494 m68k_copy_line(addr, reg, index);
4497 tcg_temp_free(addr);
4499 if (((insn >> 3) & 2) == 0) {
4500 /* (Ay)+ */
4501 tcg_gen_addi_i32(reg, reg, 16);
4505 DISAS_INSN(strldsr)
4507 uint16_t ext;
4508 uint32_t addr;
4510 addr = s->pc - 2;
4511 ext = read_im16(env, s);
4512 if (ext != 0x46FC) {
4513 gen_exception(s, addr, EXCP_ILLEGAL);
4514 return;
4516 ext = read_im16(env, s);
4517 if (IS_USER(s) || (ext & SR_S) == 0) {
4518 gen_exception(s, addr, EXCP_PRIVILEGE);
4519 return;
4521 gen_push(s, gen_get_sr(s));
4522 gen_set_sr_im(s, ext, 0);
4525 DISAS_INSN(move_from_sr)
4527 TCGv sr;
4529 if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
4530 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4531 return;
4533 sr = gen_get_sr(s);
4534 DEST_EA(env, insn, OS_WORD, sr, NULL);
4537 #if defined(CONFIG_SOFTMMU)
4538 DISAS_INSN(moves)
4540 int opsize;
4541 uint16_t ext;
4542 TCGv reg;
4543 TCGv addr;
4544 int extend;
4546 if (IS_USER(s)) {
4547 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4548 return;
4551 ext = read_im16(env, s);
4553 opsize = insn_opsize(insn);
4555 if (ext & 0x8000) {
4556 /* address register */
4557 reg = AREG(ext, 12);
4558 extend = 1;
4559 } else {
4560 /* data register */
4561 reg = DREG(ext, 12);
4562 extend = 0;
4565 addr = gen_lea(env, s, insn, opsize);
4566 if (IS_NULL_QREG(addr)) {
4567 gen_addr_fault(s);
4568 return;
4571 if (ext & 0x0800) {
4572 /* from reg to ea */
4573 gen_store(s, opsize, addr, reg, DFC_INDEX(s));
4574 } else {
4575 /* from ea to reg */
4576 TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
4577 if (extend) {
4578 gen_ext(reg, tmp, opsize, 1);
4579 } else {
4580 gen_partset_reg(opsize, reg, tmp);
4582 tcg_temp_free(tmp);
4584 switch (extract32(insn, 3, 3)) {
4585 case 3: /* Indirect postincrement. */
4586 tcg_gen_addi_i32(AREG(insn, 0), addr,
4587 REG(insn, 0) == 7 && opsize == OS_BYTE
4589 : opsize_bytes(opsize));
4590 break;
4591 case 4: /* Indirect predecrememnt. */
4592 tcg_gen_mov_i32(AREG(insn, 0), addr);
4593 break;
4597 DISAS_INSN(move_to_sr)
4599 if (IS_USER(s)) {
4600 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4601 return;
4603 gen_move_to_sr(env, s, insn, false);
4604 gen_exit_tb(s);
4607 DISAS_INSN(move_from_usp)
4609 if (IS_USER(s)) {
4610 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4611 return;
4613 tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
4614 offsetof(CPUM68KState, sp[M68K_USP]));
4617 DISAS_INSN(move_to_usp)
4619 if (IS_USER(s)) {
4620 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4621 return;
4623 tcg_gen_st_i32(AREG(insn, 0), cpu_env,
4624 offsetof(CPUM68KState, sp[M68K_USP]));
4627 DISAS_INSN(halt)
4629 if (IS_USER(s)) {
4630 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4631 return;
4634 gen_exception(s, s->pc, EXCP_HALT_INSN);
4637 DISAS_INSN(stop)
4639 uint16_t ext;
4641 if (IS_USER(s)) {
4642 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4643 return;
4646 ext = read_im16(env, s);
4648 gen_set_sr_im(s, ext, 0);
4649 tcg_gen_movi_i32(cpu_halted, 1);
4650 gen_exception(s, s->pc, EXCP_HLT);
4653 DISAS_INSN(rte)
4655 if (IS_USER(s)) {
4656 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4657 return;
4659 gen_exception(s, s->base.pc_next, EXCP_RTE);
4662 DISAS_INSN(cf_movec)
4664 uint16_t ext;
4665 TCGv reg;
4667 if (IS_USER(s)) {
4668 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4669 return;
4672 ext = read_im16(env, s);
4674 if (ext & 0x8000) {
4675 reg = AREG(ext, 12);
4676 } else {
4677 reg = DREG(ext, 12);
4679 gen_helper_cf_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4680 gen_exit_tb(s);
4683 DISAS_INSN(m68k_movec)
4685 uint16_t ext;
4686 TCGv reg;
4688 if (IS_USER(s)) {
4689 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4690 return;
4693 ext = read_im16(env, s);
4695 if (ext & 0x8000) {
4696 reg = AREG(ext, 12);
4697 } else {
4698 reg = DREG(ext, 12);
4700 if (insn & 1) {
4701 gen_helper_m68k_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4702 } else {
4703 gen_helper_m68k_movec_from(reg, cpu_env, tcg_const_i32(ext & 0xfff));
4705 gen_exit_tb(s);
4708 DISAS_INSN(intouch)
4710 if (IS_USER(s)) {
4711 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4712 return;
4714 /* ICache fetch. Implement as no-op. */
4717 DISAS_INSN(cpushl)
4719 if (IS_USER(s)) {
4720 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4721 return;
4723 /* Cache push/invalidate. Implement as no-op. */
4726 DISAS_INSN(cpush)
4728 if (IS_USER(s)) {
4729 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4730 return;
4732 /* Cache push/invalidate. Implement as no-op. */
4735 DISAS_INSN(cinv)
4737 if (IS_USER(s)) {
4738 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4739 return;
4741 /* Invalidate cache line. Implement as no-op. */
4744 #if defined(CONFIG_SOFTMMU)
4745 DISAS_INSN(pflush)
4747 TCGv opmode;
4749 if (IS_USER(s)) {
4750 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4751 return;
4754 opmode = tcg_const_i32((insn >> 3) & 3);
4755 gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
4756 tcg_temp_free(opmode);
4759 DISAS_INSN(ptest)
4761 TCGv is_read;
4763 if (IS_USER(s)) {
4764 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4765 return;
4767 is_read = tcg_const_i32((insn >> 5) & 1);
4768 gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
4769 tcg_temp_free(is_read);
4771 #endif
4773 DISAS_INSN(wddata)
4775 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4778 DISAS_INSN(wdebug)
4780 M68kCPU *cpu = m68k_env_get_cpu(env);
4782 if (IS_USER(s)) {
4783 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4784 return;
4786 /* TODO: Implement wdebug. */
4787 cpu_abort(CPU(cpu), "WDEBUG not implemented");
4789 #endif
4791 DISAS_INSN(trap)
4793 gen_exception(s, s->base.pc_next, EXCP_TRAP0 + (insn & 0xf));
4796 static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
4798 switch (reg) {
4799 case M68K_FPIAR:
4800 tcg_gen_movi_i32(res, 0);
4801 break;
4802 case M68K_FPSR:
4803 tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
4804 break;
4805 case M68K_FPCR:
4806 tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
4807 break;
4811 static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
4813 switch (reg) {
4814 case M68K_FPIAR:
4815 break;
4816 case M68K_FPSR:
4817 tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
4818 break;
4819 case M68K_FPCR:
4820 gen_helper_set_fpcr(cpu_env, val);
4821 break;
4825 static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
4827 int index = IS_USER(s);
4828 TCGv tmp;
4830 tmp = tcg_temp_new();
4831 gen_load_fcr(s, tmp, reg);
4832 tcg_gen_qemu_st32(tmp, addr, index);
4833 tcg_temp_free(tmp);
4836 static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
4838 int index = IS_USER(s);
4839 TCGv tmp;
4841 tmp = tcg_temp_new();
4842 tcg_gen_qemu_ld32u(tmp, addr, index);
4843 gen_store_fcr(s, tmp, reg);
4844 tcg_temp_free(tmp);
4848 static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
4849 uint32_t insn, uint32_t ext)
4851 int mask = (ext >> 10) & 7;
4852 int is_write = (ext >> 13) & 1;
4853 int mode = extract32(insn, 3, 3);
4854 int i;
4855 TCGv addr, tmp;
4857 switch (mode) {
4858 case 0: /* Dn */
4859 if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
4860 gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4861 return;
4863 if (is_write) {
4864 gen_load_fcr(s, DREG(insn, 0), mask);
4865 } else {
4866 gen_store_fcr(s, DREG(insn, 0), mask);
4868 return;
4869 case 1: /* An, only with FPIAR */
4870 if (mask != M68K_FPIAR) {
4871 gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4872 return;
4874 if (is_write) {
4875 gen_load_fcr(s, AREG(insn, 0), mask);
4876 } else {
4877 gen_store_fcr(s, AREG(insn, 0), mask);
4879 return;
4880 default:
4881 break;
4884 tmp = gen_lea(env, s, insn, OS_LONG);
4885 if (IS_NULL_QREG(tmp)) {
4886 gen_addr_fault(s);
4887 return;
4890 addr = tcg_temp_new();
4891 tcg_gen_mov_i32(addr, tmp);
4893 /* mask:
4895 * 0b100 Floating-Point Control Register
4896 * 0b010 Floating-Point Status Register
4897 * 0b001 Floating-Point Instruction Address Register
4901 if (is_write && mode == 4) {
4902 for (i = 2; i >= 0; i--, mask >>= 1) {
4903 if (mask & 1) {
4904 gen_qemu_store_fcr(s, addr, 1 << i);
4905 if (mask != 1) {
4906 tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
4910 tcg_gen_mov_i32(AREG(insn, 0), addr);
4911 } else {
4912 for (i = 0; i < 3; i++, mask >>= 1) {
4913 if (mask & 1) {
4914 if (is_write) {
4915 gen_qemu_store_fcr(s, addr, 1 << i);
4916 } else {
4917 gen_qemu_load_fcr(s, addr, 1 << i);
4919 if (mask != 1 || mode == 3) {
4920 tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
4924 if (mode == 3) {
4925 tcg_gen_mov_i32(AREG(insn, 0), addr);
4928 tcg_temp_free_i32(addr);
4931 static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
4932 uint32_t insn, uint32_t ext)
4934 int opsize;
4935 TCGv addr, tmp;
4936 int mode = (ext >> 11) & 0x3;
4937 int is_load = ((ext & 0x2000) == 0);
4939 if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
4940 opsize = OS_EXTENDED;
4941 } else {
4942 opsize = OS_DOUBLE; /* FIXME */
4945 addr = gen_lea(env, s, insn, opsize);
4946 if (IS_NULL_QREG(addr)) {
4947 gen_addr_fault(s);
4948 return;
4951 tmp = tcg_temp_new();
4952 if (mode & 0x1) {
4953 /* Dynamic register list */
4954 tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
4955 } else {
4956 /* Static register list */
4957 tcg_gen_movi_i32(tmp, ext & 0xff);
4960 if (!is_load && (mode & 2) == 0) {
4961 /* predecrement addressing mode
4962 * only available to store register to memory
4964 if (opsize == OS_EXTENDED) {
4965 gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
4966 } else {
4967 gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
4969 } else {
4970 /* postincrement addressing mode */
4971 if (opsize == OS_EXTENDED) {
4972 if (is_load) {
4973 gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
4974 } else {
4975 gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
4977 } else {
4978 if (is_load) {
4979 gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
4980 } else {
4981 gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
4985 if ((insn & 070) == 030 || (insn & 070) == 040) {
4986 tcg_gen_mov_i32(AREG(insn, 0), tmp);
4988 tcg_temp_free(tmp);
4991 /* ??? FP exceptions are not implemented. Most exceptions are deferred until
4992 immediately before the next FP instruction is executed. */
4993 DISAS_INSN(fpu)
4995 uint16_t ext;
4996 int opmode;
4997 int opsize;
4998 TCGv_ptr cpu_src, cpu_dest;
5000 ext = read_im16(env, s);
5001 opmode = ext & 0x7f;
5002 switch ((ext >> 13) & 7) {
5003 case 0:
5004 break;
5005 case 1:
5006 goto undef;
5007 case 2:
5008 if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
5009 /* fmovecr */
5010 TCGv rom_offset = tcg_const_i32(opmode);
5011 cpu_dest = gen_fp_ptr(REG(ext, 7));
5012 gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
5013 tcg_temp_free_ptr(cpu_dest);
5014 tcg_temp_free(rom_offset);
5015 return;
5017 break;
5018 case 3: /* fmove out */
5019 cpu_src = gen_fp_ptr(REG(ext, 7));
5020 opsize = ext_opsize(ext, 10);
5021 if (gen_ea_fp(env, s, insn, opsize, cpu_src,
5022 EA_STORE, IS_USER(s)) == -1) {
5023 gen_addr_fault(s);
5025 gen_helper_ftst(cpu_env, cpu_src);
5026 tcg_temp_free_ptr(cpu_src);
5027 return;
5028 case 4: /* fmove to control register. */
5029 case 5: /* fmove from control register. */
5030 gen_op_fmove_fcr(env, s, insn, ext);
5031 return;
5032 case 6: /* fmovem */
5033 case 7:
5034 if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
5035 goto undef;
5037 gen_op_fmovem(env, s, insn, ext);
5038 return;
5040 if (ext & (1 << 14)) {
5041 /* Source effective address. */
5042 opsize = ext_opsize(ext, 10);
5043 cpu_src = gen_fp_result_ptr();
5044 if (gen_ea_fp(env, s, insn, opsize, cpu_src,
5045 EA_LOADS, IS_USER(s)) == -1) {
5046 gen_addr_fault(s);
5047 return;
5049 } else {
5050 /* Source register. */
5051 opsize = OS_EXTENDED;
5052 cpu_src = gen_fp_ptr(REG(ext, 10));
5054 cpu_dest = gen_fp_ptr(REG(ext, 7));
5055 switch (opmode) {
5056 case 0: /* fmove */
5057 gen_fp_move(cpu_dest, cpu_src);
5058 break;
5059 case 0x40: /* fsmove */
5060 gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
5061 break;
5062 case 0x44: /* fdmove */
5063 gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
5064 break;
5065 case 1: /* fint */
5066 gen_helper_firound(cpu_env, cpu_dest, cpu_src);
5067 break;
5068 case 2: /* fsinh */
5069 gen_helper_fsinh(cpu_env, cpu_dest, cpu_src);
5070 break;
5071 case 3: /* fintrz */
5072 gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
5073 break;
5074 case 4: /* fsqrt */
5075 gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
5076 break;
5077 case 0x41: /* fssqrt */
5078 gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
5079 break;
5080 case 0x45: /* fdsqrt */
5081 gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
5082 break;
5083 case 0x06: /* flognp1 */
5084 gen_helper_flognp1(cpu_env, cpu_dest, cpu_src);
5085 break;
5086 case 0x09: /* ftanh */
5087 gen_helper_ftanh(cpu_env, cpu_dest, cpu_src);
5088 break;
5089 case 0x0a: /* fatan */
5090 gen_helper_fatan(cpu_env, cpu_dest, cpu_src);
5091 break;
5092 case 0x0c: /* fasin */
5093 gen_helper_fasin(cpu_env, cpu_dest, cpu_src);
5094 break;
5095 case 0x0d: /* fatanh */
5096 gen_helper_fatanh(cpu_env, cpu_dest, cpu_src);
5097 break;
5098 case 0x0e: /* fsin */
5099 gen_helper_fsin(cpu_env, cpu_dest, cpu_src);
5100 break;
5101 case 0x0f: /* ftan */
5102 gen_helper_ftan(cpu_env, cpu_dest, cpu_src);
5103 break;
5104 case 0x10: /* fetox */
5105 gen_helper_fetox(cpu_env, cpu_dest, cpu_src);
5106 break;
5107 case 0x11: /* ftwotox */
5108 gen_helper_ftwotox(cpu_env, cpu_dest, cpu_src);
5109 break;
5110 case 0x12: /* ftentox */
5111 gen_helper_ftentox(cpu_env, cpu_dest, cpu_src);
5112 break;
5113 case 0x14: /* flogn */
5114 gen_helper_flogn(cpu_env, cpu_dest, cpu_src);
5115 break;
5116 case 0x15: /* flog10 */
5117 gen_helper_flog10(cpu_env, cpu_dest, cpu_src);
5118 break;
5119 case 0x16: /* flog2 */
5120 gen_helper_flog2(cpu_env, cpu_dest, cpu_src);
5121 break;
5122 case 0x18: /* fabs */
5123 gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
5124 break;
5125 case 0x58: /* fsabs */
5126 gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
5127 break;
5128 case 0x5c: /* fdabs */
5129 gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
5130 break;
5131 case 0x19: /* fcosh */
5132 gen_helper_fcosh(cpu_env, cpu_dest, cpu_src);
5133 break;
5134 case 0x1a: /* fneg */
5135 gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
5136 break;
5137 case 0x5a: /* fsneg */
5138 gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
5139 break;
5140 case 0x5e: /* fdneg */
5141 gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
5142 break;
5143 case 0x1c: /* facos */
5144 gen_helper_facos(cpu_env, cpu_dest, cpu_src);
5145 break;
5146 case 0x1d: /* fcos */
5147 gen_helper_fcos(cpu_env, cpu_dest, cpu_src);
5148 break;
5149 case 0x1e: /* fgetexp */
5150 gen_helper_fgetexp(cpu_env, cpu_dest, cpu_src);
5151 break;
5152 case 0x1f: /* fgetman */
5153 gen_helper_fgetman(cpu_env, cpu_dest, cpu_src);
5154 break;
5155 case 0x20: /* fdiv */
5156 gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5157 break;
5158 case 0x60: /* fsdiv */
5159 gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5160 break;
5161 case 0x64: /* fddiv */
5162 gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5163 break;
5164 case 0x21: /* fmod */
5165 gen_helper_fmod(cpu_env, cpu_dest, cpu_src, cpu_dest);
5166 break;
5167 case 0x22: /* fadd */
5168 gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5169 break;
5170 case 0x62: /* fsadd */
5171 gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5172 break;
5173 case 0x66: /* fdadd */
5174 gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5175 break;
5176 case 0x23: /* fmul */
5177 gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5178 break;
5179 case 0x63: /* fsmul */
5180 gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5181 break;
5182 case 0x67: /* fdmul */
5183 gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5184 break;
5185 case 0x24: /* fsgldiv */
5186 gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5187 break;
5188 case 0x25: /* frem */
5189 gen_helper_frem(cpu_env, cpu_dest, cpu_src, cpu_dest);
5190 break;
5191 case 0x26: /* fscale */
5192 gen_helper_fscale(cpu_env, cpu_dest, cpu_src, cpu_dest);
5193 break;
5194 case 0x27: /* fsglmul */
5195 gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5196 break;
5197 case 0x28: /* fsub */
5198 gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5199 break;
5200 case 0x68: /* fssub */
5201 gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5202 break;
5203 case 0x6c: /* fdsub */
5204 gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5205 break;
5206 case 0x30: case 0x31: case 0x32:
5207 case 0x33: case 0x34: case 0x35:
5208 case 0x36: case 0x37: {
5209 TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
5210 gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src);
5211 tcg_temp_free_ptr(cpu_dest2);
5213 break;
5214 case 0x38: /* fcmp */
5215 gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
5216 return;
5217 case 0x3a: /* ftst */
5218 gen_helper_ftst(cpu_env, cpu_src);
5219 return;
5220 default:
5221 goto undef;
5223 tcg_temp_free_ptr(cpu_src);
5224 gen_helper_ftst(cpu_env, cpu_dest);
5225 tcg_temp_free_ptr(cpu_dest);
5226 return;
5227 undef:
5228 /* FIXME: Is this right for offset addressing modes? */
5229 s->pc -= 2;
5230 disas_undef_fpu(env, s, insn);
5233 static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
5235 TCGv fpsr;
5237 c->g1 = 1;
5238 c->v2 = tcg_const_i32(0);
5239 c->g2 = 0;
5240 /* TODO: Raise BSUN exception. */
5241 fpsr = tcg_temp_new();
5242 gen_load_fcr(s, fpsr, M68K_FPSR);
5243 switch (cond) {
5244 case 0: /* False */
5245 case 16: /* Signaling False */
5246 c->v1 = c->v2;
5247 c->tcond = TCG_COND_NEVER;
5248 break;
5249 case 1: /* EQual Z */
5250 case 17: /* Signaling EQual Z */
5251 c->v1 = tcg_temp_new();
5252 c->g1 = 0;
5253 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5254 c->tcond = TCG_COND_NE;
5255 break;
5256 case 2: /* Ordered Greater Than !(A || Z || N) */
5257 case 18: /* Greater Than !(A || Z || N) */
5258 c->v1 = tcg_temp_new();
5259 c->g1 = 0;
5260 tcg_gen_andi_i32(c->v1, fpsr,
5261 FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5262 c->tcond = TCG_COND_EQ;
5263 break;
5264 case 3: /* Ordered Greater than or Equal Z || !(A || N) */
5265 case 19: /* Greater than or Equal Z || !(A || N) */
5266 c->v1 = tcg_temp_new();
5267 c->g1 = 0;
5268 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5269 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
5270 tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
5271 tcg_gen_or_i32(c->v1, c->v1, fpsr);
5272 tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5273 c->tcond = TCG_COND_NE;
5274 break;
5275 case 4: /* Ordered Less Than !(!N || A || Z); */
5276 case 20: /* Less Than !(!N || A || Z); */
5277 c->v1 = tcg_temp_new();
5278 c->g1 = 0;
5279 tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
5280 tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
5281 c->tcond = TCG_COND_EQ;
5282 break;
5283 case 5: /* Ordered Less than or Equal Z || (N && !A) */
5284 case 21: /* Less than or Equal Z || (N && !A) */
5285 c->v1 = tcg_temp_new();
5286 c->g1 = 0;
5287 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5288 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
5289 tcg_gen_andc_i32(c->v1, fpsr, c->v1);
5290 tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
5291 c->tcond = TCG_COND_NE;
5292 break;
5293 case 6: /* Ordered Greater or Less than !(A || Z) */
5294 case 22: /* Greater or Less than !(A || Z) */
5295 c->v1 = tcg_temp_new();
5296 c->g1 = 0;
5297 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
5298 c->tcond = TCG_COND_EQ;
5299 break;
5300 case 7: /* Ordered !A */
5301 case 23: /* Greater, Less or Equal !A */
5302 c->v1 = tcg_temp_new();
5303 c->g1 = 0;
5304 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5305 c->tcond = TCG_COND_EQ;
5306 break;
5307 case 8: /* Unordered A */
5308 case 24: /* Not Greater, Less or Equal A */
5309 c->v1 = tcg_temp_new();
5310 c->g1 = 0;
5311 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5312 c->tcond = TCG_COND_NE;
5313 break;
5314 case 9: /* Unordered or Equal A || Z */
5315 case 25: /* Not Greater or Less then A || Z */
5316 c->v1 = tcg_temp_new();
5317 c->g1 = 0;
5318 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
5319 c->tcond = TCG_COND_NE;
5320 break;
5321 case 10: /* Unordered or Greater Than A || !(N || Z)) */
5322 case 26: /* Not Less or Equal A || !(N || Z)) */
5323 c->v1 = tcg_temp_new();
5324 c->g1 = 0;
5325 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5326 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
5327 tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
5328 tcg_gen_or_i32(c->v1, c->v1, fpsr);
5329 tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5330 c->tcond = TCG_COND_NE;
5331 break;
5332 case 11: /* Unordered or Greater or Equal A || Z || !N */
5333 case 27: /* Not Less Than A || Z || !N */
5334 c->v1 = tcg_temp_new();
5335 c->g1 = 0;
5336 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5337 tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5338 c->tcond = TCG_COND_NE;
5339 break;
5340 case 12: /* Unordered or Less Than A || (N && !Z) */
5341 case 28: /* Not Greater than or Equal A || (N && !Z) */
5342 c->v1 = tcg_temp_new();
5343 c->g1 = 0;
5344 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5345 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
5346 tcg_gen_andc_i32(c->v1, fpsr, c->v1);
5347 tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
5348 c->tcond = TCG_COND_NE;
5349 break;
5350 case 13: /* Unordered or Less or Equal A || Z || N */
5351 case 29: /* Not Greater Than A || Z || N */
5352 c->v1 = tcg_temp_new();
5353 c->g1 = 0;
5354 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5355 c->tcond = TCG_COND_NE;
5356 break;
5357 case 14: /* Not Equal !Z */
5358 case 30: /* Signaling Not Equal !Z */
5359 c->v1 = tcg_temp_new();
5360 c->g1 = 0;
5361 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5362 c->tcond = TCG_COND_EQ;
5363 break;
5364 case 15: /* True */
5365 case 31: /* Signaling True */
5366 c->v1 = c->v2;
5367 c->tcond = TCG_COND_ALWAYS;
5368 break;
5370 tcg_temp_free(fpsr);
/*
 * Emit a conditional branch to @l1, taken when FP condition code
 * @cond (one of the 32 FPcc encodings decoded by gen_fcc_cond) holds.
 */
static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
{
    DisasCompare c;

    gen_fcc_cond(&c, s, cond);
    /* Flush the lazily-evaluated CC state before branching. */
    update_cc_op(s);
    tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
    free_cond(&c);
}
/*
 * FBcc: FP conditional branch.  Bit 6 of the opcode selects a 32-bit
 * displacement (two extension words) instead of the 16-bit one.
 * The displacement is relative to the address of the first extension
 * word, which is s->pc before the reads (saved in @base).
 */
DISAS_INSN(fbcc)
{
    uint32_t offset;
    uint32_t base;
    TCGLabel *l1;

    base = s->pc;
    offset = (int16_t)read_im16(env, s);
    if (insn & (1 << 6)) {
        /* 32-bit displacement: high word already read, append low word. */
        offset = (offset << 16) | read_im16(env, s);
    }

    l1 = gen_new_label();
    update_cc_op(s);
    gen_fjmpcc(s, insn & 0x3f, l1);
    /* Fall-through path: next sequential insn. */
    gen_jmp_tb(s, 0, s->pc);
    gen_set_label(l1);
    /* Taken path: branch target. */
    gen_jmp_tb(s, 1, base + offset);
}
/*
 * FScc: set a byte to all-ones (0xff) if the FP condition in the
 * extension word holds, else to zero.
 */
DISAS_INSN(fscc)
{
    DisasCompare c;
    int cond;
    TCGv tmp;
    uint16_t ext;

    ext = read_im16(env, s);
    cond = ext & 0x3f;
    gen_fcc_cond(&c, s, cond);

    tmp = tcg_temp_new();
    /* setcond yields 0/1; negate to get the 0x00/0xff byte result. */
    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
    free_cond(&c);

    tcg_gen_neg_i32(tmp, tmp);
    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
    tcg_temp_free(tmp);
}
5423 #if defined(CONFIG_SOFTMMU)
/*
 * FRESTORE: reload FPU internal state from a state frame in memory.
 * Privileged; only the 68040-style frame is recognized here, and its
 * contents are currently ignored (the EA is evaluated for faults only).
 */
DISAS_INSN(frestore)
{
    TCGv addr;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
        SRC_EA(env, addr, OS_LONG, 0, NULL);
        /* FIXME: check the state frame */
    } else {
        disas_undef(env, s, insn);
    }
}
/*
 * FSAVE: store FPU internal state to memory.  Privileged.  Since the
 * emulated FPU keeps no hidden mid-instruction state, a 68040 IDLE
 * frame (0x41000000) is always written.
 */
DISAS_INSN(fsave)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
        /* always write IDLE */
        TCGv idle = tcg_const_i32(0x41000000);
        DEST_EA(env, insn, OS_LONG, idle, NULL);
        tcg_temp_free(idle);
    } else {
        disas_undef(env, s, insn);
    }
}
5456 #endif
/*
 * Extract the upper or lower 16-bit half of @val as a MAC multiplier
 * operand, positioned/extended according to the current MACSR mode:
 * fractional (FI) keeps the halfword in the high 16 bits, signed (SU)
 * sign-extends, and the default unsigned mode zero-extends.
 * Returns a freshly allocated temporary.
 */
static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
{
    TCGv tmp = tcg_temp_new();
    if (s->env->macsr & MACSR_FI) {
        if (upper)
            tcg_gen_andi_i32(tmp, val, 0xffff0000);
        else
            tcg_gen_shli_i32(tmp, val, 16);
    } else if (s->env->macsr & MACSR_SU) {
        if (upper)
            tcg_gen_sari_i32(tmp, val, 16);
        else
            tcg_gen_ext16s_i32(tmp, val);
    } else {
        if (upper)
            tcg_gen_shri_i32(tmp, val, 16);
        else
            tcg_gen_ext16u_i32(tmp, val);
    }
    return tmp;
}
/* Clear the sticky MAC result flags (V/Z/N/EV) before a new operation. */
static void gen_mac_clear_flags(void)
{
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
}
/*
 * ColdFire EMAC multiply-accumulate (MAC/MSAC), optionally with a
 * parallel memory load and, on EMAC_B, a dual-accumulate form.
 * The multiply mode (fractional / signed / unsigned) is taken from the
 * translation-time MACSR snapshot, so the TB is specialized on it.
 */
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    /* Lazily allocate the 64-bit scratch used for the product. */
    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load. */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /* Load the value now to ensure correct exception behavior.
           Perform writeback after reading the MAC inputs. */
        loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));

        acc ^= 1;
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars. */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it. */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word-sized operands: pick upper/lower halves per ext bits. */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        /* Apply the scale factor encoded in ext bits 9-10. */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply. */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars. */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated. */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    /* Bit 8 distinguishes MSAC (subtract) from MAC (add). */
    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars. */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant. */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier. */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars. */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated. */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars. */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));

    if (insn & 0x30) {
        /* Writeback of the parallel load, deferred until after the
           MAC inputs have been consumed. */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /* FIXME: Should address writeback happen with the masked or
           unmasked value? */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment. */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement. */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
        tcg_temp_free(loadval);
    }
}
/*
 * MOVE ACCx,Rx: read a MAC accumulator into a data/address register,
 * rounding or saturating per MACSR mode.  Bit 6 of the opcode requests
 * clearing the accumulator (MOVCLR), which also clears its PAV flag.
 */
DISAS_INSN(from_mac)
{
    TCGv rx;
    TCGv_i64 acc;
    int accnum;

    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    if (s->env->macsr & MACSR_FI) {
        gen_helper_get_macf(rx, cpu_env, acc);
    } else if ((s->env->macsr & MACSR_OMC) == 0) {
        /* No saturation: just truncate the 64-bit accumulator. */
        tcg_gen_extrl_i64_i32(rx, acc);
    } else if (s->env->macsr & MACSR_SU) {
        gen_helper_get_macs(rx, acc);
    } else {
        gen_helper_get_macu(rx, acc);
    }
    if (insn & 0x40) {
        tcg_gen_movi_i64(acc, 0);
        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    }
}
5675 DISAS_INSN(move_mac)
5677 /* FIXME: This can be done without a helper. */
5678 int src;
5679 TCGv dest;
5680 src = insn & 3;
5681 dest = tcg_const_i32((insn >> 9) & 3);
5682 gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
5683 gen_mac_clear_flags();
5684 gen_helper_mac_set_flags(cpu_env, dest);
5687 DISAS_INSN(from_macsr)
5689 TCGv reg;
5691 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5692 tcg_gen_mov_i32(reg, QREG_MACSR);
5695 DISAS_INSN(from_mask)
5697 TCGv reg;
5698 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5699 tcg_gen_mov_i32(reg, QREG_MAC_MASK);
/*
 * MOVE ACCextXY,Rx: read the accumulator extension word pair selected
 * by bit 10 (ACCext01 vs ACCext23), decoded per MACSR mode by a helper.
 */
DISAS_INSN(from_mext)
{
    TCGv reg;
    TCGv acc;

    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
    if (s->env->macsr & MACSR_FI)
        gen_helper_get_mac_extf(reg, cpu_env, acc);
    else
        gen_helper_get_mac_exti(reg, cpu_env, acc);
}
/*
 * MOVE MACSR,CCR: copy the low four MACSR flag bits into the CCR.
 * NOTE(review): this feeds the raw low nibble of MACSR to set_sr;
 * MACSR and CCR do not obviously share the same N/Z/V/C bit layout,
 * so the mapping looks suspect -- confirm against the ColdFire EMAC
 * programming model.
 */
DISAS_INSN(macsr_to_ccr)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
    gen_helper_set_sr(cpu_env, tmp);
    tcg_temp_free(tmp);
    /* Helper rewrote the flags; switch to the materialized-flags CC op. */
    set_cc_op(s, CC_OP_FLAGS);
}
/*
 * MOVE <ea>,ACCx: load a 32-bit value into a MAC accumulator.
 * Fractional mode shifts into the 8.32 internal format; signed mode
 * sign-extends, unsigned mode zero-extends.  Clears the accumulator's
 * PAV overflow flag and recomputes MACSR flags.
 */
DISAS_INSN(to_mac)
{
    TCGv_i64 acc;
    TCGv val;
    int accnum;

    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    SRC_EA(env, val, OS_LONG, 0, NULL);
    if (s->env->macsr & MACSR_FI) {
        tcg_gen_ext_i32_i64(acc, val);
        tcg_gen_shli_i64(acc, acc, 8);
    } else if (s->env->macsr & MACSR_SU) {
        tcg_gen_ext_i32_i64(acc, val);
    } else {
        tcg_gen_extu_i32_i64(acc, val);
    }
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
}
/*
 * MOVE <ea>,MACSR: the new MACSR may change the multiply mode that
 * this TB was specialized on, so end the TB and return to the loop.
 */
DISAS_INSN(to_macsr)
{
    TCGv val;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    gen_helper_set_macsr(cpu_env, val);
    gen_exit_tb(s);
}
/*
 * MOVE <ea>,MASK: load the MAC address mask.  The upper 16 bits are
 * architecturally always ones.
 */
DISAS_INSN(to_mask)
{
    TCGv val;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
}
/*
 * MOVE Rx,ACCextXY: write the accumulator extension word pair selected
 * by bit 10, encoded per MACSR mode by the matching helper.
 */
DISAS_INSN(to_mext)
{
    TCGv val;
    TCGv acc;

    SRC_EA(env, val, OS_LONG, 0, NULL);
    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
    if (s->env->macsr & MACSR_FI)
        gen_helper_set_mac_extf(cpu_env, val, acc);
    else if (s->env->macsr & MACSR_SU)
        gen_helper_set_mac_exts(cpu_env, val, acc);
    else
        gen_helper_set_mac_extu(cpu_env, val, acc);
}
5773 static disas_proc opcode_table[65536];
/*
 * Install @proc in opcode_table for every 16-bit value matching
 * @opcode under @mask.  Only the contiguous run of known (masked)
 * top bits is exploited; all combinations of the remaining low bits
 * are enumerated and filtered against the mask.
 */
static void
register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
{
    int i;
    int from;
    int to;

    /* Sanity check.  All set bits must be included in the mask. */
    if (opcode & ~mask) {
        fprintf(stderr,
                "qemu internal error: bogus opcode definition %04x/%04x\n",
                opcode, mask);
        abort();
    }
    /* This could probably be cleverer.  For now just optimize the case where
       the top bits are known.  */
    /* Find the first zero bit in the mask.  */
    i = 0x8000;
    while ((i & mask) != 0)
        i >>= 1;
    /* Iterate over all combinations of this and lower bits.  */
    if (i == 0)
        i = 1;
    else
        i <<= 1;
    from = opcode & ~(i - 1);
    to = from + i;
    for (i = from; i < to; i++) {
        if ((i & mask) == opcode)
            opcode_table[i] = proc;
    }
}
/* Register m68k opcode handlers.  Order is important.
   Later insn override earlier ones.  */
void register_m68k_insns (CPUM68KState *env)
{
    /* Build the opcode table only once to avoid
       multithreading issues. */
    if (opcode_table[0] != NULL) {
        return;
    }

    /* use BASE() for instruction available
     * for CF_ISA_A and M68000.
     */
#define BASE(name, opcode, mask) \
    register_opcode(disas_##name, 0x##opcode, 0x##mask)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        BASE(name, opcode, mask); \
    } while(0)
    BASE(undef, 0000, 0000);
    INSN(arith_im, 0080, fff8, CF_ISA_A);
    INSN(arith_im, 0000, ff00, M68000);
    INSN(chk2, 00c0, f9c0, CHK2);
    INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
    BASE(bitop_reg, 0100, f1c0);
    BASE(bitop_reg, 0140, f1c0);
    BASE(bitop_reg, 0180, f1c0);
    BASE(bitop_reg, 01c0, f1c0);
    INSN(movep, 0108, f138, MOVEP);
    INSN(arith_im, 0280, fff8, CF_ISA_A);
    INSN(arith_im, 0200, ff00, M68000);
    INSN(undef, 02c0, ffc0, M68000);
    INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im, 0480, fff8, CF_ISA_A);
    INSN(arith_im, 0400, ff00, M68000);
    INSN(undef, 04c0, ffc0, M68000);
    INSN(arith_im, 0600, ff00, M68000);
    INSN(undef, 06c0, ffc0, M68000);
    INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im, 0680, fff8, CF_ISA_A);
    INSN(arith_im, 0c00, ff38, CF_ISA_A);
    INSN(arith_im, 0c00, ff00, M68000);
    BASE(bitop_im, 0800, ffc0);
    BASE(bitop_im, 0840, ffc0);
    BASE(bitop_im, 0880, ffc0);
    BASE(bitop_im, 08c0, ffc0);
    INSN(arith_im, 0a80, fff8, CF_ISA_A);
    INSN(arith_im, 0a00, ff00, M68000);
#if defined(CONFIG_SOFTMMU)
    INSN(moves, 0e00, ff00, M68000);
#endif
    INSN(cas, 0ac0, ffc0, CAS);
    INSN(cas, 0cc0, ffc0, CAS);
    INSN(cas, 0ec0, ffc0, CAS);
    INSN(cas2w, 0cfc, ffff, CAS);
    INSN(cas2l, 0efc, ffff, CAS);
    BASE(move, 1000, f000);
    BASE(move, 2000, f000);
    BASE(move, 3000, f000);
    INSN(chk, 4000, f040, M68000);
    INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
    INSN(negx, 4080, fff8, CF_ISA_A);
    INSN(negx, 4000, ff00, M68000);
    INSN(undef, 40c0, ffc0, M68000);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, ffc0, M68000);
    BASE(lea, 41c0, f1c0);
    BASE(clr, 4200, ff00);
    BASE(undef, 42c0, ffc0);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(move_from_ccr, 42c0, ffc0, M68000);
    INSN(neg, 4480, fff8, CF_ISA_A);
    INSN(neg, 4400, ff00, M68000);
    INSN(undef, 44c0, ffc0, M68000);
    BASE(move_to_ccr, 44c0, ffc0);
    INSN(not, 4680, fff8, CF_ISA_A);
    INSN(not, 4600, ff00, M68000);
#if defined(CONFIG_SOFTMMU)
    BASE(move_to_sr, 46c0, ffc0);
#endif
    INSN(nbcd, 4800, ffc0, M68000);
    INSN(linkl, 4808, fff8, M68000);
    BASE(pea, 4840, ffc0);
    BASE(swap, 4840, fff8);
    INSN(bkpt, 4848, fff8, BKPT);
    INSN(movem, 48d0, fbf8, CF_ISA_A);
    INSN(movem, 48e8, fbf8, CF_ISA_A);
    INSN(movem, 4880, fb80, M68000);
    BASE(ext, 4880, fff8);
    BASE(ext, 48c0, fff8);
    BASE(ext, 49c0, fff8);
    BASE(tst, 4a00, ff00);
    INSN(tas, 4ac0, ffc0, CF_ISA_B);
    INSN(tas, 4ac0, ffc0, M68000);
#if defined(CONFIG_SOFTMMU)
    INSN(halt, 4ac8, ffff, CF_ISA_A);
#endif
    INSN(pulse, 4acc, ffff, CF_ISA_A);
    BASE(illegal, 4afc, ffff);
    INSN(mull, 4c00, ffc0, CF_ISA_A);
    INSN(mull, 4c00, ffc0, LONG_MULDIV);
    INSN(divl, 4c40, ffc0, CF_ISA_A);
    INSN(divl, 4c40, ffc0, LONG_MULDIV);
    INSN(sats, 4c80, fff8, CF_ISA_B);
    BASE(trap, 4e40, fff0);
    BASE(link, 4e50, fff8);
    BASE(unlk, 4e58, fff8);
#if defined(CONFIG_SOFTMMU)
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(reset, 4e70, ffff, M68000);
    BASE(stop, 4e72, ffff);
    BASE(rte, 4e73, ffff);
    INSN(cf_movec, 4e7b, ffff, CF_ISA_A);
    INSN(m68k_movec, 4e7a, fffe, M68000);
#endif
    BASE(nop, 4e71, ffff);
    INSN(rtd, 4e74, ffff, RTD);
    BASE(rts, 4e75, ffff);
    BASE(jump, 4e80, ffc0);
    BASE(jump, 4ec0, ffc0);
    INSN(addsubq, 5000, f080, M68000);
    BASE(addsubq, 5080, f0c0);
    INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx */
    INSN(scc, 50c0, f0c0, M68000);   /* Scc.B <EA> */
    INSN(dbcc, 50c8, f0f8, M68000);
    INSN(tpf, 51f8, fff8, CF_ISA_A);

    /* Branch instructions. */
    BASE(branch, 6000, f000);
    /* Disable long branch instructions, then add back the ones we want. */
    BASE(undef, 60ff, f0ff); /* All long branches. */
    INSN(branch, 60ff, f0ff, CF_ISA_B);
    INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch, 60ff, ffff, BRAL);
    INSN(branch, 60ff, f0ff, BCCL);

    BASE(moveq, 7000, f100);
    INSN(mvzs, 7100, f100, CF_ISA_B);
    BASE(or, 8000, f000);
    BASE(divw, 80c0, f0c0);
    INSN(sbcd_reg, 8100, f1f8, M68000);
    INSN(sbcd_mem, 8108, f1f8, M68000);
    BASE(addsub, 9000, f000);
    INSN(undef, 90c0, f0c0, CF_ISA_A);
    INSN(subx_reg, 9180, f1f8, CF_ISA_A);
    INSN(subx_reg, 9100, f138, M68000);
    INSN(subx_mem, 9108, f138, M68000);
    INSN(suba, 91c0, f1c0, CF_ISA_A);
    INSN(suba, 90c0, f0c0, M68000);

    BASE(undef_mac, a000, f000);
    INSN(mac, a000, f100, CF_EMAC);
    INSN(from_mac, a180, f9b0, CF_EMAC);
    INSN(move_mac, a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac, a100, f9c0, CF_EMAC);
    INSN(to_macsr, a900, ffc0, CF_EMAC);
    INSN(to_mext, ab00, fbc0, CF_EMAC);
    INSN(to_mask, ad00, ffc0, CF_EMAC);

    INSN(mov3q, a140, f1c0, CF_ISA_B);
    INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp, b080, f1c0, CF_ISA_A);
    INSN(cmpa, b1c0, f1c0, CF_ISA_A);
    INSN(cmp, b000, f100, M68000);
    INSN(eor, b100, f100, M68000);
    INSN(cmpm, b108, f138, M68000);
    INSN(cmpa, b0c0, f0c0, M68000);
    INSN(eor, b180, f1c0, CF_ISA_A);
    BASE(and, c000, f000);
    INSN(exg_dd, c140, f1f8, M68000);
    INSN(exg_aa, c148, f1f8, M68000);
    INSN(exg_da, c188, f1f8, M68000);
    BASE(mulw, c0c0, f0c0);
    INSN(abcd_reg, c100, f1f8, M68000);
    INSN(abcd_mem, c108, f1f8, M68000);
    BASE(addsub, d000, f000);
    INSN(undef, d0c0, f0c0, CF_ISA_A);
    INSN(addx_reg, d180, f1f8, CF_ISA_A);
    INSN(addx_reg, d100, f138, M68000);
    INSN(addx_mem, d108, f138, M68000);
    INSN(adda, d1c0, f1c0, CF_ISA_A);
    INSN(adda, d0c0, f0c0, M68000);
    INSN(shift_im, e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(shift8_im, e000, f0f0, M68000);
    INSN(shift16_im, e040, f0f0, M68000);
    INSN(shift_im, e080, f0f0, M68000);
    INSN(shift8_reg, e020, f0f0, M68000);
    INSN(shift16_reg, e060, f0f0, M68000);
    INSN(shift_reg, e0a0, f0f0, M68000);
    INSN(shift_mem, e0c0, fcc0, M68000);
    INSN(rotate_im, e090, f0f0, M68000);
    INSN(rotate8_im, e010, f0f0, M68000);
    INSN(rotate16_im, e050, f0f0, M68000);
    INSN(rotate_reg, e0b0, f0f0, M68000);
    INSN(rotate8_reg, e030, f0f0, M68000);
    INSN(rotate16_reg, e070, f0f0, M68000);
    INSN(rotate_mem, e4c0, fcc0, M68000);
    INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
    INSN(bfins_mem, efc0, ffc0, BITFIELD);
    INSN(bfins_reg, efc0, fff8, BITFIELD);
    INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
    INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
    INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
    INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
    INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
    INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
    INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
    INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
    INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
    INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */
    BASE(undef_fpu, f000, f000);
    INSN(fpu, f200, ffc0, CF_FPU);
    INSN(fbcc, f280, ffc0, CF_FPU);
    INSN(fpu, f200, ffc0, FPU);
    INSN(fscc, f240, ffc0, FPU);
    INSN(fbcc, f280, ff80, FPU);
#if defined(CONFIG_SOFTMMU)
    INSN(frestore, f340, ffc0, CF_FPU);
    INSN(fsave, f300, ffc0, CF_FPU);
    INSN(frestore, f340, ffc0, FPU);
    INSN(fsave, f300, ffc0, FPU);
    INSN(intouch, f340, ffc0, CF_ISA_A);
    INSN(cpushl, f428, ff38, CF_ISA_A);
    INSN(cpush, f420, ff20, M68040);
    INSN(cinv, f400, ff20, M68040);
    INSN(pflush, f500, ffe0, M68040);
    INSN(ptest, f548, ffd8, M68040);
    INSN(wddata, fb00, ff00, CF_ISA_A);
    INSN(wdebug, fbc0, ffc0, CF_ISA_A);
#endif
    INSN(move16_mem, f600, ffe0, M68040);
    INSN(move16_reg, f620, fff8, M68040);
#undef INSN
}
/* Translator hook: set up per-TB DisasContext state before decoding. */
static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUM68KState *env = cpu->env_ptr;

    dc->env = env;
    dc->pc = dc->base.pc_first;
    /* CC state is unknown at TB entry; evaluate lazily. */
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_synced = 1;
    dc->done_mac = 0;
    dc->writeback_mask = 0;
    init_release_array(dc);
}
6066 static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
/*
 * Translator hook: record pc and cc_op for this insn so that
 * restore_state_to_opc() can reconstruct them on an exception.
 */
static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}
/* Translator hook: raise EXCP_DEBUG when a guest breakpoint is hit. */
static bool m68k_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                     const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    gen_exception(dc, dc->base.pc_next, EXCP_DEBUG);
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order to for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing.  */
    dc->base.pc_next += 2;
    return true;
}
/*
 * Translator hook: decode and translate one guest instruction by
 * dispatching through opcode_table, then flush deferred register
 * writebacks and release temporaries.
 */
static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUM68KState *env = cpu->env_ptr;
    uint16_t insn = read_im16(env, dc);

    opcode_table[insn](env, dc, insn);
    do_writebacks(dc);
    do_release(dc);

    dc->base.pc_next = dc->pc;

    if (dc->base.is_jmp == DISAS_NEXT) {
        /* Stop translation when the next insn might touch a new page.
         * This ensures that prefetch aborts at the right place.
         *
         * We cannot determine the size of the next insn without
         * completely decoding it.  However, the maximum insn size
         * is 32 bytes, so end if we do not have that much remaining.
         * This may produce several small TBs at the end of each page,
         * but they will all be linked with goto_tb.
         *
         * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also
         * smaller than MC68020's.
         */
        target_ulong start_page_offset
            = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK);

        if (start_page_offset >= TARGET_PAGE_SIZE - 32) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
/*
 * Translator hook: emit the TB epilogue according to how decoding
 * ended (page limit, direct jump, or full exit to the main loop).
 */
static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* An exception was already raised; nothing more to emit. */
        return;
    }
    if (dc->base.singlestep_enabled) {
        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
        return;
    }

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        update_cc_op(dc);
        gen_jmp_tb(dc, 0, dc->pc);
        break;
    case DISAS_JUMP:
        /* We updated CC_OP and PC in gen_jmp/gen_jmp_im. */
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_EXIT:
        /* We updated CC_OP and PC in gen_exit_tb, but also modified
           other state that may require returning to the main loop. */
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Translator hook: log the guest code of this TB for -d in_asm. */
static void m68k_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
/* Hook table wiring the m68k front end into the generic translator loop. */
static const TranslatorOps m68k_tr_ops = {
    .init_disas_context = m68k_tr_init_disas_context,
    .tb_start = m68k_tr_tb_start,
    .insn_start = m68k_tr_insn_start,
    .breakpoint_check = m68k_tr_breakpoint_check,
    .translate_insn = m68k_tr_translate_insn,
    .tb_stop = m68k_tr_tb_stop,
    .disas_log = m68k_tr_disas_log,
};
/* Entry point: translate one guest TB into TCG ops. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    translator_loop(&m68k_tr_ops, &dc.base, cpu, tb, max_insns);
}
/*
 * Convert an 80-bit FP register image (@high:@low) to a host double
 * for register-dump display.  The float64 result is reinterpreted as
 * a double through a union; extra floatx80 precision is lost, which
 * is acceptable for %g printing.
 */
static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
{
    floatx80 a = { .high = high, .low = low };
    union {
        float64 f64;
        double d;
    } u;

    u.f64 = floatx80_to_float64(a, &env->fp_status);
    return u.d;
}
/*
 * Dump the m68k CPU register state (D/A/F registers, PC, SR, FPSR,
 * FPCR and, under softmmu, the supervisor stack/MMU registers) to @f
 * for the monitor / -d cpu logging.
 */
void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;
    int i;
    uint16_t sr;

    for (i = 0; i < 8; i++) {
        qemu_fprintf(f, "D%d = %08x A%d = %08x "
                     "F%d = %04x %016"PRIx64" (%12g)\n",
                     i, env->dregs[i], i, env->aregs[i],
                     i, env->fregs[i].l.upper, env->fregs[i].l.lower,
                     floatx80_to_double(env, env->fregs[i].l.upper,
                                        env->fregs[i].l.lower));
    }
    qemu_fprintf(f, "PC = %08x ", env->pc);
    /* SR holds only the system byte; merge in the lazily-computed CCR. */
    sr = env->sr | cpu_m68k_get_ccr(env);
    qemu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
                 sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
                 (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
                 (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
                 (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
                 (sr & CCF_C) ? 'C' : '-');
    qemu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
                 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
                 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
                 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
                 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
    qemu_fprintf(f, "\n "
                 "FPCR = %04x ", env->fpcr);
    /* Decode the FPCR rounding precision field. */
    switch (env->fpcr & FPCR_PREC_MASK) {
    case FPCR_PREC_X:
        qemu_fprintf(f, "X ");
        break;
    case FPCR_PREC_S:
        qemu_fprintf(f, "S ");
        break;
    case FPCR_PREC_D:
        qemu_fprintf(f, "D ");
        break;
    }
    /* Decode the FPCR rounding mode field. */
    switch (env->fpcr & FPCR_RND_MASK) {
    case FPCR_RND_N:
        qemu_fprintf(f, "RN ");
        break;
    case FPCR_RND_Z:
        qemu_fprintf(f, "RZ ");
        break;
    case FPCR_RND_M:
        qemu_fprintf(f, "RM ");
        break;
    case FPCR_RND_P:
        qemu_fprintf(f, "RP ");
        break;
    }
    qemu_fprintf(f, "\n");
#ifdef CONFIG_SOFTMMU
    qemu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
                 env->current_sp == M68K_SSP ? "->" : " ", env->sp[M68K_SSP],
                 env->current_sp == M68K_USP ? "->" : " ", env->sp[M68K_USP],
                 env->current_sp == M68K_ISP ? "->" : " ", env->sp[M68K_ISP]);
    qemu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
    qemu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
    qemu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
                 env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
    qemu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
                 env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
                 env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
    qemu_fprintf(f, "MMUSR %08x, fault at %08x\n",
                 env->mmu.mmusr, env->mmu.ar);
#endif
}
6262 void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
6263 target_ulong *data)
6265 int cc_op = data[1];
6266 env->pc = data[0];
6267 if (cc_op != CC_OP_DYNAMIC) {
6268 env->cc_op = cc_op;