target-m68k: and can manage word and byte operands
[qemu/kevin.git] / target-m68k / translate.c
blob3f7db833f84dd3b4a07bee60c966784983a5fad1
1 /*
2 * m68k translation
4 * Copyright (c) 2005-2007 CodeSourcery
5 * Written by Paul Brook
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "qemu/log.h"
27 #include "exec/cpu_ldst.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
32 #include "trace-tcg.h"
33 #include "exec/log.h"
//#define DEBUG_DISPATCH 1

/* Fake floating point.  The FPU is modelled with 64-bit values only,
   so the f64 ops simply alias the i64 ops.  */
#define tcg_gen_mov_f64 tcg_gen_mov_i64
#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64

/* Declare one static TCG global per fixed CPU field listed in
   qregs.def; the globals are created in m68k_tcg_init().  */
#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#define DEFF64(name, offset) static TCGv_i64 QREG_##name;
#include "qregs.def"
#undef DEFO32
#undef DEFO64
#undef DEFF64

static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;

static TCGv_env cpu_env;

/* Backing storage for register names: 8 each of "Dn"/"An"/"Fn"
   (3 bytes incl. NUL) plus 4 of "ACCn" (5 bytes incl. NUL).  */
static char cpu_reg_names[3*8*3 + 5*4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
static TCGv_i64 cpu_fregs[8];
static TCGv_i64 cpu_macc[4];

/* Extract the 3-bit register number from INSN at bit position POS.  */
#define REG(insn, pos) (((insn) >> (pos)) & 7)
#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
#define AREG(insn, pos) cpu_aregs[REG(insn, pos)]
#define FREG(insn, pos) cpu_fregs[REG(insn, pos)]
#define MACREG(acc) cpu_macc[acc]
#define QREG_SP cpu_aregs[7]

/* Sentinel returned by gen_lea()/gen_ea() for invalid addressing modes.  */
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
/* Used to distinguish stores from bad addressing modes.  */
static TCGv store_dummy;
74 #include "exec/gen-icount.h"
/* Allocate the fixed TCG globals that back the CPU register file and
   condition-code state.  Called once at CPU creation time.  */
void m68k_tcg_init(void)
{
    char *p;
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFF64(name, offset) DEFO64(name, offset)
#include "qregs.def"
#undef DEFO32
#undef DEFO64
#undef DEFF64

    /* halted and exception_index live in the parent CPUState, which sits
       before env inside M68kCPU; hence the negative offsets.  */
    cpu_halted = tcg_global_mem_new_i32(cpu_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),
                                                 "EXCEPTION");

    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
        sprintf(p, "F%d", i);
        cpu_fregs[i] = tcg_global_mem_new_i64(cpu_env,
                                              offsetof(CPUM68KState, fregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    /* Negative env offsets can never name a real field; these two act
       purely as out-of-band sentinel values.  */
    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
}
/* internal defines */

/* Per-translation decoder state, threaded through all disas_* routines.  */
typedef struct DisasContext {
    CPUM68KState *env;
    target_ulong insn_pc; /* Start of the current instruction.  */
    target_ulong pc;      /* Next address to decode; advances past
                             extension words as they are consumed.  */
    int is_jmp;           /* DISAS_* disposition of the block.  */
    CCOp cc_op;           /* Current CC operation */
    int cc_op_synced;     /* True when QREG_CC_OP matches cc_op.  */
    int user;             /* Non-zero when translating user-mode code.  */
    uint32_t fpcr;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    TCGv_i64 mactmp;      /* Scratch temp for MAC insns.  */
    int done_mac;
} DisasContext;

#define DISAS_JUMP_NEXT 4

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) s->user
#endif

/* XXX: move that elsewhere */
/* ??? Fix exceptions.  */
static void *gen_throws_exception;
#define gen_last_qop NULL

/* Signature of the per-instruction translation routines.  */
typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);

#ifdef DEBUG_DISPATCH
/* Debug build: wrap each handler so its dispatch is logged.  */
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn);                       \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)                             \
    {                                                                   \
        qemu_log("Dispatch " #name "\n");                               \
        real_disas_##name(env, s, insn);                                \
    }                                                                   \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn)
#else
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)
#endif
/* For each lazy CC_OP_* representation, the set of flag bits whose TCG
   values are live inputs; bits outside this set may be discarded on a
   cc_op transition.  X and N are never dead.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADD] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUB] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMP] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};
/* Record that the flags are now described by OP, and discard any cached
   flag values the new representation does not use.  */
static void set_cc_op(DisasContext *s, CCOp op)
{
    CCOp old_op = s->cc_op;
    int dead;

    if (old_op == op) {
        return;
    }
    s->cc_op = op;
    s->cc_op_synced = 0;

    /* Discard CC computation that will no longer be used.
       Note that X and N are never dead.  */
    dead = cc_op_live[old_op] & ~cc_op_live[op];
    if (dead & CCF_C) {
        tcg_gen_discard_i32(QREG_CC_C);
    }
    if (dead & CCF_Z) {
        tcg_gen_discard_i32(QREG_CC_Z);
    }
    if (dead & CCF_V) {
        tcg_gen_discard_i32(QREG_CC_V);
    }
}
/* Update the CPU env CC_OP state.  Written lazily: only emits the store
   when the in-memory value may be stale.  */
static void update_cc_op(DisasContext *s)
{
    if (!s->cc_op_synced) {
        s->cc_op_synced = 1;
        tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
    }
}
/* Generate a load from the specified address.  Narrow values are
   sign extended to full register width.  */
static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
{
    TCGv tmp;
    int index = IS_USER(s);     /* MMU index: user vs. supervisor access */
    tmp = tcg_temp_new_i32();
    switch(opsize) {
    case OS_BYTE:
        if (sign)
            tcg_gen_qemu_ld8s(tmp, addr, index);
        else
            tcg_gen_qemu_ld8u(tmp, addr, index);
        break;
    case OS_WORD:
        if (sign)
            tcg_gen_qemu_ld16s(tmp, addr, index);
        else
            tcg_gen_qemu_ld16u(tmp, addr, index);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        break;
    default:
        g_assert_not_reached();
    }
    gen_throws_exception = gen_last_qop;
    return tmp;
}
/* Generate a 64-bit load (used for double-precision FP values).  */
static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)
{
    TCGv_i64 tmp;
    int index = IS_USER(s);
    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ldf64(tmp, addr, index);
    gen_throws_exception = gen_last_qop;
    return tmp;
}
/* Generate a store of VAL (of size OPSIZE) to ADDR.  */
static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
{
    int index = IS_USER(s);     /* MMU index: user vs. supervisor access */
    switch(opsize) {
    case OS_BYTE:
        tcg_gen_qemu_st8(val, addr, index);
        break;
    case OS_WORD:
        tcg_gen_qemu_st16(val, addr, index);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tcg_gen_qemu_st32(val, addr, index);
        break;
    default:
        g_assert_not_reached();
    }
    gen_throws_exception = gen_last_qop;
}
/* Generate a 64-bit store (used for double-precision FP values).  */
static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)
{
    int index = IS_USER(s);
    tcg_gen_qemu_stf64(val, addr, index);
    gen_throws_exception = gen_last_qop;
}
/* Access kind requested from gen_ldst()/gen_ea().  */
typedef enum {
    EA_STORE,   /* write VAL to the effective address */
    EA_LOADU,   /* zero-extending load */
    EA_LOADS    /* sign-extending load */
} ea_what;
296 /* Generate an unsigned load if VAL is 0 a signed load if val is -1,
297 otherwise generate a store. */
298 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
299 ea_what what)
301 if (what == EA_STORE) {
302 gen_store(s, opsize, addr, val);
303 return store_dummy;
304 } else {
305 return gen_load(s, opsize, addr, what == EA_LOADS);
309 /* Read a 16-bit immediate constant */
310 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
312 uint16_t im;
313 im = cpu_lduw_code(env, s->pc);
314 s->pc += 2;
315 return im;
318 /* Read an 8-bit immediate constant */
319 static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
321 return read_im16(env, s);
324 /* Read a 32-bit immediate constant. */
325 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
327 uint32_t im;
328 im = read_im16(env, s) << 16;
329 im |= 0xffff & read_im16(env, s);
330 return im;
/* Calculate an address index from extension word EXT, using TMP as
   scratch.  Returns either the index register itself or TMP.  */
static TCGv gen_addr_index(uint16_t ext, TCGv tmp)
{
    TCGv add;
    int scale;

    /* Bit 15 selects An vs. Dn as the index register.  */
    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        /* Word-sized index: sign-extend into the scratch temp.  */
        tcg_gen_ext16s_i32(tmp, add);
        add = tmp;
    }
    /* Optional scale factor of 1/2/4/8 encoded as a shift count.  */
    scale = (ext >> 9) & 3;
    if (scale != 0) {
        tcg_gen_shli_i32(tmp, add, scale);
        add = tmp;
    }
    return add;
}
/* Handle a base + index + displacement effective addresss.
   A NULL_QREG base means pc-relative.  Returns NULL_QREG when the
   extension word encodes a mode the configured CPU does not support.  */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;            /* base and outer displacements */

    offset = s->pc;             /* address of the extension word, for PC-rel */
    ext = read_im16(env, s);

    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
        /* Plain 68000 ignores the scale field.  */
        ext &= ~(3 << 9);
    }

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)read_im16(env, s);
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = tcg_temp_new();
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(ext, tmp);
        } else {
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                /* PC-relative: fold bd into a constant base.  */
                base = tcg_const_i32(offset + bd);
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            add = tcg_const_i32(bd);
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = gen_load(s, OS_LONG, add, 0);
            if ((ext & 0x44) == 4) {
                /* post-index */
                add = gen_addr_index(ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement */
                if ((ext & 3) == 2) {
                    od = (int16_t)read_im16(env, s);
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format */
        tmp = tcg_temp_new();
        add = gen_addr_index(ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            /* PC-relative with an 8-bit displacement.  */
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}
/* Evaluate all the CC flags.  Materializes C/V/Z/N in the QREG_CC_*
   globals from the current lazy representation, then switches the
   context to CC_OP_FLAGS.  */
static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        /* Already materialized.  */
        return;

    case CC_OP_ADD:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);  /* recover operand: res - src */
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        tcg_temp_free(t1);
        break;

    case CC_OP_SUB:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);  /* recover operand: res + src */
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        tcg_temp_free(t1);
        break;

    case CC_OP_CMP:
        /* N holds the first operand, V the second; compute the rest.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        /* cc_op is only known at runtime; defer to the helper.  */
        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
        break;

    default:
        t0 = tcg_const_i32(s->cc_op);
        gen_helper_flush_flags(cpu_env, t0);
        tcg_temp_free(t0);
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op.  */
    s->cc_op = CC_OP_FLAGS;
    s->cc_op_synced = 1;
}
/* Sign or zero extend a value of size OPSIZE from VAL into RES.  */
static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
{
    switch (opsize) {
    case OS_BYTE:
        if (sign) {
            tcg_gen_ext8s_i32(res, val);
        } else {
            tcg_gen_ext8u_i32(res, val);
        }
        break;
    case OS_WORD:
        if (sign) {
            tcg_gen_ext16s_i32(res, val);
        } else {
            tcg_gen_ext16u_i32(res, val);
        }
        break;
    case OS_LONG:
        /* Already full width: plain copy.  */
        tcg_gen_mov_i32(res, val);
        break;
    default:
        g_assert_not_reached();
    }
}
556 static TCGv gen_extend(TCGv val, int opsize, int sign)
558 TCGv tmp;
560 if (opsize == OS_LONG) {
561 tmp = val;
562 } else {
563 tmp = tcg_temp_new();
564 gen_ext(tmp, val, opsize, sign);
567 return tmp;
/* Set the flags from VAL of size OPSIZE as a logic-op result:
   N holds the sign-extended value, Z/C/V follow lazily via CC_OP_LOGIC.  */
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}
/* Stash the result (in N) and source operand (in V) for later lazy
   flag evaluation under CC_OP_ADD/CC_OP_SUB.  */
static void gen_update_cc_add(TCGv dest, TCGv src)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
}
582 static inline int opsize_bytes(int opsize)
584 switch (opsize) {
585 case OS_BYTE: return 1;
586 case OS_WORD: return 2;
587 case OS_LONG: return 4;
588 case OS_SINGLE: return 4;
589 case OS_DOUBLE: return 8;
590 case OS_EXTENDED: return 12;
591 case OS_PACKED: return 12;
592 default:
593 g_assert_not_reached();
597 static inline int insn_opsize(int insn)
599 switch ((insn >> 6) & 3) {
600 case 0: return OS_BYTE;
601 case 1: return OS_WORD;
602 case 2: return OS_LONG;
603 default:
604 g_assert_not_reached();
/* Assign value to a register.  If the width is less than the register width
   only the low part of the register is set.  */
static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
{
    TCGv tmp;
    switch (opsize) {
    case OS_BYTE:
        /* Keep the upper 24 bits, insert the low byte of VAL.  */
        tcg_gen_andi_i32(reg, reg, 0xffffff00);
        tmp = tcg_temp_new();
        tcg_gen_ext8u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        break;
    case OS_WORD:
        /* Keep the upper 16 bits, insert the low word of VAL.  */
        tcg_gen_andi_i32(reg, reg, 0xffff0000);
        tmp = tcg_temp_new();
        tcg_gen_ext16u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tcg_gen_mov_i32(reg, val);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Generate code for an "effective address".  Does not adjust the base
   register for autoincrement addressing modes.  Returns NULL_QREG for
   modes that have no memory address (register direct, immediate).  */
static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
                    int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct.  */
    case 1: /* Address register direct.  */
        return NULL_QREG;
    case 2: /* Indirect register */
    case 3: /* Indirect postincrement.  */
        /* The increment itself is applied by gen_ea().  */
        return AREG(insn, 0);
    case 4: /* Indirect predecrememnt.  */
        reg = AREG(insn, 0);
        tmp = tcg_temp_new();
        tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        return tmp;
    case 5: /* Indirect displacement.  */
        reg = AREG(insn, 0);
        tmp = tcg_temp_new();
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement.  */
        reg = AREG(insn, 0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (insn & 7) {
        case 0: /* Absolute short.  */
            offset = (int16_t)read_im16(env, s);
            return tcg_const_i32(offset);
        case 1: /* Absolute long.  */
            offset = read_im32(env, s);
            return tcg_const_i32(offset);
        case 2: /* pc displacement  */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            return tcg_const_i32(offset);
        case 3: /* pc index+displacement.  */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate.  */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
/* Helper function for gen_ea.  Reuse the computed address between the
   read and write halves of a read-modify-write operand.  */
static inline TCGv gen_ea_once(CPUM68KState *env, DisasContext *s,
                               uint16_t insn, int opsize, TCGv val,
                               TCGv *addrp, ea_what what)
{
    TCGv tmp;

    if (addrp && what == EA_STORE) {
        /* Write-back half: the address was cached by the earlier read.  */
        tmp = *addrp;
    } else {
        tmp = gen_lea(env, s, insn, opsize);
        if (IS_NULL_QREG(tmp))
            return tmp;
        if (addrp)
            *addrp = tmp;
    }
    return gen_ldst(s, opsize, tmp, val, what);
}
/* Generate code to load/store a value from/into an EA.  If VAL > 0 this is
   a write otherwise it is a read (0 == sign extend, -1 == zero extend).
   ADDRP is non-null for readwrite operands.  Returns NULL_QREG for
   invalid addressing modes, store_dummy for stores.  */
static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
                   int opsize, TCGv val, TCGv *addrp, ea_what what)
{
    TCGv reg;
    TCGv result;
    uint32_t offset;

    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct.  */
        reg = DREG(insn, 0);
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct.  */
        reg = AREG(insn, 0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = AREG(insn, 0);
        return gen_ldst(s, opsize, reg, val, what);
    case 3: /* Indirect postincrement.  */
        reg = AREG(insn, 0);
        result = gen_ldst(s, opsize, reg, val, what);
        /* ??? This is not exception safe.  The instruction may still
           fault after this point.  */
        if (what == EA_STORE || !addrp)
            tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
        return result;
    case 4: /* Indirect predecrememnt.  */
        {
            TCGv tmp;
            if (addrp && what == EA_STORE) {
                /* Write-back half of a RMW operand: reuse the address.  */
                tmp = *addrp;
            } else {
                tmp = gen_lea(env, s, insn, opsize);
                if (IS_NULL_QREG(tmp))
                    return tmp;
                if (addrp)
                    *addrp = tmp;
            }
            result = gen_ldst(s, opsize, tmp, val, what);
            /* ??? This is not exception safe.  The instruction may still
               fault after this point.  */
            if (what == EA_STORE || !addrp) {
                /* Commit the decremented address back to An.  */
                reg = AREG(insn, 0);
                tcg_gen_mov_i32(reg, tmp);
            }
        }
        return result;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
        return gen_ea_once(env, s, insn, opsize, val, addrp, what);
    case 7: /* Other */
        switch (insn & 7) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            return gen_ea_once(env, s, insn, opsize, val, addrp, what);
        case 4: /* Immediate.  */
            /* Sign extend values for consistency.  */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return tcg_const_i32(offset);
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
/* A decoded condition, expressed as a TCG comparison tcond(v1, v2).  */
typedef struct {
    TCGCond tcond;
    bool g1;        /* v1 is a global: do not free in free_cond() */
    bool g2;        /* v2 is a global: do not free in free_cond() */
    TCGv v1;
    TCGv v2;
} DisasCompare;
/* Decompose m68k condition code COND (0..15) into a TCG comparison in C,
   exploiting the current lazy cc_op representation where possible and
   falling back to a full flag flush otherwise.  */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly.  */
    if (op == CC_OP_CMP) {
        /* N and V still hold the two comparison operands.  */
        c->g1 = c->g2 = 1;
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            /* Recompute the difference and test its sign.  */
            c->g1 = c->g2 = 0;
            c->v2 = tcg_const_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    c->g1 = 1;
    c->g2 = 0;
    c->v2 = tcg_const_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /* Logic operations clear V, which simplifies LE to (Z || N),
           and since Z and N are co-located, this becomes a normal
           comparison vs N.  */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N.  */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally.  */
        if (op == CC_OP_ADD || op == CC_OP_SUB || op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N.  */
        if (op == CC_OP_ADD || op == CC_OP_SUB || op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X.  */
        if (op == CC_OP_ADD || op == CC_OP_SUB) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C.  */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above.  */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* V is stored in the sign bit; test with a signed compare vs 0.  */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_neg_i32(tmp, tmp);      /* Z as an all-ones/zero mask */
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcg_temp_free(tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* Even-numbered conditions are the negations of the odd ones.  */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
991 static void free_cond(DisasCompare *c)
993 if (!c->g1) {
994 tcg_temp_free(c->v1);
996 if (!c->g2) {
997 tcg_temp_free(c->v2);
/* Emit a conditional branch to L1 on m68k condition COND.  */
static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
{
    DisasCompare c;

    gen_cc_cond(&c, s, cond);
    update_cc_op(s);
    tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
    free_cond(&c);
}
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_lookup_tb(DisasContext *s)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->is_jmp = DISAS_UPDATE;
}
/* Generate a jump to an immediate address.  cc_op is synced first so
   the destination sees consistent flag state.  */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}
/* Generate a jump to the address in qreg DEST.  */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    update_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}
1035 static void gen_exception(DisasContext *s, uint32_t where, int nr)
1037 update_cc_op(s);
1038 gen_jmp_im(s, where);
1039 gen_helper_raise_exception(cpu_env, tcg_const_i32(nr));
/* Raise an address-error exception, reported at the start of the
   current instruction.  */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
}
/* Load a source operand via gen_ea(); on an invalid addressing mode,
   raises an address fault and returns from the enclosing handler.  */
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU);                 \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)

/* Store VAL to a destination operand via gen_ea(); on an invalid
   addressing mode, raises an address fault and returns from the
   enclosing handler.  */
#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
/* Whether DEST may be reached with a direct TB link: it must share a
   page with the TB start or the current insn.  Always true for
   user-mode emulation.  */
static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
/* Generate a jump to an immediate address.  N selects which of the two
   goto_tb slots of this TB is used for direct chaining.  */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* Single-stepping: trap to the debugger instead of chaining.  */
        gen_exception(s, dest, EXCP_DEBUG);
    } else if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb + n);
    } else {
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = DISAS_TB_JUMP;
}
/* Scc: set a byte destination to 0xff if the condition holds, else 0.  */
DISAS_INSN(scc)
{
    DisasCompare c;
    int cond;
    TCGv tmp;

    cond = (insn >> 8) & 0xf;
    gen_cc_cond(&c, s, cond);

    tmp = tcg_temp_new();
    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
    free_cond(&c);

    /* Turn the 0/1 setcond result into 0x00/0xffffffff.  */
    tcg_gen_neg_i32(tmp, tmp);
    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
    tcg_temp_free(tmp);
}
/* DBcc: if the condition fails, decrement the low word of Dn and
   branch back unless the counter has reached -1.  */
DISAS_INSN(dbcc)
{
    TCGLabel *l1;
    TCGv reg;
    TCGv tmp;
    int16_t offset;
    uint32_t base;

    reg = DREG(insn, 0);
    base = s->pc;               /* displacement is relative to its own word */
    offset = (int16_t)read_im16(env, s);
    l1 = gen_new_label();
    gen_jmpcc(s, (insn >> 8) & 0xf, l1);

    tmp = tcg_temp_new();
    tcg_gen_ext16s_i32(tmp, reg);
    tcg_gen_addi_i32(tmp, tmp, -1);
    gen_partset_reg(OS_WORD, reg, tmp);  /* only the low word is written */
    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
    gen_jmp_tb(s, 1, base + offset);     /* loop taken */
    gen_set_label(l1);
    gen_jmp_tb(s, 0, s->pc);             /* condition true or counter expired */
}
/* Unimplemented line-A opcode: raise the line-A emulator trap.  */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->pc - 2, EXCP_LINEA);
}
/* Unimplemented line-F (FPU) opcode: raise the line-F emulator trap.  */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->pc - 2, EXCP_LINEF);
}
/* Completely undecodable opcode.  Note that cpu_abort() fires at
   translation time, so the generated exception is never reached.  */
DISAS_INSN(undef)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);

    gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
    cpu_abort(CPU(cpu), "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
}
/* MULU.W / MULS.W: 16x16 -> 32 multiply into Dn.  Bit 8 selects the
   signed form.  */
DISAS_INSN(mulw)
{
    TCGv reg;
    TCGv tmp;
    TCGv src;
    int sign;

    sign = (insn & 0x100) != 0;
    reg = DREG(insn, 9);
    tmp = tcg_temp_new();
    if (sign)
        tcg_gen_ext16s_i32(tmp, reg);
    else
        tcg_gen_ext16u_i32(tmp, reg);
    SRC_EA(env, src, OS_WORD, sign, NULL);
    tcg_gen_mul_i32(tmp, tmp, src);
    tcg_gen_mov_i32(reg, tmp);
    gen_logic_cc(s, tmp, OS_WORD);
}
/* DIVU.W / DIVS.W: 32/16 divide via helper; Dn receives the 16-bit
   remainder in the high word and the 16-bit quotient in the low word.  */
DISAS_INSN(divw)
{
    TCGv reg;
    TCGv tmp;
    TCGv src;
    int sign;

    sign = (insn & 0x100) != 0;
    reg = DREG(insn, 9);
    if (sign) {
        tcg_gen_ext16s_i32(QREG_DIV1, reg);
    } else {
        tcg_gen_ext16u_i32(QREG_DIV1, reg);
    }
    SRC_EA(env, src, OS_WORD, sign, NULL);
    tcg_gen_mov_i32(QREG_DIV2, src);
    /* Helper argument 1 requests word-sized division semantics;
       the helper also sets the flags (and handles divide-by-zero).  */
    if (sign) {
        gen_helper_divs(cpu_env, tcg_const_i32(1));
    } else {
        gen_helper_divu(cpu_env, tcg_const_i32(1));
    }

    tmp = tcg_temp_new();
    src = tcg_temp_new();
    tcg_gen_ext16u_i32(tmp, QREG_DIV1);     /* quotient in the low word */
    tcg_gen_shli_i32(src, QREG_DIV2, 16);   /* remainder in the high word */
    tcg_gen_or_i32(reg, tmp, src);

    set_cc_op(s, CC_OP_FLAGS);
}
/* DIVU.L / DIVS.L: 32-bit divide via helper, selecting quotient or
   remainder by comparing the two register fields of the extension word.  */
DISAS_INSN(divl)
{
    TCGv num;
    TCGv den;
    TCGv reg;
    uint16_t ext;

    ext = read_im16(env, s);
    if (ext & 0x87f8) {
        /* Extension-word bits outside the supported encoding;
           presumably the 64-bit forms -- not implemented here.  */
        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
        return;
    }
    num = DREG(ext, 12);
    reg = DREG(ext, 0);
    tcg_gen_mov_i32(QREG_DIV1, num);
    SRC_EA(env, den, OS_LONG, 0, NULL);
    tcg_gen_mov_i32(QREG_DIV2, den);
    /* Helper argument 0 requests long-sized division semantics.  */
    if (ext & 0x0800) {
        gen_helper_divs(cpu_env, tcg_const_i32(0));
    } else {
        gen_helper_divu(cpu_env, tcg_const_i32(0));
    }
    if ((ext & 7) == ((ext >> 12) & 7)) {
        /* div */
        tcg_gen_mov_i32 (reg, QREG_DIV1);
    } else {
        /* rem */
        tcg_gen_mov_i32 (reg, QREG_DIV2);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
/* ADD / SUB (long form).  Bit 14 selects add vs. sub; bit 8 selects
   whether the EA or Dn is the destination.  */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;

    add = (insn & 0x4000) != 0;
    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* <ea> is the destination: reg op <ea> -> <ea>.  */
        SRC_EA(env, tmp, OS_LONG, 0, &addr);
        src = reg;
    } else {
        /* Dn is the destination: <ea> op Dn -> Dn.  */
        tmp = reg;
        SRC_EA(env, src, OS_LONG, 0, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* X = carry out: result < addend (unsigned).  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADD);
    } else {
        /* X = borrow: minuend < subtrahend (unsigned), before the sub.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUB);
    }
    gen_update_cc_add(dest, src);
    if (insn & 0x100) {
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    } else {
        tcg_gen_mov_i32(reg, dest);
    }
}
/* Reverse the order of the bits in REG.  */
DISAS_INSN(bitrev)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_helper_bitrev(reg, reg);
}
/* BTST/BCHG/BCLR/BSET with the bit number in a data register.  */
DISAS_INSN(bitop_reg)
{
    int opsize;
    int op;
    TCGv src1;
    TCGv src2;
    TCGv tmp;
    TCGv addr;
    TCGv dest;

    /* Memory operands are byte-sized; register operands are long.  */
    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;       /* 0=btst 1=bchg 2=bclr 3=bset */

    gen_flush_flags(s);

    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
    src2 = DREG(insn, 9);
    dest = tcg_temp_new();

    /* The bit number wraps modulo the operand width.  */
    tmp = tcg_temp_new();
    if (opsize == OS_BYTE)
        tcg_gen_andi_i32(tmp, src2, 7);
    else
        tcg_gen_andi_i32(tmp, src2, 31);

    src2 = tcg_const_i32(1);
    tcg_gen_shl_i32(src2, src2, tmp);
    tcg_temp_free(tmp);

    /* Z reflects the tested bit; the other flags were flushed above.  */
    tcg_gen_and_i32(QREG_CC_Z, src1, src2);

    switch (op) {
    case 1: /* bchg */
        tcg_gen_xor_i32(dest, src1, src2);
        break;
    case 2: /* bclr */
        tcg_gen_andc_i32(dest, src1, src2);
        break;
    case 3: /* bset */
        tcg_gen_or_i32(dest, src1, src2);
        break;
    default: /* btst */
        break;
    }
    tcg_temp_free(src2);
    if (op) {
        DEST_EA(env, insn, opsize, dest, &addr);
    }
    tcg_temp_free(dest);
}
/* SATS: saturate a data register based on the overflow flag (V),
 * then set NZ from the result.  Requires flushed flags for QREG_CC_V.
 */
1332 DISAS_INSN(sats)
1334 TCGv reg;
1335 reg = DREG(insn, 0);
1336 gen_flush_flags(s);
1337 gen_helper_sats(reg, reg, QREG_CC_V);
1338 gen_logic_cc(s, reg, OS_LONG);
/* Push VAL onto the guest stack: store long at SP-4, then update SP.
 * SP is only updated after the store so a faulting store leaves SP intact.
 */
1341 static void gen_push(DisasContext *s, TCGv val)
1343 TCGv tmp;
1345 tmp = tcg_temp_new();
1346 tcg_gen_subi_i32(tmp, QREG_SP, 4);
1347 gen_store(s, OS_LONG, tmp, val);
1348 tcg_gen_mov_i32(QREG_SP, tmp);
/* MOVEM: transfer the register set selected by the 16-bit mask
 * (D0..D7 then A0..A7, LSB first) to/from consecutive longs at the EA.
 * Bit 10 of the insn selects memory-to-registers (load).
 */
1351 DISAS_INSN(movem)
1353 TCGv addr;
1354 int i;
1355 uint16_t mask;
1356 TCGv reg;
1357 TCGv tmp;
1358 int is_load;
1360 mask = read_im16(env, s);
1361 tmp = gen_lea(env, s, insn, OS_LONG);
1362 if (IS_NULL_QREG(tmp)) {
1363 gen_addr_fault(s);
1364 return;
1366 addr = tcg_temp_new();
1367 tcg_gen_mov_i32(addr, tmp);
1368 is_load = ((insn & 0x0400) != 0);
1369 for (i = 0; i < 16; i++, mask >>= 1) {
1370 if (mask & 1) {
1371 if (i < 8)
1372 reg = DREG(i, 0);
1373 else
1374 reg = AREG(i, 0);
1375 if (is_load) {
1376 tmp = gen_load(s, OS_LONG, addr, 0);
1377 tcg_gen_mov_i32(reg, tmp);
1378 } else {
1379 gen_store(s, OS_LONG, addr, reg);
/* skip the final increment once the last masked register is done */
1381 if (mask != 1)
1382 tcg_gen_addi_i32(addr, addr, 4);
/* BTST/BCHG/BCLR/BSET with an immediate bit number (extension word).
 * A nonzero high byte in the extension word is an undefined encoding.
 * Memory operands are byte-sized; register operands are long.
 */
1387 DISAS_INSN(bitop_im)
1389 int opsize;
1390 int op;
1391 TCGv src1;
1392 uint32_t mask;
1393 int bitnum;
1394 TCGv tmp;
1395 TCGv addr;
1397 if ((insn & 0x38) != 0)
1398 opsize = OS_BYTE;
1399 else
1400 opsize = OS_LONG;
1401 op = (insn >> 6) & 3;
1403 bitnum = read_im16(env, s);
1404 if (bitnum & 0xff00) {
1405 disas_undef(env, s, insn);
1406 return;
1409 gen_flush_flags(s);
1411 SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1413 if (opsize == OS_BYTE)
1414 bitnum &= 7;
1415 else
1416 bitnum &= 31;
1417 mask = 1 << bitnum;
/* Z = tested bit value (flags were flushed above) */
1419 tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
1421 if (op) {
1422 tmp = tcg_temp_new();
1423 switch (op) {
1424 case 1: /* bchg */
1425 tcg_gen_xori_i32(tmp, src1, mask);
1426 break;
1427 case 2: /* bclr */
1428 tcg_gen_andi_i32(tmp, src1, ~mask);
1429 break;
1430 case 3: /* bset */
1431 tcg_gen_ori_i32(tmp, src1, mask);
1432 break;
1433 default: /* btst */
1434 break;
1436 DEST_EA(env, insn, opsize, tmp, &addr);
1437 tcg_temp_free(tmp);
/* Immediate-operand arithmetic/logic group: ORI/ANDI/SUBI/ADDI/EORI/CMPI
 * on a long operand, selected by bits 9-11.  CMPI (op 6) does not write
 * back; all others write the result to the EA.
 */
1441 DISAS_INSN(arith_im)
1443 int op;
1444 uint32_t im;
1445 TCGv src1;
1446 TCGv dest;
1447 TCGv addr;
1449 op = (insn >> 9) & 7;
1450 SRC_EA(env, src1, OS_LONG, 0, (op == 6) ? NULL : &addr);
1451 im = read_im32(env, s);
1452 dest = tcg_temp_new();
1453 switch (op) {
1454 case 0: /* ori */
1455 tcg_gen_ori_i32(dest, src1, im);
1456 gen_logic_cc(s, dest, OS_LONG);
1457 break;
1458 case 1: /* andi */
1459 tcg_gen_andi_i32(dest, src1, im);
1460 gen_logic_cc(s, dest, OS_LONG);
1461 break;
1462 case 2: /* subi */
1463 tcg_gen_mov_i32(dest, src1);
/* borrow computed on the pre-subtract value */
1464 tcg_gen_setcondi_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
1465 tcg_gen_subi_i32(dest, dest, im);
1466 gen_update_cc_add(dest, tcg_const_i32(im));
1467 set_cc_op(s, CC_OP_SUB);
1468 break;
1469 case 3: /* addi */
1470 tcg_gen_mov_i32(dest, src1);
1471 tcg_gen_addi_i32(dest, dest, im);
1472 gen_update_cc_add(dest, tcg_const_i32(im));
/* carry out iff the unsigned result wrapped below the immediate */
1473 tcg_gen_setcondi_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
1474 set_cc_op(s, CC_OP_ADD);
1475 break;
1476 case 5: /* eori */
1477 tcg_gen_xori_i32(dest, src1, im);
1478 gen_logic_cc(s, dest, OS_LONG);
1479 break;
1480 case 6: /* cmpi */
1481 gen_update_cc_add(src1, tcg_const_i32(im));
1482 set_cc_op(s, CC_OP_CMP);
1483 break;
1484 default:
1485 abort();
1487 if (op != 6) {
1488 DEST_EA(env, insn, OS_LONG, dest, &addr);
/* BYTEREV: byte-swap a data register in place; condition codes untouched. */
1492 DISAS_INSN(byterev)
1494 TCGv reg;
1496 reg = DREG(insn, 0);
1497 tcg_gen_bswap32_i32(reg, reg);
/* MOVE / MOVEA: size from bits 12-13 (1=byte, 2=long, 3=word).
 * MOVEA (destination mode 1) targets an address register and does
 * not affect condition codes; plain MOVE sets NZ (V=C=0).
 */
1500 DISAS_INSN(move)
1502 TCGv src;
1503 TCGv dest;
1504 int op;
1505 int opsize;
1507 switch (insn >> 12) {
1508 case 1: /* move.b */
1509 opsize = OS_BYTE;
1510 break;
1511 case 2: /* move.l */
1512 opsize = OS_LONG;
1513 break;
1514 case 3: /* move.w */
1515 opsize = OS_WORD;
1516 break;
1517 default:
1518 abort();
1520 SRC_EA(env, src, opsize, 1, NULL);
1521 op = (insn >> 6) & 7;
1522 if (op == 1) {
1523 /* movea */
1524 /* The value will already have been sign extended. */
1525 dest = AREG(insn, 9);
1526 tcg_gen_mov_i32(dest, src);
1527 } else {
1528 /* normal move */
1529 uint16_t dest_ea;
/* MOVE encodes the destination EA with mode/reg swapped; rebuild it */
1530 dest_ea = ((insn >> 9) & 7) | (op << 3);
1531 DEST_EA(env, dest_ea, opsize, src, NULL);
1532 /* This will be correct because loads sign extend. */
1533 gen_logic_cc(s, src, opsize);
/* NEGX: negate with extend, result = -(src + X), updating all flags.
 * Z is sticky (only cleared, never set) per the m68k extend-arithmetic
 * convention.
 */
1537 DISAS_INSN(negx)
1539 TCGv z;
1540 TCGv src;
1541 TCGv addr;
1542 int opsize;
1544 opsize = insn_opsize(insn);
1545 SRC_EA(env, src, opsize, 1, &addr);
1547 gen_flush_flags(s); /* compute old Z */
1549 /* Perform subtract with borrow.
1550 * (X, N) = -(src + X);
1553 z = tcg_const_i32(0);
1554 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
1555 tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
1556 tcg_temp_free(z);
1557 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
/* keep only the borrow bit of the 64-bit subtract's high word */
1559 tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
1561 /* Compute signed-overflow for negation. The normal formula for
1562 * subtraction is (res ^ src) & (src ^ dest), but with dest==0
1563 * this simplifies to res & src.
1566 tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);
1568 /* Copy the rest of the results into place. */
1569 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
1570 tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
1572 set_cc_op(s, CC_OP_FLAGS);
1574 /* result is in QREG_CC_N */
1576 DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
/* LEA: load the effective address into an address register.
 * A NULL qreg from gen_lea means the EA mode is invalid for LEA.
 */
1579 DISAS_INSN(lea)
1581 TCGv reg;
1582 TCGv tmp;
1584 reg = AREG(insn, 9);
1585 tmp = gen_lea(env, s, insn, OS_LONG);
1586 if (IS_NULL_QREG(tmp)) {
1587 gen_addr_fault(s);
1588 return;
1590 tcg_gen_mov_i32(reg, tmp);
/* CLR: store zero to the EA and set condition codes for a zero result. */
1593 DISAS_INSN(clr)
1595 int opsize;
1597 opsize = insn_opsize(insn);
1598 DEST_EA(env, insn, opsize, tcg_const_i32(0), NULL);
1599 gen_logic_cc(s, tcg_const_i32(0), opsize);
/* Materialize the CCR value in a fresh temp; flags are flushed first so
 * the helper sees up-to-date condition state.
 */
1602 static TCGv gen_get_ccr(DisasContext *s)
1604 TCGv dest;
1606 gen_flush_flags(s);
1607 update_cc_op(s);
1608 dest = tcg_temp_new();
1609 gen_helper_get_ccr(dest, cpu_env);
1610 return dest;
/* MOVE from CCR: store the condition-code register to a word EA. */
1613 DISAS_INSN(move_from_ccr)
1615 TCGv ccr;
1617 ccr = gen_get_ccr(s);
1618 DEST_EA(env, insn, OS_WORD, ccr, NULL);
/* NEG (long, register form): reg = -reg; X/C set iff the operand was
 * nonzero (a borrow always occurs except when negating zero).
 */
1621 DISAS_INSN(neg)
1623 TCGv reg;
1624 TCGv src1;
1626 reg = DREG(insn, 0);
1627 src1 = tcg_temp_new();
1628 tcg_gen_mov_i32(src1, reg);
1629 tcg_gen_neg_i32(reg, src1);
1630 gen_update_cc_add(reg, src1);
1631 tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, src1, 0);
1632 set_cc_op(s, CC_OP_SUB);
/* Load an immediate into CCR (ccr_only) or the full SR.  The CCR path
 * writes the decomposed flag qregs directly; the SR path goes through
 * the helper so supervisor bits are handled consistently.
 */
1635 static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
1637 if (ccr_only) {
1638 tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
1639 tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
/* QREG_CC_Z holds "result value": 0 means Z set, nonzero means clear */
1640 tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
1641 tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
1642 tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
1643 } else {
1644 gen_helper_set_sr(cpu_env, tcg_const_i32(val));
1646 set_cc_op(s, CC_OP_FLAGS);
/* MOVE to CCR/SR from a data register or an immediate word; any other
 * source addressing mode is an undefined encoding here.
 */
1649 static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
1650 int ccr_only)
1652 if ((insn & 0x38) == 0) {
1653 if (ccr_only) {
1654 gen_helper_set_ccr(cpu_env, DREG(insn, 0));
1655 } else {
1656 gen_helper_set_sr(cpu_env, DREG(insn, 0));
1658 set_cc_op(s, CC_OP_FLAGS);
1659 } else if ((insn & 0x3f) == 0x3c) {
1660 uint16_t val;
1661 val = read_im16(env, s);
1662 gen_set_sr_im(s, val, ccr_only);
1663 } else {
1664 disas_undef(env, s, insn);
/* MOVE to CCR: unprivileged CCR-only variant of the above. */
1669 DISAS_INSN(move_to_ccr)
1671 gen_set_sr(env, s, insn, 1);
/* NOT: bitwise complement of the EA operand, written back, NZ updated. */
1674 DISAS_INSN(not)
1676 TCGv src1;
1677 TCGv dest;
1678 TCGv addr;
1679 int opsize;
1681 opsize = insn_opsize(insn);
1682 SRC_EA(env, src1, opsize, 1, &addr);
1683 dest = tcg_temp_new();
1684 tcg_gen_not_i32(dest, src1);
1685 DEST_EA(env, insn, opsize, dest, &addr);
1686 gen_logic_cc(s, dest, opsize);
/* SWAP: exchange the upper and lower 16-bit halves of a data register. */
1689 DISAS_INSN(swap)
1691 TCGv src1;
1692 TCGv src2;
1693 TCGv reg;
1695 src1 = tcg_temp_new();
1696 src2 = tcg_temp_new();
1697 reg = DREG(insn, 0);
1698 tcg_gen_shli_i32(src1, reg, 16);
1699 tcg_gen_shri_i32(src2, reg, 16);
1700 tcg_gen_or_i32(reg, src1, src2);
1701 gen_logic_cc(s, reg, OS_LONG);
/* BKPT: raise a debug exception at the insn's own address. */
1704 DISAS_INSN(bkpt)
1706 gen_exception(s, s->pc - 2, EXCP_DEBUG);
/* PEA: push the effective address onto the stack. */
1709 DISAS_INSN(pea)
1711 TCGv tmp;
1713 tmp = gen_lea(env, s, insn, OS_LONG);
1714 if (IS_NULL_QREG(tmp)) {
1715 gen_addr_fault(s);
1716 return;
1718 gen_push(s, tmp);
/* EXT/EXTB: sign extend byte->word (op 2), word->long (op 3), or
 * byte->long (op 7).  Op 2 writes only the low word of the register.
 */
1721 DISAS_INSN(ext)
1723 int op;
1724 TCGv reg;
1725 TCGv tmp;
1727 reg = DREG(insn, 0);
1728 op = (insn >> 6) & 7;
1729 tmp = tcg_temp_new();
1730 if (op == 3)
1731 tcg_gen_ext16s_i32(tmp, reg);
1732 else
1733 tcg_gen_ext8s_i32(tmp, reg);
1734 if (op == 2)
1735 gen_partset_reg(OS_WORD, reg, tmp);
1736 else
1737 tcg_gen_mov_i32(reg, tmp);
1738 gen_logic_cc(s, tmp, OS_LONG);
/* TST: read the EA operand and set NZ from it (V=C=0). */
1741 DISAS_INSN(tst)
1743 int opsize;
1744 TCGv tmp;
1746 opsize = insn_opsize(insn);
1747 SRC_EA(env, tmp, opsize, 1, NULL);
1748 gen_logic_cc(s, tmp, opsize);
/* PULSE: ColdFire debug pulse; no architectural effect here. */
1751 DISAS_INSN(pulse)
1753 /* Implemented as a NOP. */
/* ILLEGAL: raise the illegal-instruction exception. */
1756 DISAS_INSN(illegal)
1758 gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
1761 /* ??? This should be atomic. */
/* TAS: test the byte operand (set NZ), then write it back with bit 7 set. */
1762 DISAS_INSN(tas)
1764 TCGv dest;
1765 TCGv src1;
1766 TCGv addr;
1768 dest = tcg_temp_new();
1769 SRC_EA(env, src1, OS_BYTE, 1, &addr);
1770 gen_logic_cc(s, src1, OS_BYTE);
1771 tcg_gen_ori_i32(dest, src1, 0x80);
1772 DEST_EA(env, insn, OS_BYTE, dest, &addr);
/* MULS.L / MULU.L (32x32 -> low 32 bits).  Extension-word bits other
 * than the register field are unsupported and raise an exception.
 */
1775 DISAS_INSN(mull)
1777 uint16_t ext;
1778 TCGv reg;
1779 TCGv src1;
1780 TCGv dest;
1782 /* The upper 32 bits of the product are discarded, so
1783 muls.l and mulu.l are functionally equivalent. */
1784 ext = read_im16(env, s);
1785 if (ext & 0x87ff) {
1786 gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
1787 return;
1789 reg = DREG(ext, 12);
1790 SRC_EA(env, src1, OS_LONG, 0, NULL);
1791 dest = tcg_temp_new();
1792 tcg_gen_mul_i32(dest, src1, reg);
1793 tcg_gen_mov_i32(reg, dest);
1794 /* Unlike m68k, coldfire always clears the overflow bit. */
1795 gen_logic_cc(s, dest, OS_LONG);
/* LINK common path: push An, load An with the new frame pointer, then
 * add OFFSET to SP.  The (insn & 7) != 7 check keeps LINK A7 from
 * clobbering the frame pointer copy before SP is adjusted.
 */
1798 static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
1800 TCGv reg;
1801 TCGv tmp;
1803 reg = AREG(insn, 0);
1804 tmp = tcg_temp_new();
1805 tcg_gen_subi_i32(tmp, QREG_SP, 4);
1806 gen_store(s, OS_LONG, tmp, reg);
1807 if ((insn & 7) != 7) {
1808 tcg_gen_mov_i32(reg, tmp);
1810 tcg_gen_addi_i32(QREG_SP, tmp, offset);
1811 tcg_temp_free(tmp);
/* LINK.W: 16-bit signed displacement. */
1814 DISAS_INSN(link)
1816 int16_t offset;
1818 offset = read_im16(env, s);
1819 gen_link(s, insn, offset);
/* LINK.L: 32-bit displacement. */
1822 DISAS_INSN(linkl)
1824 int32_t offset;
1826 offset = read_im32(env, s);
1827 gen_link(s, insn, offset);
/* UNLK: SP = An + 4 after reloading An from the saved frame pointer. */
1830 DISAS_INSN(unlk)
1832 TCGv src;
1833 TCGv reg;
1834 TCGv tmp;
1836 src = tcg_temp_new();
1837 reg = AREG(insn, 0);
1838 tcg_gen_mov_i32(src, reg);
1839 tmp = gen_load(s, OS_LONG, src, 0);
1840 tcg_gen_mov_i32(reg, tmp);
1841 tcg_gen_addi_i32(QREG_SP, src, 4);
/* NOP. */
1844 DISAS_INSN(nop)
/* RTS: pop the return address and jump to it. */
1848 DISAS_INSN(rts)
1850 TCGv tmp;
1852 tmp = gen_load(s, OS_LONG, QREG_SP, 0);
1853 tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
1854 gen_jmp(s, tmp);
/* JMP / JSR: bit 6 clear means JSR (push the return address first). */
1857 DISAS_INSN(jump)
1859 TCGv tmp;
1861 /* Load the target address first to ensure correct exception
1862 behavior. */
1863 tmp = gen_lea(env, s, insn, OS_LONG);
1864 if (IS_NULL_QREG(tmp)) {
1865 gen_addr_fault(s);
1866 return;
1868 if ((insn & 0x40) == 0) {
1869 /* jsr */
1870 gen_push(s, tcg_const_i32(s->pc));
1872 gen_jmp(s, tmp);
/* ADDQ/SUBQ: quick immediate 1..8 (0 encodes 8), long operand.
 * An address-register destination skips condition-code updates.
 */
1875 DISAS_INSN(addsubq)
1877 TCGv src1;
1878 TCGv src2;
1879 TCGv dest;
1880 int val;
1881 TCGv addr;
1883 SRC_EA(env, src1, OS_LONG, 0, &addr);
1884 val = (insn >> 9) & 7;
1885 if (val == 0)
1886 val = 8;
1887 dest = tcg_temp_new();
1888 tcg_gen_mov_i32(dest, src1);
1889 if ((insn & 0x38) == 0x08) {
1890 /* Don't update condition codes if the destination is an
1891 address register. */
1892 if (insn & 0x0100) {
1893 tcg_gen_subi_i32(dest, dest, val);
1894 } else {
1895 tcg_gen_addi_i32(dest, dest, val);
1897 } else {
1898 src2 = tcg_const_i32(val);
1899 if (insn & 0x0100) {
/* borrow computed before the subtract */
1900 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src2);
1901 tcg_gen_sub_i32(dest, dest, src2);
1902 set_cc_op(s, CC_OP_SUB);
1903 } else {
1904 tcg_gen_add_i32(dest, dest, src2);
/* carry out iff the unsigned sum wrapped below the addend */
1905 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src2);
1906 set_cc_op(s, CC_OP_ADD);
1908 gen_update_cc_add(dest, src2);
1910 DEST_EA(env, insn, OS_LONG, dest, &addr);
/* TPF (trap false): a NOP that skips 0, 1 or 2 extension words
 * depending on the low three opcode bits.
 */
1913 DISAS_INSN(tpf)
1915 switch (insn & 7) {
1916 case 2: /* One extension word. */
1917 s->pc += 2;
1918 break;
1919 case 3: /* Two extension words. */
1920 s->pc += 4;
1921 break;
1922 case 4: /* No extension words. */
1923 break;
1924 default:
1925 disas_undef(env, s, insn);
/* BRA/BSR/Bcc: 8-bit displacement in the opcode; 0x00 selects a 16-bit
 * and 0xff a 32-bit extension displacement.  Displacements are relative
 * to the address following the opcode word (base).
 */
1929 DISAS_INSN(branch)
1931 int32_t offset;
1932 uint32_t base;
1933 int op;
1934 TCGLabel *l1;
1936 base = s->pc;
1937 op = (insn >> 8) & 0xf;
1938 offset = (int8_t)insn;
1939 if (offset == 0) {
1940 offset = (int16_t)read_im16(env, s);
1941 } else if (offset == -1) {
1942 offset = read_im32(env, s);
1944 if (op == 1) {
1945 /* bsr */
1946 gen_push(s, tcg_const_i32(s->pc));
1948 if (op > 1) {
1949 /* Bcc */
/* branch to l1 (fall-through path) when the inverted condition holds */
1950 l1 = gen_new_label();
1951 gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
1952 gen_jmp_tb(s, 1, base + offset);
1953 gen_set_label(l1);
1954 gen_jmp_tb(s, 0, s->pc);
1955 } else {
1956 /* Unconditional branch. */
1957 gen_jmp_tb(s, 0, base + offset);
/* MOVEQ: load a sign-extended 8-bit immediate into Dn, set NZ. */
1961 DISAS_INSN(moveq)
1963 uint32_t val;
1965 val = (int8_t)insn;
1966 tcg_gen_movi_i32(DREG(insn, 9), val);
1967 gen_logic_cc(s, tcg_const_i32(val), OS_LONG);
/* MVZ/MVS: move a byte or word (bit 6) to Dn with zero (MVZ) or sign
 * (MVS) extension, selected by bit 7 via the SRC_EA sign argument.
 */
1970 DISAS_INSN(mvzs)
1972 int opsize;
1973 TCGv src;
1974 TCGv reg;
1976 if (insn & 0x40)
1977 opsize = OS_WORD;
1978 else
1979 opsize = OS_BYTE;
1980 SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
1981 reg = DREG(insn, 9);
1982 tcg_gen_mov_i32(reg, src);
1983 gen_logic_cc(s, src, opsize);
/* OR: bit 8 selects Dn|<ea> -> <ea>, otherwise <ea>|Dn -> Dn (partial
 * register write for sub-long sizes).  NZ from the result.
 */
1986 DISAS_INSN(or)
1988 TCGv reg;
1989 TCGv dest;
1990 TCGv src;
1991 TCGv addr;
1992 int opsize;
1994 opsize = insn_opsize(insn);
1995 reg = gen_extend(DREG(insn, 9), opsize, 0);
1996 dest = tcg_temp_new();
1997 if (insn & 0x100) {
1998 SRC_EA(env, src, opsize, 0, &addr);
1999 tcg_gen_or_i32(dest, src, reg);
2000 DEST_EA(env, insn, opsize, dest, &addr);
2001 } else {
2002 SRC_EA(env, src, opsize, 0, NULL);
2003 tcg_gen_or_i32(dest, src, reg);
2004 gen_partset_reg(opsize, DREG(insn, 9), dest);
2006 gen_logic_cc(s, dest, opsize);
/* SUBA: An -= <ea>; address-register arithmetic, no flag updates. */
2009 DISAS_INSN(suba)
2011 TCGv src;
2012 TCGv reg;
2014 SRC_EA(env, src, OS_LONG, 0, NULL);
2015 reg = AREG(insn, 9);
2016 tcg_gen_sub_i32(reg, reg, src);
/* Common SUBX flag/arithmetic path: result = dest - (src + X).
 * Leaves the (sign-extended) result in QREG_CC_N and fully updates
 * X/N/Z/V/C; Z is sticky (only cleared, never set).
 */
2019 static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
2021 TCGv tmp;
2023 gen_flush_flags(s); /* compute old Z */
2025 /* Perform subtract with borrow.
2026 * (X, N) = dest - (src + X);
2029 tmp = tcg_const_i32(0);
2030 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
2031 tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
2032 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2033 tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
2035 /* Compute signed-overflow for subtract. */
2037 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
2038 tcg_gen_xor_i32(tmp, dest, src);
2039 tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
2040 tcg_temp_free(tmp);
2042 /* Copy the rest of the results into place. */
2043 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2044 tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2046 set_cc_op(s, CC_OP_FLAGS);
2048 /* result is in QREG_CC_N */
/* SUBX Dy,Dx: register form; operands sign-extended to the operation
 * size, result partially written back to Dx.
 */
2051 DISAS_INSN(subx_reg)
2053 TCGv dest;
2054 TCGv src;
2055 int opsize;
2057 opsize = insn_opsize(insn);
2059 src = gen_extend(DREG(insn, 0), opsize, 1);
2060 dest = gen_extend(DREG(insn, 9), opsize, 1);
2062 gen_subx(s, src, dest, opsize);
2064 gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
2067 DISAS_INSN(subx_mem)
2069 TCGv src;
2070 TCGv addr_src;
2071 TCGv dest;
2072 TCGv addr_dest;
2073 int opsize;
2075 opsize = insn_opsize(insn);
2077 addr_src = AREG(insn, 0);
2078 tcg_gen_subi_i32(addr_src, addr_src, opsize);
2079 src = gen_load(s, opsize, addr_src, 1);
2081 addr_dest = AREG(insn, 9);
2082 tcg_gen_subi_i32(addr_dest, addr_dest, opsize);
2083 dest = gen_load(s, opsize, addr_dest, 1);
2085 gen_subx(s, src, dest, opsize);
2087 gen_store(s, opsize, addr_dest, QREG_CC_N);
/* MOV3Q: store a quick immediate 1..7 (0 encodes -1) as a long to the
 * EA, setting NZ.
 */
2090 DISAS_INSN(mov3q)
2092 TCGv src;
2093 int val;
2095 val = (insn >> 9) & 7;
2096 if (val == 0)
2097 val = -1;
2098 src = tcg_const_i32(val);
2099 gen_logic_cc(s, src, OS_LONG);
2100 DEST_EA(env, insn, OS_LONG, src, NULL);
/* CMP: compare <ea> against Dn; flags only, no writeback. */
2103 DISAS_INSN(cmp)
2105 TCGv src;
2106 TCGv reg;
2107 int opsize;
2109 opsize = insn_opsize(insn);
2110 SRC_EA(env, src, opsize, -1, NULL);
2111 reg = DREG(insn, 9);
2112 gen_update_cc_add(reg, src);
2113 set_cc_op(s, CC_OP_CMP);
/* CMPA: compare a sign-extended word (or long, bit 8) against An. */
2116 DISAS_INSN(cmpa)
2118 int opsize;
2119 TCGv src;
2120 TCGv reg;
2122 if (insn & 0x100) {
2123 opsize = OS_LONG;
2124 } else {
2125 opsize = OS_WORD;
2127 SRC_EA(env, src, opsize, 1, NULL);
2128 reg = AREG(insn, 9);
2129 gen_update_cc_add(reg, src);
2130 set_cc_op(s, CC_OP_CMP);
/* EOR: Dn ^ <ea> -> <ea>; NZ from the result before writeback. */
2133 DISAS_INSN(eor)
2135 TCGv src;
2136 TCGv dest;
2137 TCGv addr;
2138 int opsize;
2140 opsize = insn_opsize(insn);
2142 SRC_EA(env, src, opsize, 0, &addr);
2143 dest = tcg_temp_new();
2144 tcg_gen_xor_i32(dest, src, DREG(insn, 9));
2145 gen_logic_cc(s, dest, opsize);
2146 DEST_EA(env, insn, opsize, dest, &addr);
/* Swap the contents of two TCG registers via a temporary. */
2149 static void do_exg(TCGv reg1, TCGv reg2)
2151 TCGv temp = tcg_temp_new();
2152 tcg_gen_mov_i32(temp, reg1);
2153 tcg_gen_mov_i32(reg1, reg2);
2154 tcg_gen_mov_i32(reg2, temp);
2155 tcg_temp_free(temp);
/* NOTE(review): the function names below look swapped relative to their
 * bodies (exg_aa swaps data registers, exg_dd swaps address registers).
 * The bodies and comments agree with each other, and the names are wired
 * up in the opcode table elsewhere, so only the naming is suspect —
 * verify against the insn table before renaming.
 */
2158 DISAS_INSN(exg_aa)
2160 /* exchange Dx and Dy */
2161 do_exg(DREG(insn, 9), DREG(insn, 0));
2164 DISAS_INSN(exg_dd)
2166 /* exchange Ax and Ay */
2167 do_exg(AREG(insn, 9), AREG(insn, 0));
2170 DISAS_INSN(exg_da)
2172 /* exchange Dx and Ay */
2173 do_exg(DREG(insn, 9), AREG(insn, 0));
2176 DISAS_INSN(and)
2178 TCGv src;
2179 TCGv reg;
2180 TCGv dest;
2181 TCGv addr;
2182 int opsize;
2184 dest = tcg_temp_new();
2186 opsize = insn_opsize(insn);
2187 reg = DREG(insn, 9);
2188 if (insn & 0x100) {
2189 SRC_EA(env, src, opsize, 0, &addr);
2190 tcg_gen_and_i32(dest, src, reg);
2191 DEST_EA(env, insn, opsize, dest, &addr);
2192 } else {
2193 SRC_EA(env, src, opsize, 0, NULL);
2194 tcg_gen_and_i32(dest, src, reg);
2195 gen_partset_reg(opsize, reg, dest);
2197 tcg_temp_free(dest);
2198 gen_logic_cc(s, dest, opsize);
/* ADDA: An += <ea>; address-register arithmetic, no flag updates. */
2201 DISAS_INSN(adda)
2203 TCGv src;
2204 TCGv reg;
2206 SRC_EA(env, src, OS_LONG, 0, NULL);
2207 reg = AREG(insn, 9);
2208 tcg_gen_add_i32(reg, reg, src);
/* Common ADDX flag/arithmetic path: result = src + dest + X.
 * Leaves the (sign-extended) result in QREG_CC_N and fully updates
 * X/N/Z/V/C; Z is sticky (only cleared, never set).
 */
2211 static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
2213 TCGv tmp;
2215 gen_flush_flags(s); /* compute old Z */
2217 /* Perform addition with carry.
2218 * (X, N) = src + dest + X;
2221 tmp = tcg_const_i32(0);
2222 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
2223 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
2224 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2226 /* Compute signed-overflow for addition. */
2228 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
2229 tcg_gen_xor_i32(tmp, dest, src);
2230 tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
2231 tcg_temp_free(tmp);
2233 /* Copy the rest of the results into place. */
2234 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2235 tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2237 set_cc_op(s, CC_OP_FLAGS);
2239 /* result is in QREG_CC_N */
/* ADDX Dy,Dx: register form; operands sign-extended to the operation
 * size, result partially written back to Dx.
 */
2242 DISAS_INSN(addx_reg)
2244 TCGv dest;
2245 TCGv src;
2246 int opsize;
2248 opsize = insn_opsize(insn);
2250 dest = gen_extend(DREG(insn, 9), opsize, 1);
2251 src = gen_extend(DREG(insn, 0), opsize, 1);
2253 gen_addx(s, src, dest, opsize);
2255 gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
/* ADDX -(Ay),-(Ax): memory form; predecrement both address registers
 * by the operand size in bytes, add with extend, store to destination.
 */
2258 DISAS_INSN(addx_mem)
2260 TCGv src;
2261 TCGv addr_src;
2262 TCGv dest;
2263 TCGv addr_dest;
2264 int opsize;
2266 opsize = insn_opsize(insn);
2268 addr_src = AREG(insn, 0);
2269 tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
2270 src = gen_load(s, opsize, addr_src, 1);
2272 addr_dest = AREG(insn, 9);
2273 tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
2274 dest = gen_load(s, opsize, addr_dest, 1);
2276 gen_addx(s, src, dest, opsize);
2278 gen_store(s, opsize, addr_dest, QREG_CC_N);
2281 /* TODO: This could be implemented without helper functions. */
/* Shift by immediate count 1..8 (0 encodes 8): LSL (bit 8 set), else
 * LSR or ASR (bit 3); helpers update the condition codes.
 */
2282 DISAS_INSN(shift_im)
2284 TCGv reg;
2285 int tmp;
2286 TCGv shift;
2288 set_cc_op(s, CC_OP_FLAGS);
2290 reg = DREG(insn, 0);
2291 tmp = (insn >> 9) & 7;
2292 if (tmp == 0)
2293 tmp = 8;
2294 shift = tcg_const_i32(tmp);
2295 /* No need to flush flags because we know we will set C flag. */
2296 if (insn & 0x100) {
2297 gen_helper_shl_cc(reg, cpu_env, reg, shift);
2298 } else {
2299 if (insn & 8) {
2300 gen_helper_shr_cc(reg, cpu_env, reg, shift);
2301 } else {
2302 gen_helper_sar_cc(reg, cpu_env, reg, shift);
/* Shift by a register count: same selection as shift_im (LSL via bit 8,
 * LSR vs ASR via bit 3); helpers update the condition codes.
 */
2307 DISAS_INSN(shift_reg)
2309 TCGv reg;
2310 TCGv shift;
2312 reg = DREG(insn, 0);
2313 shift = DREG(insn, 9);
2314 if (insn & 0x100) {
2315 gen_helper_shl_cc(reg, cpu_env, reg, shift);
2316 } else {
2317 if (insn & 8) {
2318 gen_helper_shr_cc(reg, cpu_env, reg, shift);
2319 } else {
2320 gen_helper_sar_cc(reg, cpu_env, reg, shift);
2323 set_cc_op(s, CC_OP_FLAGS);
/* FF1: find first one in Dn (via helper); NZ set from the input value. */
2326 DISAS_INSN(ff1)
2328 TCGv reg;
2329 reg = DREG(insn, 0);
2330 gen_logic_cc(s, reg, OS_LONG);
2331 gen_helper_ff1(reg, reg);
/* Build the full SR value: supervisor bits from QREG_SR combined with
 * the live CCR (low byte) from gen_get_ccr.
 */
2334 static TCGv gen_get_sr(DisasContext *s)
2336 TCGv ccr;
2337 TCGv sr;
2339 ccr = gen_get_ccr(s);
2340 sr = tcg_temp_new();
2341 tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
2342 tcg_gen_or_i32(sr, sr, ccr);
2343 return sr;
/* STRLDSR: only the exact two-word form (second word 0x46FC) is
 * supported; pushes the current SR then loads SR from the immediate.
 * Privileged: user mode or an immediate clearing SR_S raises EXCP_PRIVILEGE.
 */
2346 DISAS_INSN(strldsr)
2348 uint16_t ext;
2349 uint32_t addr;
2351 addr = s->pc - 2;
2352 ext = read_im16(env, s);
2353 if (ext != 0x46FC) {
2354 gen_exception(s, addr, EXCP_UNSUPPORTED);
2355 return;
2357 ext = read_im16(env, s);
2358 if (IS_USER(s) || (ext & SR_S) == 0) {
2359 gen_exception(s, addr, EXCP_PRIVILEGE);
2360 return;
2362 gen_push(s, gen_get_sr(s));
2363 gen_set_sr_im(s, ext, 0);
/* MOVE from SR: privileged except on plain 68000-feature cores. */
2366 DISAS_INSN(move_from_sr)
2368 TCGv sr;
2370 if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
2371 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2372 return;
2374 sr = gen_get_sr(s);
2375 DEST_EA(env, insn, OS_WORD, sr, NULL);
/* MOVE to SR: privileged; TB lookup is refreshed since SR affects
 * translation state.
 */
2378 DISAS_INSN(move_to_sr)
2380 if (IS_USER(s)) {
2381 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2382 return;
2384 gen_set_sr(env, s, insn, 0);
2385 gen_lookup_tb(s);
/* MOVE from USP: privileged; reads the saved user stack pointer. */
2388 DISAS_INSN(move_from_usp)
2390 if (IS_USER(s)) {
2391 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2392 return;
2394 tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
2395 offsetof(CPUM68KState, sp[M68K_USP]));
/* MOVE to USP: privileged; writes the saved user stack pointer. */
2398 DISAS_INSN(move_to_usp)
2400 if (IS_USER(s)) {
2401 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2402 return;
2404 tcg_gen_st_i32(AREG(insn, 0), cpu_env,
2405 offsetof(CPUM68KState, sp[M68K_USP]));
/* HALT: raise the halt-instruction exception at the next PC. */
2408 DISAS_INSN(halt)
2410 gen_exception(s, s->pc, EXCP_HALT_INSN);
/* STOP: privileged; load SR from the immediate, mark the CPU halted,
 * and raise EXCP_HLT so execution leaves the TB.
 */
2413 DISAS_INSN(stop)
2415 uint16_t ext;
2417 if (IS_USER(s)) {
2418 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2419 return;
2422 ext = read_im16(env, s);
2424 gen_set_sr_im(s, ext, 0);
2425 tcg_gen_movi_i32(cpu_halted, 1);
2426 gen_exception(s, s->pc, EXCP_HLT);
/* RTE: privileged; the actual return-from-exception is handled by the
 * EXCP_RTE exception path outside translated code.
 */
2429 DISAS_INSN(rte)
2431 if (IS_USER(s)) {
2432 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2433 return;
2435 gen_exception(s, s->pc - 2, EXCP_RTE);
/* MOVEC: privileged; move a general register (An if ext bit 15, else Dn)
 * to the control register selected by the low 12 bits of the extension
 * word, then refresh the TB lookup.
 */
2438 DISAS_INSN(movec)
2440 uint16_t ext;
2441 TCGv reg;
2443 if (IS_USER(s)) {
2444 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2445 return;
2448 ext = read_im16(env, s);
2450 if (ext & 0x8000) {
2451 reg = AREG(ext, 12);
2452 } else {
2453 reg = DREG(ext, 12);
2455 gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
2456 gen_lookup_tb(s);
/* INTOUCH: privileged icache-touch; architectural no-op here. */
2459 DISAS_INSN(intouch)
2461 if (IS_USER(s)) {
2462 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2463 return;
2465 /* ICache fetch. Implement as no-op. */
/* CPUSHL: privileged cache push/invalidate; architectural no-op here. */
2468 DISAS_INSN(cpushl)
2470 if (IS_USER(s)) {
2471 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2472 return;
2474 /* Cache push/invalidate. Implement as no-op. */
/* WDDATA: always privileged-faults in this implementation. */
2477 DISAS_INSN(wddata)
2479 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
/* WDEBUG: privileged and unimplemented — aborts at translation time. */
2482 DISAS_INSN(wdebug)
2484 M68kCPU *cpu = m68k_env_get_cpu(env);
2486 if (IS_USER(s)) {
2487 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2488 return;
2490 /* TODO: Implement wdebug. */
2491 cpu_abort(CPU(cpu), "WDEBUG not implemented");
/* TRAP #n: raise EXCP_TRAP0 + vector number (low 4 opcode bits). */
2494 DISAS_INSN(trap)
2496 gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
2499 /* ??? FP exceptions are not implemented. Most exceptions are deferred until
2500 immediately before the next FP instruction is executed. */
/* FPU coprocessor instruction group, dispatched on bits 13-15 of the
 * extension word: register/EA arithmetic (0/2), fmove out (3), moves
 * to/from FP control registers (4/5), and fmovem (6/7).  All FP values
 * are kept as host doubles in 64-bit qregs (see the "fake floating
 * point" defines at the top of the file).
 */
2501 DISAS_INSN(fpu)
2503 uint16_t ext;
2504 int32_t offset;
2505 int opmode;
2506 TCGv_i64 src;
2507 TCGv_i64 dest;
2508 TCGv_i64 res;
2509 TCGv tmp32;
2510 int round;
2511 int set_dest;
2512 int opsize;
2514 ext = read_im16(env, s);
2515 opmode = ext & 0x7f;
2516 switch ((ext >> 13) & 7) {
2517 case 0: case 2:
2518 break;
2519 case 1:
2520 goto undef;
2521 case 3: /* fmove out */
2522 src = FREG(ext, 7);
2523 tmp32 = tcg_temp_new_i32();
2524 /* fmove */
2525 /* ??? TODO: Proper behavior on overflow. */
2526 switch ((ext >> 10) & 7) {
2527 case 0:
2528 opsize = OS_LONG;
2529 gen_helper_f64_to_i32(tmp32, cpu_env, src);
2530 break;
2531 case 1:
2532 opsize = OS_SINGLE;
2533 gen_helper_f64_to_f32(tmp32, cpu_env, src);
2534 break;
2535 case 4:
2536 opsize = OS_WORD;
2537 gen_helper_f64_to_i32(tmp32, cpu_env, src);
2538 break;
2539 case 5: /* OS_DOUBLE */
/* 64-bit store: only (An), (An)+, -(An) and d16(An) modes handled inline */
2540 tcg_gen_mov_i32(tmp32, AREG(insn, 0));
2541 switch ((insn >> 3) & 7) {
2542 case 2:
2543 case 3:
2544 break;
2545 case 4:
2546 tcg_gen_addi_i32(tmp32, tmp32, -8);
2547 break;
2548 case 5:
2549 offset = cpu_ldsw_code(env, s->pc);
2550 s->pc += 2;
2551 tcg_gen_addi_i32(tmp32, tmp32, offset);
2552 break;
2553 default:
2554 goto undef;
2556 gen_store64(s, tmp32, src);
/* post-increment / pre-decrement writeback of An */
2557 switch ((insn >> 3) & 7) {
2558 case 3:
2559 tcg_gen_addi_i32(tmp32, tmp32, 8);
2560 tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2561 break;
2562 case 4:
2563 tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2564 break;
2566 tcg_temp_free_i32(tmp32);
2567 return;
2568 case 6:
2569 opsize = OS_BYTE;
2570 gen_helper_f64_to_i32(tmp32, cpu_env, src);
2571 break;
2572 default:
2573 goto undef;
2575 DEST_EA(env, insn, opsize, tmp32, NULL);
2576 tcg_temp_free_i32(tmp32);
2577 return;
2578 case 4: /* fmove to control register. */
2579 switch ((ext >> 10) & 7) {
2580 case 4: /* FPCR */
2581 /* Not implemented. Ignore writes. */
2582 break;
2583 case 1: /* FPIAR */
2584 case 2: /* FPSR */
2585 default:
2586 cpu_abort(NULL, "Unimplemented: fmove to control %d",
2587 (ext >> 10) & 7);
2589 break;
2590 case 5: /* fmove from control register. */
2591 switch ((ext >> 10) & 7) {
2592 case 4: /* FPCR */
2593 /* Not implemented. Always return zero. */
2594 tmp32 = tcg_const_i32(0);
2595 break;
2596 case 1: /* FPIAR */
2597 case 2: /* FPSR */
2598 default:
2599 cpu_abort(NULL, "Unimplemented: fmove from control %d",
2600 (ext >> 10) & 7);
2601 goto undef;
2603 DEST_EA(env, insn, OS_LONG, tmp32, NULL);
2604 break;
2605 case 6: /* fmovem */
2606 case 7:
2608 TCGv addr;
2609 uint16_t mask;
2610 int i;
/* only the static, postincrement-style register-list form is supported */
2611 if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
2612 goto undef;
2613 tmp32 = gen_lea(env, s, insn, OS_LONG);
2614 if (IS_NULL_QREG(tmp32)) {
2615 gen_addr_fault(s);
2616 return;
2618 addr = tcg_temp_new_i32();
2619 tcg_gen_mov_i32(addr, tmp32);
2620 mask = 0x80;
2621 for (i = 0; i < 8; i++) {
2622 if (ext & mask) {
2623 dest = FREG(i, 0);
2624 if (ext & (1 << 13)) {
2625 /* store */
2626 tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
2627 } else {
2628 /* load */
2629 tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
2631 if (ext & (mask - 1))
2632 tcg_gen_addi_i32(addr, addr, 8);
2634 mask >>= 1;
2636 tcg_temp_free_i32(addr);
2638 return;
2640 if (ext & (1 << 14)) {
2641 /* Source effective address. */
2642 switch ((ext >> 10) & 7) {
2643 case 0: opsize = OS_LONG; break;
2644 case 1: opsize = OS_SINGLE; break;
2645 case 4: opsize = OS_WORD; break;
2646 case 5: opsize = OS_DOUBLE; break;
2647 case 6: opsize = OS_BYTE; break;
2648 default:
2649 goto undef;
2651 if (opsize == OS_DOUBLE) {
/* 64-bit load: same limited EA-mode handling as the fmove-out path */
2652 tmp32 = tcg_temp_new_i32();
2653 tcg_gen_mov_i32(tmp32, AREG(insn, 0));
2654 switch ((insn >> 3) & 7) {
2655 case 2:
2656 case 3:
2657 break;
2658 case 4:
2659 tcg_gen_addi_i32(tmp32, tmp32, -8);
2660 break;
2661 case 5:
2662 offset = cpu_ldsw_code(env, s->pc);
2663 s->pc += 2;
2664 tcg_gen_addi_i32(tmp32, tmp32, offset);
2665 break;
2666 case 7:
/* d16(PC): displacement relative to the extension word address */
2667 offset = cpu_ldsw_code(env, s->pc);
2668 offset += s->pc - 2;
2669 s->pc += 2;
2670 tcg_gen_addi_i32(tmp32, tmp32, offset);
2671 break;
2672 default:
2673 goto undef;
2675 src = gen_load64(s, tmp32);
2676 switch ((insn >> 3) & 7) {
2677 case 3:
2678 tcg_gen_addi_i32(tmp32, tmp32, 8);
2679 tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2680 break;
2681 case 4:
2682 tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2683 break;
2685 tcg_temp_free_i32(tmp32);
2686 } else {
2687 SRC_EA(env, tmp32, opsize, 1, NULL);
2688 src = tcg_temp_new_i64();
2689 switch (opsize) {
2690 case OS_LONG:
2691 case OS_WORD:
2692 case OS_BYTE:
2693 gen_helper_i32_to_f64(src, cpu_env, tmp32);
2694 break;
2695 case OS_SINGLE:
2696 gen_helper_f32_to_f64(src, cpu_env, tmp32);
2697 break;
2700 } else {
2701 /* Source register. */
2702 src = FREG(ext, 10);
2704 dest = FREG(ext, 7);
2705 res = tcg_temp_new_i64();
/* ftst (0x3a) must not read the destination register */
2706 if (opmode != 0x3a)
2707 tcg_gen_mov_f64(res, dest);
2708 round = 1;
2709 set_dest = 1;
2710 switch (opmode) {
2711 case 0: case 0x40: case 0x44: /* fmove */
2712 tcg_gen_mov_f64(res, src);
2713 break;
2714 case 1: /* fint */
2715 gen_helper_iround_f64(res, cpu_env, src);
2716 round = 0;
2717 break;
2718 case 3: /* fintrz */
2719 gen_helper_itrunc_f64(res, cpu_env, src);
2720 round = 0;
2721 break;
2722 case 4: case 0x41: case 0x45: /* fsqrt */
2723 gen_helper_sqrt_f64(res, cpu_env, src);
2724 break;
2725 case 0x18: case 0x58: case 0x5c: /* fabs */
2726 gen_helper_abs_f64(res, src);
2727 break;
2728 case 0x1a: case 0x5a: case 0x5e: /* fneg */
2729 gen_helper_chs_f64(res, src);
2730 break;
2731 case 0x20: case 0x60: case 0x64: /* fdiv */
2732 gen_helper_div_f64(res, cpu_env, res, src);
2733 break;
2734 case 0x22: case 0x62: case 0x66: /* fadd */
2735 gen_helper_add_f64(res, cpu_env, res, src);
2736 break;
2737 case 0x23: case 0x63: case 0x67: /* fmul */
2738 gen_helper_mul_f64(res, cpu_env, res, src);
2739 break;
2740 case 0x28: case 0x68: case 0x6c: /* fsub */
2741 gen_helper_sub_f64(res, cpu_env, res, src);
2742 break;
2743 case 0x38: /* fcmp */
2744 gen_helper_sub_cmp_f64(res, cpu_env, res, src);
2745 set_dest = 0;
2746 round = 0;
2747 break;
2748 case 0x3a: /* ftst */
2749 tcg_gen_mov_f64(res, src);
2750 set_dest = 0;
2751 round = 0;
2752 break;
2753 default:
2754 goto undef;
2756 if (ext & (1 << 14)) {
2757 tcg_temp_free_i64(src);
2759 if (round) {
/* opmode 0x4x = single-precision variants; 0x44 etc. keep double */
2760 if (opmode & 0x40) {
2761 if ((opmode & 0x4) != 0)
2762 round = 0;
2763 } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
2764 round = 0;
2767 if (round) {
/* round to single precision by a down/up conversion round trip */
2768 TCGv tmp = tcg_temp_new_i32();
2769 gen_helper_f64_to_f32(tmp, cpu_env, res);
2770 gen_helper_f32_to_f64(res, cpu_env, tmp);
2771 tcg_temp_free_i32(tmp);
2773 tcg_gen_mov_f64(QREG_FP_RESULT, res);
2774 if (set_dest) {
2775 tcg_gen_mov_f64(dest, res);
2777 tcg_temp_free_i64(res);
2778 return;
2779 undef:
2780 /* FIXME: Is this right for offset addressing modes? */
2781 s->pc -= 2;
2782 disas_undef_fpu(env, s, insn);
/* FBcc: floating-point conditional branch.  The helper encodes the last
 * FP compare result in `flag` (values -1/0/1, with 2 for unordered, as
 * used by the case comparisons below); each predicate branches to l1
 * when true, falling through to the not-taken path otherwise.
 */
2785 DISAS_INSN(fbcc)
2787 uint32_t offset;
2788 uint32_t addr;
2789 TCGv flag;
2790 TCGLabel *l1;
2792 addr = s->pc;
2793 offset = cpu_ldsw_code(env, s->pc);
2794 s->pc += 2;
2795 if (insn & (1 << 6)) {
/* 32-bit displacement form */
2796 offset = (offset << 16) | read_im16(env, s);
2799 l1 = gen_new_label();
2800 /* TODO: Raise BSUN exception. */
2801 flag = tcg_temp_new();
2802 gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
2803 /* Jump to l1 if condition is true. */
2804 switch (insn & 0xf) {
2805 case 0: /* f */
2806 break;
2807 case 1: /* eq (=0) */
2808 tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
2809 break;
2810 case 2: /* ogt (=1) */
2811 tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
2812 break;
2813 case 3: /* oge (=0 or =1) */
2814 tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
2815 break;
2816 case 4: /* olt (=-1) */
2817 tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
2818 break;
2819 case 5: /* ole (=-1 or =0) */
2820 tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
2821 break;
2822 case 6: /* ogl (=-1 or =1) */
2823 tcg_gen_andi_i32(flag, flag, 1);
2824 tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
2825 break;
2826 case 7: /* or (=2) */
2827 tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
2828 break;
2829 case 8: /* un (<2) */
2830 tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
2831 break;
2832 case 9: /* ueq (=0 or =2) */
2833 tcg_gen_andi_i32(flag, flag, 1);
2834 tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
2835 break;
2836 case 10: /* ugt (>0) */
2837 tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
2838 break;
2839 case 11: /* uge (>=0) */
2840 tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
2841 break;
2842 case 12: /* ult (=-1 or =2) */
2843 tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
2844 break;
2845 case 13: /* ule (!=1) */
2846 tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
2847 break;
2848 case 14: /* ne (!=0) */
2849 tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
2850 break;
2851 case 15: /* t */
2852 tcg_gen_br(l1);
2853 break;
2855 gen_jmp_tb(s, 0, s->pc);
2856 gen_set_label(l1);
2857 gen_jmp_tb(s, 1, addr + offset);
2860 DISAS_INSN(frestore)
2862 M68kCPU *cpu = m68k_env_get_cpu(env);
2864 /* TODO: Implement frestore. */
2865 cpu_abort(CPU(cpu), "FRESTORE not implemented");
2868 DISAS_INSN(fsave)
2870 M68kCPU *cpu = m68k_env_get_cpu(env);
2872 /* TODO: Implement fsave. */
2873 cpu_abort(CPU(cpu), "FSAVE not implemented");
2876 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
2878 TCGv tmp = tcg_temp_new();
2879 if (s->env->macsr & MACSR_FI) {
2880 if (upper)
2881 tcg_gen_andi_i32(tmp, val, 0xffff0000);
2882 else
2883 tcg_gen_shli_i32(tmp, val, 16);
2884 } else if (s->env->macsr & MACSR_SU) {
2885 if (upper)
2886 tcg_gen_sari_i32(tmp, val, 16);
2887 else
2888 tcg_gen_ext16s_i32(tmp, val);
2889 } else {
2890 if (upper)
2891 tcg_gen_shri_i32(tmp, val, 16);
2892 else
2893 tcg_gen_ext16u_i32(tmp, val);
2895 return tmp;
2898 static void gen_mac_clear_flags(void)
2900 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
2901 ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
/* MAC/MSAC: ColdFire EMAC multiply-accumulate, optionally combined with a
   memory load, and (on CF_EMAC_B) a dual-accumulate variant driven by the
   extension word.  The accumulator index comes from insn bit 7 and ext
   bit 4; insn bit 8 selects subtract (MSAC) vs add (MAC).  Rounding /
   saturation is delegated to the macsat* helpers according to the MACSR
   mode bits.  */
2904 DISAS_INSN(mac)
2906 TCGv rx;
2907 TCGv ry;
2908 uint16_t ext;
2909 int acc;
2910 TCGv tmp;
2911 TCGv addr;
2912 TCGv loadval;
2913 int dual;
2914 TCGv saved_flags;
/* Lazily allocate the 64-bit product temporary, shared by all MAC insns
   in this translation block.  */
2916 if (!s->done_mac) {
2917 s->mactmp = tcg_temp_new_i64();
2918 s->done_mac = 1;
2921 ext = read_im16(env, s);
2923 acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
2924 dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
/* Dual accumulate requires the EMAC_B feature.  */
2925 if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
2926 disas_undef(env, s, insn);
2927 return;
2929 if (insn & 0x30) {
2930 /* MAC with load. */
2931 tmp = gen_lea(env, s, insn, OS_LONG);
2932 addr = tcg_temp_new();
/* The effective address is masked by MAC_MASK before the access.  */
2933 tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
2934 /* Load the value now to ensure correct exception behavior.
2935 Perform writeback after reading the MAC inputs. */
2936 loadval = gen_load(s, OS_LONG, addr, 0);
2938 acc ^= 1;
/* In the with-load form the multiplier operands come from the
   extension word fields.  */
2939 rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
2940 ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
2941 } else {
2942 loadval = addr = NULL_QREG;
2943 rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
2944 ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2947 gen_mac_clear_flags();
2948 #if 0
2949 l1 = -1;
2950 /* Disabled because conditional branches clobber temporary vars. */
2951 if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
2952 /* Skip the multiply if we know we will ignore it. */
2953 l1 = gen_new_label();
2954 tmp = tcg_temp_new();
2955 tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
2956 gen_op_jmp_nz32(tmp, l1);
2958 #endif
/* Ext bit 11 selects long (32x32) vs word (16x16) operands; word mode
   extracts the half selected by ext bits 7/6.  */
2960 if ((ext & 0x0800) == 0) {
2961 /* Word. */
2962 rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
2963 ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
2965 if (s->env->macsr & MACSR_FI) {
2966 gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
2967 } else {
2968 if (s->env->macsr & MACSR_SU)
2969 gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
2970 else
2971 gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
/* Integer modes apply the scale factor from ext bits 10:9
   (1 = shift left one, 3 = shift right one).  */
2972 switch ((ext >> 9) & 3) {
2973 case 1:
2974 tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
2975 break;
2976 case 3:
2977 tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
2978 break;
2982 if (dual) {
2983 /* Save the overflow flag from the multiply. */
2984 saved_flags = tcg_temp_new();
2985 tcg_gen_mov_i32(saved_flags, QREG_MACSR);
2986 } else {
2987 saved_flags = NULL_QREG;
2990 #if 0
2991 /* Disabled because conditional branches clobber temporary vars. */
2992 if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
2993 /* Skip the accumulate if the value is already saturated. */
2994 l1 = gen_new_label();
2995 tmp = tcg_temp_new();
2996 gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
2997 gen_op_jmp_nz32(tmp, l1);
2999 #endif
/* First accumulate: insn bit 8 chooses MSAC (subtract) vs MAC (add).  */
3001 if (insn & 0x100)
3002 tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
3003 else
3004 tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
/* Saturate according to the current MACSR mode.  */
3006 if (s->env->macsr & MACSR_FI)
3007 gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
3008 else if (s->env->macsr & MACSR_SU)
3009 gen_helper_macsats(cpu_env, tcg_const_i32(acc));
3010 else
3011 gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
3013 #if 0
3014 /* Disabled because conditional branches clobber temporary vars. */
3015 if (l1 != -1)
3016 gen_set_label(l1);
3017 #endif
3019 if (dual) {
3020 /* Dual accumulate variant. */
3021 acc = (ext >> 2) & 3;
3022 /* Restore the overflow flag from the multiplier. */
3023 tcg_gen_mov_i32(QREG_MACSR, saved_flags);
3024 #if 0
3025 /* Disabled because conditional branches clobber temporary vars. */
3026 if ((s->env->macsr & MACSR_OMC) != 0) {
3027 /* Skip the accumulate if the value is already saturated. */
3028 l1 = gen_new_label();
3029 tmp = tcg_temp_new();
3030 gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
3031 gen_op_jmp_nz32(tmp, l1);
3033 #endif
/* Second accumulate into the accumulator selected by ext bits 3:2,
   direction chosen by ext bit 1.  */
3034 if (ext & 2)
3035 tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
3036 else
3037 tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
3038 if (s->env->macsr & MACSR_FI)
3039 gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
3040 else if (s->env->macsr & MACSR_SU)
3041 gen_helper_macsats(cpu_env, tcg_const_i32(acc));
3042 else
3043 gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
3044 #if 0
3045 /* Disabled because conditional branches clobber temporary vars. */
3046 if (l1 != -1)
3047 gen_set_label(l1);
3048 #endif
3050 gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
/* Deferred writeback for the with-load form: register result plus
   address-register update for post-increment / pre-decrement modes.  */
3052 if (insn & 0x30) {
3053 TCGv rw;
3054 rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
3055 tcg_gen_mov_i32(rw, loadval);
3056 /* FIXME: Should address writeback happen with the masked or
3057 unmasked value? */
3058 switch ((insn >> 3) & 7) {
3059 case 3: /* Post-increment. */
3060 tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
3061 break;
3062 case 4: /* Pre-decrement. */
3063 tcg_gen_mov_i32(AREG(insn, 0), addr);
3068 DISAS_INSN(from_mac)
3070 TCGv rx;
3071 TCGv_i64 acc;
3072 int accnum;
3074 rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
3075 accnum = (insn >> 9) & 3;
3076 acc = MACREG(accnum);
3077 if (s->env->macsr & MACSR_FI) {
3078 gen_helper_get_macf(rx, cpu_env, acc);
3079 } else if ((s->env->macsr & MACSR_OMC) == 0) {
3080 tcg_gen_extrl_i64_i32(rx, acc);
3081 } else if (s->env->macsr & MACSR_SU) {
3082 gen_helper_get_macs(rx, acc);
3083 } else {
3084 gen_helper_get_macu(rx, acc);
3086 if (insn & 0x40) {
3087 tcg_gen_movi_i64(acc, 0);
3088 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
3092 DISAS_INSN(move_mac)
3094 /* FIXME: This can be done without a helper. */
3095 int src;
3096 TCGv dest;
3097 src = insn & 3;
3098 dest = tcg_const_i32((insn >> 9) & 3);
3099 gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
3100 gen_mac_clear_flags();
3101 gen_helper_mac_set_flags(cpu_env, dest);
3104 DISAS_INSN(from_macsr)
3106 TCGv reg;
3108 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
3109 tcg_gen_mov_i32(reg, QREG_MACSR);
3112 DISAS_INSN(from_mask)
3114 TCGv reg;
3115 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
3116 tcg_gen_mov_i32(reg, QREG_MAC_MASK);
3119 DISAS_INSN(from_mext)
3121 TCGv reg;
3122 TCGv acc;
3123 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
3124 acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
3125 if (s->env->macsr & MACSR_FI)
3126 gen_helper_get_mac_extf(reg, cpu_env, acc);
3127 else
3128 gen_helper_get_mac_exti(reg, cpu_env, acc);
3131 DISAS_INSN(macsr_to_ccr)
3133 TCGv tmp = tcg_temp_new();
3134 tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
3135 gen_helper_set_sr(cpu_env, tmp);
3136 tcg_temp_free(tmp);
3137 set_cc_op(s, CC_OP_FLAGS);
3140 DISAS_INSN(to_mac)
3142 TCGv_i64 acc;
3143 TCGv val;
3144 int accnum;
3145 accnum = (insn >> 9) & 3;
3146 acc = MACREG(accnum);
3147 SRC_EA(env, val, OS_LONG, 0, NULL);
3148 if (s->env->macsr & MACSR_FI) {
3149 tcg_gen_ext_i32_i64(acc, val);
3150 tcg_gen_shli_i64(acc, acc, 8);
3151 } else if (s->env->macsr & MACSR_SU) {
3152 tcg_gen_ext_i32_i64(acc, val);
3153 } else {
3154 tcg_gen_extu_i32_i64(acc, val);
3156 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
3157 gen_mac_clear_flags();
3158 gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
3161 DISAS_INSN(to_macsr)
3163 TCGv val;
3164 SRC_EA(env, val, OS_LONG, 0, NULL);
3165 gen_helper_set_macsr(cpu_env, val);
3166 gen_lookup_tb(s);
3169 DISAS_INSN(to_mask)
3171 TCGv val;
3172 SRC_EA(env, val, OS_LONG, 0, NULL);
3173 tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
3176 DISAS_INSN(to_mext)
3178 TCGv val;
3179 TCGv acc;
3180 SRC_EA(env, val, OS_LONG, 0, NULL);
3181 acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
3182 if (s->env->macsr & MACSR_FI)
3183 gen_helper_set_mac_extf(cpu_env, val, acc);
3184 else if (s->env->macsr & MACSR_SU)
3185 gen_helper_set_mac_exts(cpu_env, val, acc);
3186 else
3187 gen_helper_set_mac_extu(cpu_env, val, acc);
/* Direct-mapped decode table: one handler per 16-bit opcode value.  */
3190 static disas_proc opcode_table[65536];
/* Install PROC for every opcode value I with (I & mask) == opcode.
   Only the contiguous range below the highest clear mask bit is scanned,
   so handlers whose mask has "don't care" bits scattered above that point
   are still installed correctly by the (i & mask) == opcode test.  */
3192 static void
3193 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
3195 int i;
3196 int from;
3197 int to;
3199 /* Sanity check. All set bits must be included in the mask. */
3200 if (opcode & ~mask) {
3201 fprintf(stderr,
3202 "qemu internal error: bogus opcode definition %04x/%04x\n",
3203 opcode, mask);
3204 abort();
3206 /* This could probably be cleverer. For now just optimize the case where
3207 the top bits are known. */
3208 /* Find the first zero bit in the mask. */
3209 i = 0x8000;
3210 while ((i & mask) != 0)
3211 i >>= 1;
3212 /* Iterate over all combinations of this and lower bits. */
3213 if (i == 0)
3214 i = 1;
3215 else
3216 i <<= 1;
/* [from, to) covers all opcodes matching the fixed top bits.  */
3217 from = opcode & ~(i - 1);
3218 to = from + i;
3219 for (i = from; i < to; i++) {
3220 if ((i & mask) == opcode)
3221 opcode_table[i] = proc;
3225 /* Register m68k opcode handlers. Order is important.
3226 Later insn override earlier ones. */
3227 void register_m68k_insns (CPUM68KState *env)
3229 /* Build the opcode table only once to avoid
3230 multithreading issues. */
3231 if (opcode_table[0] != NULL) {
3232 return;
3235 /* use BASE() for instruction available
3236 * for CF_ISA_A and M68000.
3238 #define BASE(name, opcode, mask) \
3239 register_opcode(disas_##name, 0x##opcode, 0x##mask)
3240 #define INSN(name, opcode, mask, feature) do { \
3241 if (m68k_feature(env, M68K_FEATURE_##feature)) \
3242 BASE(name, opcode, mask); \
3243 } while(0)
/* Opcode lines 0x0000-0x0fff: immediate arithmetic and bit operations.  */
3244 BASE(undef, 0000, 0000);
3245 INSN(arith_im, 0080, fff8, CF_ISA_A);
3246 INSN(arith_im, 0000, ff00, M68000);
3247 INSN(undef, 00c0, ffc0, M68000);
3248 INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
3249 BASE(bitop_reg, 0100, f1c0);
3250 BASE(bitop_reg, 0140, f1c0);
3251 BASE(bitop_reg, 0180, f1c0);
3252 BASE(bitop_reg, 01c0, f1c0);
3253 INSN(arith_im, 0280, fff8, CF_ISA_A);
3254 INSN(arith_im, 0200, ff00, M68000);
3255 INSN(undef, 02c0, ffc0, M68000);
3256 INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
3257 INSN(arith_im, 0480, fff8, CF_ISA_A);
3258 INSN(arith_im, 0400, ff00, M68000);
3259 INSN(undef, 04c0, ffc0, M68000);
3260 INSN(arith_im, 0600, ff00, M68000);
3261 INSN(undef, 06c0, ffc0, M68000);
3262 INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
3263 INSN(arith_im, 0680, fff8, CF_ISA_A);
3264 INSN(arith_im, 0c00, ff38, CF_ISA_A);
3265 INSN(arith_im, 0c00, ff00, M68000);
3266 BASE(bitop_im, 0800, ffc0);
3267 BASE(bitop_im, 0840, ffc0);
3268 BASE(bitop_im, 0880, ffc0);
3269 BASE(bitop_im, 08c0, ffc0);
3270 INSN(arith_im, 0a80, fff8, CF_ISA_A);
3271 INSN(arith_im, 0a00, ff00, M68000);
/* Lines 0x1000-0x3fff: MOVE (byte/long/word by top nibble).  */
3272 BASE(move, 1000, f000);
3273 BASE(move, 2000, f000);
3274 BASE(move, 3000, f000);
/* Line 0x4000: miscellaneous (negx/clr/lea/pea/movem/tst/jumps...).  */
3275 INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
3276 INSN(negx, 4080, fff8, CF_ISA_A);
3277 INSN(negx, 4000, ff00, M68000);
3278 INSN(undef, 40c0, ffc0, M68000);
3279 INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
3280 INSN(move_from_sr, 40c0, ffc0, M68000);
3281 BASE(lea, 41c0, f1c0);
3282 BASE(clr, 4200, ff00);
3283 BASE(undef, 42c0, ffc0);
3284 INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
3285 INSN(move_from_ccr, 42c0, ffc0, M68000);
3286 INSN(neg, 4480, fff8, CF_ISA_A);
3287 INSN(neg, 4400, ff00, M68000);
3288 INSN(undef, 44c0, ffc0, M68000);
3289 BASE(move_to_ccr, 44c0, ffc0);
3290 INSN(not, 4680, fff8, CF_ISA_A);
3291 INSN(not, 4600, ff00, M68000);
3292 INSN(undef, 46c0, ffc0, M68000);
3293 INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
3294 INSN(linkl, 4808, fff8, M68000);
3295 BASE(pea, 4840, ffc0);
3296 BASE(swap, 4840, fff8);
3297 INSN(bkpt, 4848, fff8, BKPT);
3298 BASE(movem, 48c0, fbc0);
3299 BASE(ext, 4880, fff8);
3300 BASE(ext, 48c0, fff8);
3301 BASE(ext, 49c0, fff8);
3302 BASE(tst, 4a00, ff00);
3303 INSN(tas, 4ac0, ffc0, CF_ISA_B);
3304 INSN(tas, 4ac0, ffc0, M68000);
3305 INSN(halt, 4ac8, ffff, CF_ISA_A);
3306 INSN(pulse, 4acc, ffff, CF_ISA_A);
3307 BASE(illegal, 4afc, ffff);
3308 INSN(mull, 4c00, ffc0, CF_ISA_A);
3309 INSN(mull, 4c00, ffc0, LONG_MULDIV);
3310 INSN(divl, 4c40, ffc0, CF_ISA_A);
3311 INSN(divl, 4c40, ffc0, LONG_MULDIV);
3312 INSN(sats, 4c80, fff8, CF_ISA_B);
3313 BASE(trap, 4e40, fff0);
3314 BASE(link, 4e50, fff8);
3315 BASE(unlk, 4e58, fff8);
3316 INSN(move_to_usp, 4e60, fff8, USP);
3317 INSN(move_from_usp, 4e68, fff8, USP);
3318 BASE(nop, 4e71, ffff);
3319 BASE(stop, 4e72, ffff);
3320 BASE(rte, 4e73, ffff);
3321 BASE(rts, 4e75, ffff);
3322 INSN(movec, 4e7b, ffff, CF_ISA_A);
3323 BASE(jump, 4e80, ffc0);
3324 INSN(jump, 4ec0, ffc0, CF_ISA_A);
3325 INSN(addsubq, 5180, f1c0, CF_ISA_A);
3326 INSN(jump, 4ec0, ffc0, M68000);
3327 INSN(addsubq, 5000, f080, M68000);
3328 INSN(addsubq, 5080, f0c0, M68000);
3329 INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx */
3330 INSN(scc, 50c0, f0c0, M68000); /* Scc.B <EA> */
3331 INSN(dbcc, 50c8, f0f8, M68000);
3332 INSN(addsubq, 5080, f1c0, CF_ISA_A);
3333 INSN(tpf, 51f8, fff8, CF_ISA_A);
3335 /* Branch instructions. */
3336 BASE(branch, 6000, f000);
3337 /* Disable long branch instructions, then add back the ones we want. */
3338 BASE(undef, 60ff, f0ff); /* All long branches. */
3339 INSN(branch, 60ff, f0ff, CF_ISA_B);
3340 INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
3341 INSN(branch, 60ff, ffff, BRAL);
3342 INSN(branch, 60ff, f0ff, BCCL);
/* Lines 0x7000-0x9fff: moveq, or, divw, sub family.  */
3344 BASE(moveq, 7000, f100);
3345 INSN(mvzs, 7100, f100, CF_ISA_B);
3346 BASE(or, 8000, f000);
3347 BASE(divw, 80c0, f0c0);
3348 BASE(addsub, 9000, f000);
3349 INSN(undef, 90c0, f0c0, CF_ISA_A);
3350 INSN(subx_reg, 9180, f1f8, CF_ISA_A);
3351 INSN(subx_reg, 9100, f138, M68000);
3352 INSN(subx_mem, 9108, f138, M68000);
3353 INSN(suba, 91c0, f1c0, CF_ISA_A);
/* Line 0xa000: ColdFire EMAC (falls back to undef_mac otherwise).  */
3355 BASE(undef_mac, a000, f000);
3356 INSN(mac, a000, f100, CF_EMAC);
3357 INSN(from_mac, a180, f9b0, CF_EMAC);
3358 INSN(move_mac, a110, f9fc, CF_EMAC);
3359 INSN(from_macsr,a980, f9f0, CF_EMAC);
3360 INSN(from_mask, ad80, fff0, CF_EMAC);
3361 INSN(from_mext, ab80, fbf0, CF_EMAC);
3362 INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
3363 INSN(to_mac, a100, f9c0, CF_EMAC);
3364 INSN(to_macsr, a900, ffc0, CF_EMAC);
3365 INSN(to_mext, ab00, fbc0, CF_EMAC);
3366 INSN(to_mask, ad00, ffc0, CF_EMAC);
3368 INSN(mov3q, a140, f1c0, CF_ISA_B);
3369 INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
3370 INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
3371 INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
3372 INSN(cmp, b080, f1c0, CF_ISA_A);
3373 INSN(cmpa, b1c0, f1c0, CF_ISA_A);
3374 INSN(cmp, b000, f100, M68000);
3375 INSN(eor, b100, f100, M68000);
3376 INSN(cmpa, b0c0, f0c0, M68000);
3377 INSN(eor, b180, f1c0, CF_ISA_A);
3378 BASE(and, c000, f000);
3379 INSN(exg_dd, c140, f1f8, M68000);
3380 INSN(exg_aa, c148, f1f8, M68000);
3381 INSN(exg_da, c188, f1f8, M68000);
3382 BASE(mulw, c0c0, f0c0);
3383 BASE(addsub, d000, f000);
3384 INSN(undef, d0c0, f0c0, CF_ISA_A);
3385 INSN(addx_reg, d180, f1f8, CF_ISA_A);
3386 INSN(addx_reg, d100, f138, M68000);
3387 INSN(addx_mem, d108, f138, M68000);
3388 INSN(adda, d1c0, f1c0, CF_ISA_A);
3389 INSN(adda, d0c0, f0c0, M68000);
3390 INSN(shift_im, e080, f0f0, CF_ISA_A);
3391 INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
/* Line 0xf000: FPU and cache/debug control.  */
3392 INSN(undef_fpu, f000, f000, CF_ISA_A);
3393 INSN(fpu, f200, ffc0, CF_FPU);
3394 INSN(fbcc, f280, ffc0, CF_FPU);
3395 INSN(frestore, f340, ffc0, CF_FPU);
3396 INSN(fsave, f340, ffc0, CF_FPU);
3397 INSN(intouch, f340, ffc0, CF_ISA_A);
3398 INSN(cpushl, f428, ff38, CF_ISA_A);
3399 INSN(wddata, fb00, ff00, CF_ISA_A);
3400 INSN(wdebug, fbc0, ffc0, CF_ISA_A);
3401 #undef INSN
3404 /* ??? Some of this implementation is not exception safe. We should always
3405 write back the result to memory before setting the condition codes. */
3406 static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
3408 uint16_t insn;
3410 insn = read_im16(env, s);
3412 opcode_table[insn](env, s, insn);
3415 /* generate intermediate code for basic block 'tb'. */
3416 void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
3418 M68kCPU *cpu = m68k_env_get_cpu(env);
3419 CPUState *cs = CPU(cpu);
3420 DisasContext dc1, *dc = &dc1;
3421 target_ulong pc_start;
3422 int pc_offset;
3423 int num_insns;
3424 int max_insns;
3426 /* generate intermediate code */
3427 pc_start = tb->pc;
/* Initialize the per-TB translation state; cc_op starts DYNAMIC so the
   first instruction's flag handling is resolved lazily.  */
3429 dc->tb = tb;
3431 dc->env = env;
3432 dc->is_jmp = DISAS_NEXT;
3433 dc->pc = pc_start;
3434 dc->cc_op = CC_OP_DYNAMIC;
3435 dc->cc_op_synced = 1;
3436 dc->singlestep_enabled = cs->singlestep_enabled;
3437 dc->fpcr = env->fpcr;
3438 dc->user = (env->sr & SR_S) == 0;
3439 dc->done_mac = 0;
3440 num_insns = 0;
/* Honour the icount budget in tb->cflags, clamped to TCG_MAX_INSNS.  */
3441 max_insns = tb->cflags & CF_COUNT_MASK;
3442 if (max_insns == 0) {
3443 max_insns = CF_COUNT_MASK;
3445 if (max_insns > TCG_MAX_INSNS) {
3446 max_insns = TCG_MAX_INSNS;
3449 gen_tb_start(tb);
/* Main translation loop: one m68k instruction per iteration.  */
3450 do {
3451 pc_offset = dc->pc - pc_start;
3452 gen_throws_exception = NULL;
3453 tcg_gen_insn_start(dc->pc, dc->cc_op);
3454 num_insns++;
3456 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
3457 gen_exception(dc, dc->pc, EXCP_DEBUG);
3458 dc->is_jmp = DISAS_JUMP;
3459 /* The address covered by the breakpoint must be included in
3460 [tb->pc, tb->pc + tb->size) in order to for it to be
3461 properly cleared -- thus we increment the PC here so that
3462 the logic setting tb->size below does the right thing. */
3463 dc->pc += 2;
3464 break;
/* For icount: the last instruction may do I/O, so bracket it.  */
3467 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
3468 gen_io_start();
3471 dc->insn_pc = dc->pc;
3472 disas_m68k_insn(env, dc);
/* Stop on branch/exception, full op buffer, single-stepping, page
   boundary (with slack for multi-word insns), or insn budget.  */
3473 } while (!dc->is_jmp && !tcg_op_buf_full() &&
3474 !cs->singlestep_enabled &&
3475 !singlestep &&
3476 (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
3477 num_insns < max_insns);
3479 if (tb->cflags & CF_LAST_IO)
3480 gen_io_end();
3481 if (unlikely(cs->singlestep_enabled)) {
3482 /* Make sure the pc is updated, and raise a debug exception. */
3483 if (!dc->is_jmp) {
3484 update_cc_op(dc);
3485 tcg_gen_movi_i32(QREG_PC, dc->pc);
3487 gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
3488 } else {
/* Normal TB epilogue, keyed on how translation stopped.  */
3489 switch(dc->is_jmp) {
3490 case DISAS_NEXT:
3491 update_cc_op(dc);
3492 gen_jmp_tb(dc, 0, dc->pc);
3493 break;
3494 default:
3495 case DISAS_JUMP:
3496 case DISAS_UPDATE:
3497 update_cc_op(dc);
3498 /* indicate that the hash table must be used to find the next TB */
3499 tcg_gen_exit_tb(0);
3500 break;
3501 case DISAS_TB_JUMP:
3502 /* nothing more to generate */
3503 break;
3506 gen_tb_end(tb, num_insns);
3508 #ifdef DEBUG_DISAS
3509 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
3510 && qemu_log_in_addr_range(pc_start)) {
3511 qemu_log("----------------\n");
3512 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3513 log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
3514 qemu_log("\n");
3516 #endif
3517 tb->size = dc->pc - pc_start;
3518 tb->icount = num_insns;
/* Dump CPU state for the monitor / -d cpu logging: data, address and FP
   registers, PC, SR with CCR flag letters, and the last FP result.  */
3521 void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
3522 int flags)
3524 M68kCPU *cpu = M68K_CPU(cs);
3525 CPUM68KState *env = &cpu->env;
3526 int i;
3527 uint16_t sr;
3528 CPU_DoubleU u;
3529 for (i = 0; i < 8; i++)
3531 u.d = env->fregs[i];
3532 cpu_fprintf(f, "D%d = %08x A%d = %08x F%d = %08x%08x (%12g)\n",
3533 i, env->dregs[i], i, env->aregs[i],
3534 i, u.l.upper, u.l.lower, *(double *)&u.d);
3536 cpu_fprintf (f, "PC = %08x ", env->pc);
/* SR holds the system bits; the CCR flags are reconstructed from the
   lazily-evaluated condition codes.  */
3537 sr = env->sr | cpu_m68k_get_ccr(env);
3538 cpu_fprintf(f, "SR = %04x %c%c%c%c%c ", sr, (sr & CCF_X) ? 'X' : '-',
3539 (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
3540 (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
/* NOTE(review): *(double *)&env->fp_result type-puns fp_result; if its
   declared type is not double this violates strict aliasing -- confirm
   against the CPUM68KState definition.  */
3541 cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
3544 void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
3545 target_ulong *data)
3547 int cc_op = data[1];
3548 env->pc = data[0];
3549 if (cc_op != CC_OP_DYNAMIC) {
3550 env->cc_op = cc_op;