1 /*
2 * CRIS emulation for qemu: main translation routines.
4 * Copyright (c) 2008 AXIS Communications AB
5 * Written by Edgar E. Iglesias.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 * FIXME:
23 * The condition code translation is in need of attention.
26 #include "cpu.h"
27 #include "disas.h"
28 #include "tcg-op.h"
29 #include "helper.h"
30 #include "mmu.h"
31 #include "crisv32-decode.h"
33 #define GEN_HELPER 1
34 #include "helper.h"
36 #define DISAS_CRIS 0
37 #if DISAS_CRIS
38 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
39 #else
40 # define LOG_DIS(...) do { } while (0)
41 #endif
43 #define D(x)
44 #define BUG() (gen_BUG(dc, __FILE__, __LINE__))
45 #define BUG_ON(x) ({if (x) BUG();})
47 #define DISAS_SWI 5
49 /* Used by the decoder. */
50 #define EXTRACT_FIELD(src, start, end) \
51 (((src) >> start) & ((1 << (end - start + 1)) - 1))
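/* For example, EXTRACT_FIELD(0xabcd, 4, 7) evaluates to
   (0xabcd >> 4) & 0xf == 0xc, i.e. bits 4..7 of the instruction word. */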
53 #define CC_MASK_NZ 0xc
54 #define CC_MASK_NZV 0xe
55 #define CC_MASK_NZVC 0xf
56 #define CC_MASK_RNZV 0x10e
58 static TCGv_ptr cpu_env;
59 static TCGv cpu_R[16];
60 static TCGv cpu_PR[16];
61 static TCGv cc_x;
62 static TCGv cc_src;
63 static TCGv cc_dest;
64 static TCGv cc_result;
65 static TCGv cc_op;
66 static TCGv cc_size;
67 static TCGv cc_mask;
69 static TCGv env_btaken;
70 static TCGv env_btarget;
71 static TCGv env_pc;
73 #include "gen-icount.h"
75 /* This is the state at translation time. */
76 typedef struct DisasContext {
77 CPUCRISState *env;
78 target_ulong pc, ppc;
80 /* Decoder. */
81 unsigned int (*decoder)(struct DisasContext *dc);
82 uint32_t ir;
83 uint32_t opcode;
84 unsigned int op1;
85 unsigned int op2;
86 unsigned int zsize, zzsize;
87 unsigned int mode;
88 unsigned int postinc;
90 unsigned int size;
91 unsigned int src;
92 unsigned int dst;
93 unsigned int cond;
95 int update_cc;
96 int cc_op;
97 int cc_size;
98 uint32_t cc_mask;
100 int cc_size_uptodate; /* -1 invalid or last written value. */
102 int cc_x_uptodate; /* 1 - ccs, 2 - known | X_FLAG. 0 not uptodate. */
103 int flags_uptodate; /* Whether or not $ccs is up to date. */
104 int flagx_known; /* Whether or not flags_x has the x flag known at
105 translation time. */
106 int flags_x;
108 int clear_x; /* Clear x after this insn? */
109 int clear_prefix; /* Clear prefix after this insn? */
110 int clear_locked_irq; /* Clear the irq lockout. */
111 int cpustate_changed;
112 unsigned int tb_flags; /* tb dependent flags. */
113 int is_jmp;
115 #define JMP_NOJMP 0
116 #define JMP_DIRECT 1
117 #define JMP_DIRECT_CC 2
118 #define JMP_INDIRECT 3
119 int jmp; /* 0=nojmp, 1=direct, 2=indirect. */
120 uint32_t jmp_pc;
122 int delayed_branch;
124 struct TranslationBlock *tb;
125 int singlestep_enabled;
126 } DisasContext;
128 static void gen_BUG(DisasContext *dc, const char *file, int line)
130 printf ("BUG: pc=%x %s %d\n", dc->pc, file, line);
131 qemu_log("BUG: pc=%x %s %d\n", dc->pc, file, line);
132 cpu_abort(dc->env, "%s:%d\n", file, line);
135 static const char *regnames[] =
137 "$r0", "$r1", "$r2", "$r3",
138 "$r4", "$r5", "$r6", "$r7",
139 "$r8", "$r9", "$r10", "$r11",
140 "$r12", "$r13", "$sp", "$acr",
142 static const char *pregnames[] =
144 "$bz", "$vr", "$pid", "$srs",
145 "$wz", "$exs", "$eda", "$mof",
146 "$dz", "$ebp", "$erp", "$srp",
147 "$nrp", "$ccs", "$usp", "$spc",
150 /* We need this table to handle preg-moves with implicit width. */
151 static int preg_sizes[] = {
152 1, /* bz. */
153 1, /* vr. */
154 4, /* pid. */
155 1, /* srs. */
156 2, /* wz. */
157 4, 4, 4,
158 4, 4, 4, 4,
159 4, 4, 4, 4,
162 #define t_gen_mov_TN_env(tn, member) \
163 _t_gen_mov_TN_env((tn), offsetof(CPUCRISState, member))
164 #define t_gen_mov_env_TN(member, tn) \
165 _t_gen_mov_env_TN(offsetof(CPUCRISState, member), (tn))
167 static inline void t_gen_mov_TN_reg(TCGv tn, int r)
169 if (r < 0 || r > 15)
170 fprintf(stderr, "wrong register read $r%d\n", r);
171 tcg_gen_mov_tl(tn, cpu_R[r]);
173 static inline void t_gen_mov_reg_TN(int r, TCGv tn)
175 if (r < 0 || r > 15)
176 fprintf(stderr, "wrong register write $r%d\n", r);
177 tcg_gen_mov_tl(cpu_R[r], tn);
180 static inline void _t_gen_mov_TN_env(TCGv tn, int offset)
182 if (offset > sizeof (CPUCRISState))
183 fprintf(stderr, "wrong load from env from off=%d\n", offset);
184 tcg_gen_ld_tl(tn, cpu_env, offset);
186 static inline void _t_gen_mov_env_TN(int offset, TCGv tn)
188 if (offset > sizeof (CPUCRISState))
189 fprintf(stderr, "wrong store to env at off=%d\n", offset);
190 tcg_gen_st_tl(tn, cpu_env, offset);
193 static inline void t_gen_mov_TN_preg(TCGv tn, int r)
195 if (r < 0 || r > 15)
196 fprintf(stderr, "wrong register read $p%d\n", r);
197 if (r == PR_BZ || r == PR_WZ || r == PR_DZ)
198 tcg_gen_mov_tl(tn, tcg_const_tl(0));
199 else if (r == PR_VR)
200 tcg_gen_mov_tl(tn, tcg_const_tl(32));
201 else
202 tcg_gen_mov_tl(tn, cpu_PR[r]);
204 static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn)
206 if (r < 0 || r > 15)
207 fprintf(stderr, "wrong register write $p%d\n", r);
208 if (r == PR_BZ || r == PR_WZ || r == PR_DZ)
209 return;
210 else if (r == PR_SRS)
211 tcg_gen_andi_tl(cpu_PR[r], tn, 3);
212 else {
213 if (r == PR_PID)
214 gen_helper_tlb_flush_pid(tn);
215 if (dc->tb_flags & S_FLAG && r == PR_SPC)
216 gen_helper_spc_write(tn);
217 else if (r == PR_CCS)
218 dc->cpustate_changed = 1;
219 tcg_gen_mov_tl(cpu_PR[r], tn);
223 /* Sign extend at translation time. */
224 static int sign_extend(unsigned int val, unsigned int width)
226 int sval;
228 /* LSL. */
229 val <<= 31 - width;
230 sval = val;
231 /* ASR. */
232 sval >>= 31 - width;
233 return sval;
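/* For example, with a 6-bit quick immediate (sign bit at position 5):
   sign_extend(0x1f, 5) == 31 and sign_extend(0x20, 5) == -32. */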
236 static int cris_fetch(DisasContext *dc, uint32_t addr,
237 unsigned int size, unsigned int sign)
239 int r;
241 switch (size) {
242 case 4:
244 r = ldl_code(addr);
245 break;
247 case 2:
249 if (sign) {
250 r = ldsw_code(addr);
251 } else {
252 r = lduw_code(addr);
254 break;
256 case 1:
258 if (sign) {
259 r = ldsb_code(addr);
260 } else {
261 r = ldub_code(addr);
263 break;
265 default:
266 cpu_abort(dc->env, "Invalid fetch size %d\n", size);
267 break;
269 return r;
272 static void cris_lock_irq(DisasContext *dc)
274 dc->clear_locked_irq = 0;
275 t_gen_mov_env_TN(locked_irq, tcg_const_tl(1));
278 static inline void t_gen_raise_exception(uint32_t index)
280 TCGv_i32 tmp = tcg_const_i32(index);
281 gen_helper_raise_exception(tmp);
282 tcg_temp_free_i32(tmp);
285 static void t_gen_lsl(TCGv d, TCGv a, TCGv b)
287 TCGv t0, t_31;
289 t0 = tcg_temp_new();
290 t_31 = tcg_const_tl(31);
291 tcg_gen_shl_tl(d, a, b);
293 tcg_gen_sub_tl(t0, t_31, b);
294 tcg_gen_sar_tl(t0, t0, t_31);
295 tcg_gen_and_tl(t0, t0, d);
296 tcg_gen_xor_tl(d, d, t0);
297 tcg_temp_free(t0);
298 tcg_temp_free(t_31);
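/* TCG 32-bit shifts are only defined for counts 0..31, so the mask above
   handles larger counts branch-free: (31 - b) >> 31 (arithmetic) is
   all-ones exactly when b > 31, and xor-ing (d & mask) into d then clears
   the result to 0 in that case. t_gen_lsr below uses the same trick. */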
301 static void t_gen_lsr(TCGv d, TCGv a, TCGv b)
303 TCGv t0, t_31;
305 t0 = tcg_temp_new();
306 t_31 = tcg_temp_new();
307 tcg_gen_shr_tl(d, a, b);
309 tcg_gen_movi_tl(t_31, 31);
310 tcg_gen_sub_tl(t0, t_31, b);
311 tcg_gen_sar_tl(t0, t0, t_31);
312 tcg_gen_and_tl(t0, t0, d);
313 tcg_gen_xor_tl(d, d, t0);
314 tcg_temp_free(t0);
315 tcg_temp_free(t_31);
318 static void t_gen_asr(TCGv d, TCGv a, TCGv b)
320 TCGv t0, t_31;
322 t0 = tcg_temp_new();
323 t_31 = tcg_temp_new();
324 tcg_gen_sar_tl(d, a, b);
326 tcg_gen_movi_tl(t_31, 31);
327 tcg_gen_sub_tl(t0, t_31, b);
328 tcg_gen_sar_tl(t0, t0, t_31);
329 tcg_gen_or_tl(d, d, t0);
330 tcg_temp_free(t0);
331 tcg_temp_free(t_31);
334 /* 64-bit signed mul, lower result in d and upper in d2. */
335 static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
337 TCGv_i64 t0, t1;
339 t0 = tcg_temp_new_i64();
340 t1 = tcg_temp_new_i64();
342 tcg_gen_ext_i32_i64(t0, a);
343 tcg_gen_ext_i32_i64(t1, b);
344 tcg_gen_mul_i64(t0, t0, t1);
346 tcg_gen_trunc_i64_i32(d, t0);
347 tcg_gen_shri_i64(t0, t0, 32);
348 tcg_gen_trunc_i64_i32(d2, t0);
350 tcg_temp_free_i64(t0);
351 tcg_temp_free_i64(t1);
354 /* 64-bit unsigned mul, lower result in d and upper in d2. */
355 static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
357 TCGv_i64 t0, t1;
359 t0 = tcg_temp_new_i64();
360 t1 = tcg_temp_new_i64();
362 tcg_gen_extu_i32_i64(t0, a);
363 tcg_gen_extu_i32_i64(t1, b);
364 tcg_gen_mul_i64(t0, t0, t1);
366 tcg_gen_trunc_i64_i32(d, t0);
367 tcg_gen_shri_i64(t0, t0, 32);
368 tcg_gen_trunc_i64_i32(d2, t0);
370 tcg_temp_free_i64(t0);
371 tcg_temp_free_i64(t1);
374 static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b)
376 int l1;
378 l1 = gen_new_label();
381 * d <<= 1
382 * if (d >= s)
383 * d -= s;
385 tcg_gen_shli_tl(d, a, 1);
386 tcg_gen_brcond_tl(TCG_COND_LTU, d, b, l1);
387 tcg_gen_sub_tl(d, d, b);
388 gen_set_label(l1);
391 static void t_gen_cris_mstep(TCGv d, TCGv a, TCGv b, TCGv ccs)
393 TCGv t;
396 * d <<= 1
397 * if (n)
398 * d += s;
400 t = tcg_temp_new();
401 tcg_gen_shli_tl(d, a, 1);
402 tcg_gen_shli_tl(t, ccs, 31 - 3);
403 tcg_gen_sari_tl(t, t, 31);
404 tcg_gen_and_tl(t, t, b);
405 tcg_gen_add_tl(d, d, t);
406 tcg_temp_free(t);
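/* Shifting ccs left by (31 - 3) puts the N flag (bit 3 of $ccs) into the
   sign bit; the arithmetic shift right by 31 then gives an all-ones mask
   when N is set and zero otherwise, so b is added only when N was set,
   matching the pseudo-code above without a branch. */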
410 /* Extended arithmetic on CRIS. */
410 static inline void t_gen_add_flag(TCGv d, int flag)
412 TCGv c;
414 c = tcg_temp_new();
415 t_gen_mov_TN_preg(c, PR_CCS);
416 /* Propagate carry into d. */
417 tcg_gen_andi_tl(c, c, 1 << flag);
418 if (flag)
419 tcg_gen_shri_tl(c, c, flag);
420 tcg_gen_add_tl(d, d, c);
421 tcg_temp_free(c);
424 static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
426 if (dc->flagx_known) {
427 if (dc->flags_x) {
428 TCGv c;
430 c = tcg_temp_new();
431 t_gen_mov_TN_preg(c, PR_CCS);
432 /* C flag is already at bit 0. */
433 tcg_gen_andi_tl(c, c, C_FLAG);
434 tcg_gen_add_tl(d, d, c);
435 tcg_temp_free(c);
437 } else {
438 TCGv x, c;
440 x = tcg_temp_new();
441 c = tcg_temp_new();
442 t_gen_mov_TN_preg(x, PR_CCS);
443 tcg_gen_mov_tl(c, x);
445 /* Propagate carry into d if X is set. Branch free. */
446 tcg_gen_andi_tl(c, c, C_FLAG);
447 tcg_gen_andi_tl(x, x, X_FLAG);
448 tcg_gen_shri_tl(x, x, 4);
450 tcg_gen_and_tl(x, x, c);
451 tcg_gen_add_tl(d, d, x);
452 tcg_temp_free(x);
453 tcg_temp_free(c);
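/* In the dynamic case above the carry is added only when both X (bit 4)
   and C (bit 0) are set in $ccs: X is shifted down to bit 0 and and-ed
   with C, giving a 0/1 value that is added to d without branching.
   t_gen_subx_carry below uses the same pattern with a subtract. */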
457 static inline void t_gen_subx_carry(DisasContext *dc, TCGv d)
459 if (dc->flagx_known) {
460 if (dc->flags_x) {
461 TCGv c;
463 c = tcg_temp_new();
464 t_gen_mov_TN_preg(c, PR_CCS);
465 /* C flag is already at bit 0. */
466 tcg_gen_andi_tl(c, c, C_FLAG);
467 tcg_gen_sub_tl(d, d, c);
468 tcg_temp_free(c);
470 } else {
471 TCGv x, c;
473 x = tcg_temp_new();
474 c = tcg_temp_new();
475 t_gen_mov_TN_preg(x, PR_CCS);
476 tcg_gen_mov_tl(c, x);
478 /* Propagate carry into d if X is set. Branch free. */
479 tcg_gen_andi_tl(c, c, C_FLAG);
480 tcg_gen_andi_tl(x, x, X_FLAG);
481 tcg_gen_shri_tl(x, x, 4);
483 tcg_gen_and_tl(x, x, c);
484 tcg_gen_sub_tl(d, d, x);
485 tcg_temp_free(x);
486 tcg_temp_free(c);
490 /* Swap the two bytes within each half word of the s operand.
491 T0 = ((T0 << 8) & 0xff00ff00) | ((T0 >> 8) & 0x00ff00ff) */
492 static inline void t_gen_swapb(TCGv d, TCGv s)
494 TCGv t, org_s;
496 t = tcg_temp_new();
497 org_s = tcg_temp_new();
499 /* d and s may refer to the same object. */
500 tcg_gen_mov_tl(org_s, s);
501 tcg_gen_shli_tl(t, org_s, 8);
502 tcg_gen_andi_tl(d, t, 0xff00ff00);
503 tcg_gen_shri_tl(t, org_s, 8);
504 tcg_gen_andi_tl(t, t, 0x00ff00ff);
505 tcg_gen_or_tl(d, d, t);
506 tcg_temp_free(t);
507 tcg_temp_free(org_s);
510 /* Swap the halfwords of the s operand. */
511 static inline void t_gen_swapw(TCGv d, TCGv s)
513 TCGv t;
514 /* d and s may refer to the same object. */
515 t = tcg_temp_new();
516 tcg_gen_mov_tl(t, s);
517 tcg_gen_shli_tl(d, t, 16);
518 tcg_gen_shri_tl(t, t, 16);
519 tcg_gen_or_tl(d, d, t);
520 tcg_temp_free(t);
523 /* Reverse the bits within each byte.
524 T0 = (((T0 << 7) & 0x80808080) |
525 ((T0 << 5) & 0x40404040) |
526 ((T0 << 3) & 0x20202020) |
527 ((T0 << 1) & 0x10101010) |
528 ((T0 >> 1) & 0x08080808) |
529 ((T0 >> 3) & 0x04040404) |
530 ((T0 >> 5) & 0x02020202) |
531 ((T0 >> 7) & 0x01010101));
533 static inline void t_gen_swapr(TCGv d, TCGv s)
535 struct {
536 int shift; /* LSL when positive, LSR when negative. */
537 uint32_t mask;
538 } bitrev [] = {
539 {7, 0x80808080},
540 {5, 0x40404040},
541 {3, 0x20202020},
542 {1, 0x10101010},
543 {-1, 0x08080808},
544 {-3, 0x04040404},
545 {-5, 0x02020202},
546 {-7, 0x01010101}
548 int i;
549 TCGv t, org_s;
551 /* d and s may refer to the same object. */
552 t = tcg_temp_new();
553 org_s = tcg_temp_new();
554 tcg_gen_mov_tl(org_s, s);
556 tcg_gen_shli_tl(t, org_s, bitrev[0].shift);
557 tcg_gen_andi_tl(d, t, bitrev[0].mask);
558 for (i = 1; i < ARRAY_SIZE(bitrev); i++) {
559 if (bitrev[i].shift >= 0) {
560 tcg_gen_shli_tl(t, org_s, bitrev[i].shift);
561 } else {
562 tcg_gen_shri_tl(t, org_s, -bitrev[i].shift);
564 tcg_gen_andi_tl(t, t, bitrev[i].mask);
565 tcg_gen_or_tl(d, d, t);
567 tcg_temp_free(t);
568 tcg_temp_free(org_s);
571 static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false)
573 int l1;
575 l1 = gen_new_label();
577 /* Conditional jmp. */
578 tcg_gen_mov_tl(env_pc, pc_false);
579 tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
580 tcg_gen_mov_tl(env_pc, pc_true);
581 gen_set_label(l1);
584 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
586 TranslationBlock *tb;
587 tb = dc->tb;
588 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
589 tcg_gen_goto_tb(n);
590 tcg_gen_movi_tl(env_pc, dest);
591 tcg_gen_exit_tb((tcg_target_long)tb + n);
592 } else {
593 tcg_gen_movi_tl(env_pc, dest);
594 tcg_gen_exit_tb(0);
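/* TB chaining (goto_tb + exit_tb(tb + n)) is only used when the branch
   target is on the same guest page as this TB; otherwise env_pc is set
   and exit_tb(0) returns to the main loop so the destination gets looked
   up normally. */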
598 static inline void cris_clear_x_flag(DisasContext *dc)
600 if (dc->flagx_known && dc->flags_x)
601 dc->flags_uptodate = 0;
603 dc->flagx_known = 1;
604 dc->flags_x = 0;
607 static void cris_flush_cc_state(DisasContext *dc)
609 if (dc->cc_size_uptodate != dc->cc_size) {
610 tcg_gen_movi_tl(cc_size, dc->cc_size);
611 dc->cc_size_uptodate = dc->cc_size;
613 tcg_gen_movi_tl(cc_op, dc->cc_op);
614 tcg_gen_movi_tl(cc_mask, dc->cc_mask);
617 static void cris_evaluate_flags(DisasContext *dc)
619 if (dc->flags_uptodate)
620 return;
622 cris_flush_cc_state(dc);
624 switch (dc->cc_op)
626 case CC_OP_MCP:
627 gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS],
628 cpu_PR[PR_CCS], cc_src,
629 cc_dest, cc_result);
630 break;
631 case CC_OP_MULS:
632 gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS],
633 cpu_PR[PR_CCS], cc_result,
634 cpu_PR[PR_MOF]);
635 break;
636 case CC_OP_MULU:
637 gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS],
638 cpu_PR[PR_CCS], cc_result,
639 cpu_PR[PR_MOF]);
640 break;
641 case CC_OP_MOVE:
642 case CC_OP_AND:
643 case CC_OP_OR:
644 case CC_OP_XOR:
645 case CC_OP_ASR:
646 case CC_OP_LSR:
647 case CC_OP_LSL:
648 switch (dc->cc_size)
650 case 4:
651 gen_helper_evaluate_flags_move_4(cpu_PR[PR_CCS],
652 cpu_PR[PR_CCS], cc_result);
653 break;
654 case 2:
655 gen_helper_evaluate_flags_move_2(cpu_PR[PR_CCS],
656 cpu_PR[PR_CCS], cc_result);
657 break;
658 default:
659 gen_helper_evaluate_flags();
660 break;
662 break;
663 case CC_OP_FLAGS:
664 /* live. */
665 break;
666 case CC_OP_SUB:
667 case CC_OP_CMP:
668 if (dc->cc_size == 4)
669 gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS],
670 cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
671 else
672 gen_helper_evaluate_flags();
674 break;
675 default:
676 switch (dc->cc_size)
678 case 4:
679 gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS],
680 cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
681 break;
682 default:
683 gen_helper_evaluate_flags();
684 break;
686 break;
689 if (dc->flagx_known) {
690 if (dc->flags_x)
691 tcg_gen_ori_tl(cpu_PR[PR_CCS],
692 cpu_PR[PR_CCS], X_FLAG);
693 else if (dc->cc_op == CC_OP_FLAGS)
694 tcg_gen_andi_tl(cpu_PR[PR_CCS],
695 cpu_PR[PR_CCS], ~X_FLAG);
697 dc->flags_uptodate = 1;
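/* Flags are evaluated lazily: the ALU emitters only record the last
   operation in cc_op/cc_size/cc_mask and its operands in cc_src, cc_dest
   and cc_result. The switch above folds that state into $ccs with the
   matching evaluate_flags helper only when a consumer (branch, scc,
   reads of $ccs, ...) actually needs the live flags. */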
700 static void cris_cc_mask(DisasContext *dc, unsigned int mask)
702 uint32_t ovl;
704 if (!mask) {
705 dc->update_cc = 0;
706 return;
709 /* Check if we need to evaluate the condition codes due to
710 CC overlaying. */
711 ovl = (dc->cc_mask ^ mask) & ~mask;
712 if (ovl) {
713 /* TODO: optimize this case. It triggers all the time. */
714 cris_evaluate_flags (dc);
716 dc->cc_mask = mask;
717 dc->update_cc = 1;
720 static void cris_update_cc_op(DisasContext *dc, int op, int size)
722 dc->cc_op = op;
723 dc->cc_size = size;
724 dc->flags_uptodate = 0;
727 static inline void cris_update_cc_x(DisasContext *dc)
729 /* Save the x flag state at the time of the cc snapshot. */
730 if (dc->flagx_known) {
731 if (dc->cc_x_uptodate == (2 | dc->flags_x))
732 return;
733 tcg_gen_movi_tl(cc_x, dc->flags_x);
734 dc->cc_x_uptodate = 2 | dc->flags_x;
736 else {
737 tcg_gen_andi_tl(cc_x, cpu_PR[PR_CCS], X_FLAG);
738 dc->cc_x_uptodate = 1;
742 /* Update cc prior to executing ALU op. Needs source operands untouched. */
743 static void cris_pre_alu_update_cc(DisasContext *dc, int op,
744 TCGv dst, TCGv src, int size)
746 if (dc->update_cc) {
747 cris_update_cc_op(dc, op, size);
748 tcg_gen_mov_tl(cc_src, src);
750 if (op != CC_OP_MOVE
751 && op != CC_OP_AND
752 && op != CC_OP_OR
753 && op != CC_OP_XOR
754 && op != CC_OP_ASR
755 && op != CC_OP_LSR
756 && op != CC_OP_LSL)
757 tcg_gen_mov_tl(cc_dest, dst);
759 cris_update_cc_x(dc);
763 /* Update cc after executing ALU op. Needs the result. */
764 static inline void cris_update_result(DisasContext *dc, TCGv res)
766 if (dc->update_cc)
767 tcg_gen_mov_tl(cc_result, res);
770 /* Emit the ALU operation for op; sub-dword results are masked down to size. */
771 static void cris_alu_op_exec(DisasContext *dc, int op,
772 TCGv dst, TCGv a, TCGv b, int size)
774 /* Emit the ALU insns. */
775 switch (op)
777 case CC_OP_ADD:
778 tcg_gen_add_tl(dst, a, b);
779 /* Extended arithmetic. */
780 t_gen_addx_carry(dc, dst);
781 break;
782 case CC_OP_ADDC:
783 tcg_gen_add_tl(dst, a, b);
784 t_gen_add_flag(dst, 0); /* C_FLAG. */
785 break;
786 case CC_OP_MCP:
787 tcg_gen_add_tl(dst, a, b);
788 t_gen_add_flag(dst, 8); /* R_FLAG. */
789 break;
790 case CC_OP_SUB:
791 tcg_gen_sub_tl(dst, a, b);
792 /* Extended arithmetic. */
793 t_gen_subx_carry(dc, dst);
794 break;
795 case CC_OP_MOVE:
796 tcg_gen_mov_tl(dst, b);
797 break;
798 case CC_OP_OR:
799 tcg_gen_or_tl(dst, a, b);
800 break;
801 case CC_OP_AND:
802 tcg_gen_and_tl(dst, a, b);
803 break;
804 case CC_OP_XOR:
805 tcg_gen_xor_tl(dst, a, b);
806 break;
807 case CC_OP_LSL:
808 t_gen_lsl(dst, a, b);
809 break;
810 case CC_OP_LSR:
811 t_gen_lsr(dst, a, b);
812 break;
813 case CC_OP_ASR:
814 t_gen_asr(dst, a, b);
815 break;
816 case CC_OP_NEG:
817 tcg_gen_neg_tl(dst, b);
818 /* Extended arithmetic. */
819 t_gen_subx_carry(dc, dst);
820 break;
821 case CC_OP_LZ:
822 gen_helper_lz(dst, b);
823 break;
824 case CC_OP_MULS:
825 t_gen_muls(dst, cpu_PR[PR_MOF], a, b);
826 break;
827 case CC_OP_MULU:
828 t_gen_mulu(dst, cpu_PR[PR_MOF], a, b);
829 break;
830 case CC_OP_DSTEP:
831 t_gen_cris_dstep(dst, a, b);
832 break;
833 case CC_OP_MSTEP:
834 t_gen_cris_mstep(dst, a, b, cpu_PR[PR_CCS]);
835 break;
836 case CC_OP_BOUND:
838 int l1;
839 l1 = gen_new_label();
840 tcg_gen_mov_tl(dst, a);
841 tcg_gen_brcond_tl(TCG_COND_LEU, a, b, l1);
842 tcg_gen_mov_tl(dst, b);
843 gen_set_label(l1);
845 break;
846 case CC_OP_CMP:
847 tcg_gen_sub_tl(dst, a, b);
848 /* Extended arithmetic. */
849 t_gen_subx_carry(dc, dst);
850 break;
851 default:
852 qemu_log("illegal ALU op.\n");
853 BUG();
854 break;
857 if (size == 1)
858 tcg_gen_andi_tl(dst, dst, 0xff);
859 else if (size == 2)
860 tcg_gen_andi_tl(dst, dst, 0xffff);
863 static void cris_alu(DisasContext *dc, int op,
864 TCGv d, TCGv op_a, TCGv op_b, int size)
866 TCGv tmp;
867 int writeback;
869 writeback = 1;
871 if (op == CC_OP_CMP) {
872 tmp = tcg_temp_new();
873 writeback = 0;
874 } else if (size == 4) {
875 tmp = d;
876 writeback = 0;
877 } else
878 tmp = tcg_temp_new();
881 cris_pre_alu_update_cc(dc, op, op_a, op_b, size);
882 cris_alu_op_exec(dc, op, tmp, op_a, op_b, size);
883 cris_update_result(dc, tmp);
885 /* Writeback. */
886 if (writeback) {
887 if (size == 1)
888 tcg_gen_andi_tl(d, d, ~0xff);
889 else
890 tcg_gen_andi_tl(d, d, ~0xffff);
891 tcg_gen_or_tl(d, d, tmp);
893 if (!TCGV_EQUAL(tmp, d))
894 tcg_temp_free(tmp);
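/* For byte and word sized operations only the low 8 or 16 bits of the
   destination register are architecturally written, so the result is
   computed into a temporary and or-ed into d after clearing the
   corresponding low bits, leaving the upper bits untouched. */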
897 static int arith_cc(DisasContext *dc)
899 if (dc->update_cc) {
900 switch (dc->cc_op) {
901 case CC_OP_ADDC: return 1;
902 case CC_OP_ADD: return 1;
903 case CC_OP_SUB: return 1;
904 case CC_OP_DSTEP: return 1;
905 case CC_OP_LSL: return 1;
906 case CC_OP_LSR: return 1;
907 case CC_OP_ASR: return 1;
908 case CC_OP_CMP: return 1;
909 case CC_OP_NEG: return 1;
910 case CC_OP_OR: return 1;
911 case CC_OP_AND: return 1;
912 case CC_OP_XOR: return 1;
913 case CC_OP_MULU: return 1;
914 case CC_OP_MULS: return 1;
915 default:
916 return 0;
919 return 0;
922 static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
924 int arith_opt, move_opt;
926 /* TODO: optimize more condition codes. */
929 * If the flags are live, we've gotta look into the bits of CCS.
930 * Otherwise, if we just did an arithmetic operation we try to
931 * evaluate the condition code faster.
933 * When this function is done, cc should be non-zero if the condition
934 * code is true.
936 arith_opt = arith_cc(dc) && !dc->flags_uptodate;
937 move_opt = (dc->cc_op == CC_OP_MOVE);
938 switch (cond) {
939 case CC_EQ:
940 if ((arith_opt || move_opt)
941 && dc->cc_x_uptodate != (2 | X_FLAG)) {
942 tcg_gen_setcond_tl(TCG_COND_EQ, cc,
943 cc_result, tcg_const_tl(0));
945 else {
946 cris_evaluate_flags(dc);
947 tcg_gen_andi_tl(cc,
948 cpu_PR[PR_CCS], Z_FLAG);
950 break;
951 case CC_NE:
952 if ((arith_opt || move_opt)
953 && dc->cc_x_uptodate != (2 | X_FLAG)) {
954 tcg_gen_mov_tl(cc, cc_result);
955 } else {
956 cris_evaluate_flags(dc);
957 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
958 Z_FLAG);
959 tcg_gen_andi_tl(cc, cc, Z_FLAG);
961 break;
962 case CC_CS:
963 cris_evaluate_flags(dc);
964 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], C_FLAG);
965 break;
966 case CC_CC:
967 cris_evaluate_flags(dc);
968 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], C_FLAG);
969 tcg_gen_andi_tl(cc, cc, C_FLAG);
970 break;
971 case CC_VS:
972 cris_evaluate_flags(dc);
973 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], V_FLAG);
974 break;
975 case CC_VC:
976 cris_evaluate_flags(dc);
977 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
978 V_FLAG);
979 tcg_gen_andi_tl(cc, cc, V_FLAG);
980 break;
981 case CC_PL:
982 if (arith_opt || move_opt) {
983 int bits = 31;
985 if (dc->cc_size == 1)
986 bits = 7;
987 else if (dc->cc_size == 2)
988 bits = 15;
990 tcg_gen_shri_tl(cc, cc_result, bits);
991 tcg_gen_xori_tl(cc, cc, 1);
992 } else {
993 cris_evaluate_flags(dc);
994 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
995 N_FLAG);
996 tcg_gen_andi_tl(cc, cc, N_FLAG);
998 break;
999 case CC_MI:
1000 if (arith_opt || move_opt) {
1001 int bits = 31;
1003 if (dc->cc_size == 1)
1004 bits = 7;
1005 else if (dc->cc_size == 2)
1006 bits = 15;
1008 tcg_gen_shri_tl(cc, cc_result, bits);
1009 tcg_gen_andi_tl(cc, cc, 1);
1011 else {
1012 cris_evaluate_flags(dc);
1013 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
1014 N_FLAG);
1016 break;
1017 case CC_LS:
1018 cris_evaluate_flags(dc);
1019 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
1020 C_FLAG | Z_FLAG);
1021 break;
1022 case CC_HI:
1023 cris_evaluate_flags(dc);
1025 TCGv tmp;
1027 tmp = tcg_temp_new();
1028 tcg_gen_xori_tl(tmp, cpu_PR[PR_CCS],
1029 C_FLAG | Z_FLAG);
1030 /* Overlay the C flag on top of the Z. */
1031 tcg_gen_shli_tl(cc, tmp, 2);
1032 tcg_gen_and_tl(cc, tmp, cc);
1033 tcg_gen_andi_tl(cc, cc, Z_FLAG);
1035 tcg_temp_free(tmp);
1037 break;
1038 case CC_GE:
1039 cris_evaluate_flags(dc);
1040 /* Overlay the V flag on top of the N. */
1041 tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
1042 tcg_gen_xor_tl(cc,
1043 cpu_PR[PR_CCS], cc);
1044 tcg_gen_andi_tl(cc, cc, N_FLAG);
1045 tcg_gen_xori_tl(cc, cc, N_FLAG);
1046 break;
1047 case CC_LT:
1048 cris_evaluate_flags(dc);
1049 /* Overlay the V flag on top of the N. */
1050 tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
1051 tcg_gen_xor_tl(cc,
1052 cpu_PR[PR_CCS], cc);
1053 tcg_gen_andi_tl(cc, cc, N_FLAG);
1054 break;
1055 case CC_GT:
1056 cris_evaluate_flags(dc);
1058 TCGv n, z;
1060 n = tcg_temp_new();
1061 z = tcg_temp_new();
1063 /* To avoid a shift we overlay everything on
1064 the V flag. */
1065 tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
1066 tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
1067 /* invert Z. */
1068 tcg_gen_xori_tl(z, z, 2);
1070 tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
1071 tcg_gen_xori_tl(n, n, 2);
1072 tcg_gen_and_tl(cc, z, n);
1073 tcg_gen_andi_tl(cc, cc, 2);
1075 tcg_temp_free(n);
1076 tcg_temp_free(z);
1078 break;
1079 case CC_LE:
1080 cris_evaluate_flags(dc);
1082 TCGv n, z;
1084 n = tcg_temp_new();
1085 z = tcg_temp_new();
1087 /* To avoid a shift we overlay everything on
1088 the V flag. */
1089 tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
1090 tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
1092 tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
1093 tcg_gen_or_tl(cc, z, n);
1094 tcg_gen_andi_tl(cc, cc, 2);
1096 tcg_temp_free(n);
1097 tcg_temp_free(z);
1099 break;
1100 case CC_P:
1101 cris_evaluate_flags(dc);
1102 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], P_FLAG);
1103 break;
1104 case CC_A:
1105 tcg_gen_movi_tl(cc, 1);
1106 break;
1107 default:
1108 BUG();
1109 break;
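/* Several of the signed conditions above use the same overlay trick:
   the V flag (bit 1) is shifted up by two so it lines up with N (bit 3),
   and the xor with $ccs leaves N ^ V in that bit. "lt" is true when
   N ^ V is set, and "ge" is its inverse (N == V). */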
1113 static void cris_store_direct_jmp(DisasContext *dc)
1115 /* Store the direct jmp state into the cpu-state. */
1116 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1117 if (dc->jmp == JMP_DIRECT) {
1118 tcg_gen_movi_tl(env_btaken, 1);
1120 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
1121 dc->jmp = JMP_INDIRECT;
1125 static void cris_prepare_cc_branch (DisasContext *dc,
1126 int offset, int cond)
1128 /* This helps us re-schedule the micro-code to insns in delay-slots
1129 before the actual jump. */
1130 dc->delayed_branch = 2;
1131 dc->jmp = JMP_DIRECT_CC;
1132 dc->jmp_pc = dc->pc + offset;
1134 gen_tst_cc (dc, env_btaken, cond);
1135 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
1139 /* jumps, when the dest is in a live reg for example. Direct should be set
1140 when the dest addr is constant to allow tb chaining. */
1141 static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)
1143 /* This helps us re-schedule the micro-code to insns in delay-slots
1144 before the actual jump. */
1145 dc->delayed_branch = 2;
1146 dc->jmp = type;
1147 if (type == JMP_INDIRECT) {
1148 tcg_gen_movi_tl(env_btaken, 1);
1152 static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
1154 int mem_index = cpu_mmu_index(dc->env);
1156 /* If we get a fault on a delayslot we must keep the jmp state in
1157 the cpu-state to be able to re-execute the jmp. */
1158 if (dc->delayed_branch == 1)
1159 cris_store_direct_jmp(dc);
1161 tcg_gen_qemu_ld64(dst, addr, mem_index);
1164 static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
1165 unsigned int size, int sign)
1167 int mem_index = cpu_mmu_index(dc->env);
1169 /* If we get a fault on a delayslot we must keep the jmp state in
1170 the cpu-state to be able to re-execute the jmp. */
1171 if (dc->delayed_branch == 1)
1172 cris_store_direct_jmp(dc);
1174 if (size == 1) {
1175 if (sign)
1176 tcg_gen_qemu_ld8s(dst, addr, mem_index);
1177 else
1178 tcg_gen_qemu_ld8u(dst, addr, mem_index);
1180 else if (size == 2) {
1181 if (sign)
1182 tcg_gen_qemu_ld16s(dst, addr, mem_index);
1183 else
1184 tcg_gen_qemu_ld16u(dst, addr, mem_index);
1186 else if (size == 4) {
1187 tcg_gen_qemu_ld32u(dst, addr, mem_index);
1189 else {
1190 abort();
1194 static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
1195 unsigned int size)
1197 int mem_index = cpu_mmu_index(dc->env);
1199 /* If we get a fault on a delayslot we must keep the jmp state in
1200 the cpu-state to be able to re-execute the jmp. */
1201 if (dc->delayed_branch == 1)
1202 cris_store_direct_jmp(dc);
1205 /* Conditional writes. We only support the kind where X and P are known
1206 at translation time. */
1207 if (dc->flagx_known && dc->flags_x && (dc->tb_flags & P_FLAG)) {
1208 dc->postinc = 0;
1209 cris_evaluate_flags(dc);
1210 tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], C_FLAG);
1211 return;
1214 if (size == 1)
1215 tcg_gen_qemu_st8(val, addr, mem_index);
1216 else if (size == 2)
1217 tcg_gen_qemu_st16(val, addr, mem_index);
1218 else
1219 tcg_gen_qemu_st32(val, addr, mem_index);
1221 if (dc->flagx_known && dc->flags_x) {
1222 cris_evaluate_flags(dc);
1223 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~C_FLAG);
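/* Summary of the conditional-write handling above: when X and P are both
   known to be set at translation time, the store is suppressed and C is
   set in $ccs to signal the failed write; after a store that does happen
   with X set, C is cleared instead. */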
1227 static inline void t_gen_sext(TCGv d, TCGv s, int size)
1229 if (size == 1)
1230 tcg_gen_ext8s_i32(d, s);
1231 else if (size == 2)
1232 tcg_gen_ext16s_i32(d, s);
1233 else if(!TCGV_EQUAL(d, s))
1234 tcg_gen_mov_tl(d, s);
1237 static inline void t_gen_zext(TCGv d, TCGv s, int size)
1239 if (size == 1)
1240 tcg_gen_ext8u_i32(d, s);
1241 else if (size == 2)
1242 tcg_gen_ext16u_i32(d, s);
1243 else if (!TCGV_EQUAL(d, s))
1244 tcg_gen_mov_tl(d, s);
1247 #if DISAS_CRIS
1248 static char memsize_char(int size)
1250 switch (size)
1252 case 1: return 'b'; break;
1253 case 2: return 'w'; break;
1254 case 4: return 'd'; break;
1255 default:
1256 return 'x';
1257 break;
1260 #endif
1262 static inline unsigned int memsize_z(DisasContext *dc)
1264 return dc->zsize + 1;
1267 static inline unsigned int memsize_zz(DisasContext *dc)
1269 switch (dc->zzsize)
1271 case 0: return 1;
1272 case 1: return 2;
1273 default:
1274 return 4;
1278 static inline void do_postinc (DisasContext *dc, int size)
1280 if (dc->postinc)
1281 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], size);
1284 static inline void dec_prep_move_r(DisasContext *dc, int rs, int rd,
1285 int size, int s_ext, TCGv dst)
1287 if (s_ext)
1288 t_gen_sext(dst, cpu_R[rs], size);
1289 else
1290 t_gen_zext(dst, cpu_R[rs], size);
1293 /* Prepare dst and src for a register alu operation.
1294 s_ext decides if the operand1 should be sign-extended or zero-extended when
1295 needed. */
1296 static void dec_prep_alu_r(DisasContext *dc, int rs, int rd,
1297 int size, int s_ext, TCGv dst, TCGv src)
1299 dec_prep_move_r(dc, rs, rd, size, s_ext, src);
1301 if (s_ext)
1302 t_gen_sext(dst, cpu_R[rd], size);
1303 else
1304 t_gen_zext(dst, cpu_R[rd], size);
1307 static int dec_prep_move_m(DisasContext *dc, int s_ext, int memsize,
1308 TCGv dst)
1310 unsigned int rs;
1311 uint32_t imm;
1312 int is_imm;
1313 int insn_len = 2;
1315 rs = dc->op1;
1316 is_imm = rs == 15 && dc->postinc;
1318 /* Load [$rs] into dst. */
1319 if (is_imm) {
1320 insn_len = 2 + memsize;
1321 if (memsize == 1)
1322 insn_len++;
1324 imm = cris_fetch(dc, dc->pc + 2, memsize, s_ext);
1325 tcg_gen_movi_tl(dst, imm);
1326 dc->postinc = 0;
1327 } else {
1328 cris_flush_cc_state(dc);
1329 gen_load(dc, dst, cpu_R[rs], memsize, 0);
1330 if (s_ext)
1331 t_gen_sext(dst, dst, memsize);
1332 else
1333 t_gen_zext(dst, dst, memsize);
1335 return insn_len;
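/* A post-incremented $pc ($r15+) as the source is the immediate
   addressing mode: the operand is read from the instruction stream with
   cris_fetch(), insn_len grows by the operand size (byte immediates still
   take a 16-bit slot), and the post-increment is cancelled since the PC
   advance is already accounted for by insn_len. */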
1338 /* Prepare dst and src for a memory + alu operation.
1339 s_ext decides if the operand1 should be sign-extended or zero-extended when
1340 needed. */
1341 static int dec_prep_alu_m(DisasContext *dc, int s_ext, int memsize,
1342 TCGv dst, TCGv src)
1344 int insn_len;
1346 insn_len = dec_prep_move_m(dc, s_ext, memsize, src);
1347 tcg_gen_mov_tl(dst, cpu_R[dc->op2]);
1348 return insn_len;
1351 #if DISAS_CRIS
1352 static const char *cc_name(int cc)
1354 static const char *cc_names[16] = {
1355 "cc", "cs", "ne", "eq", "vc", "vs", "pl", "mi",
1356 "ls", "hi", "ge", "lt", "gt", "le", "a", "p"
1358 assert(cc < 16);
1359 return cc_names[cc];
1361 #endif
1363 /* Start of insn decoders. */
1365 static int dec_bccq(DisasContext *dc)
1367 int32_t offset;
1368 int sign;
1369 uint32_t cond = dc->op2;
1371 offset = EXTRACT_FIELD (dc->ir, 1, 7);
1372 sign = EXTRACT_FIELD(dc->ir, 0, 0);
1374 offset *= 2;
1375 offset |= sign << 8;
1376 offset = sign_extend(offset, 8);
1378 LOG_DIS("b%s %x\n", cc_name(cond), dc->pc + offset);
1380 /* op2 holds the condition-code. */
1381 cris_cc_mask(dc, 0);
1382 cris_prepare_cc_branch (dc, offset, cond);
1383 return 2;
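/* bccq packs a word-aligned displacement: bits 1..7 hold offset / 2 and
   bit 0 is the sign, placed at bit 8 before sign extension, giving a
   reach of pc-256 to pc+254 in steps of 2. */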
1385 static int dec_addoq(DisasContext *dc)
1387 int32_t imm;
1389 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 7);
1390 imm = sign_extend(dc->op1, 7);
1392 LOG_DIS("addoq %d, $r%u\n", imm, dc->op2);
1393 cris_cc_mask(dc, 0);
1394 /* Fetch register operand. */
1395 tcg_gen_addi_tl(cpu_R[R_ACR], cpu_R[dc->op2], imm);
1397 return 2;
1399 static int dec_addq(DisasContext *dc)
1401 LOG_DIS("addq %u, $r%u\n", dc->op1, dc->op2);
1403 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1405 cris_cc_mask(dc, CC_MASK_NZVC);
1407 cris_alu(dc, CC_OP_ADD,
1408 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
1409 return 2;
1411 static int dec_moveq(DisasContext *dc)
1413 uint32_t imm;
1415 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1416 imm = sign_extend(dc->op1, 5);
1417 LOG_DIS("moveq %d, $r%u\n", imm, dc->op2);
1419 tcg_gen_movi_tl(cpu_R[dc->op2], imm);
1420 return 2;
1422 static int dec_subq(DisasContext *dc)
1424 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1426 LOG_DIS("subq %u, $r%u\n", dc->op1, dc->op2);
1428 cris_cc_mask(dc, CC_MASK_NZVC);
1429 cris_alu(dc, CC_OP_SUB,
1430 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
1431 return 2;
1433 static int dec_cmpq(DisasContext *dc)
1435 uint32_t imm;
1436 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1437 imm = sign_extend(dc->op1, 5);
1439 LOG_DIS("cmpq %d, $r%d\n", imm, dc->op2);
1440 cris_cc_mask(dc, CC_MASK_NZVC);
1442 cris_alu(dc, CC_OP_CMP,
1443 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1444 return 2;
1446 static int dec_andq(DisasContext *dc)
1448 uint32_t imm;
1449 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1450 imm = sign_extend(dc->op1, 5);
1452 LOG_DIS("andq %d, $r%d\n", imm, dc->op2);
1453 cris_cc_mask(dc, CC_MASK_NZ);
1455 cris_alu(dc, CC_OP_AND,
1456 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1457 return 2;
1459 static int dec_orq(DisasContext *dc)
1461 uint32_t imm;
1462 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1463 imm = sign_extend(dc->op1, 5);
1464 LOG_DIS("orq %d, $r%d\n", imm, dc->op2);
1465 cris_cc_mask(dc, CC_MASK_NZ);
1467 cris_alu(dc, CC_OP_OR,
1468 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1469 return 2;
1471 static int dec_btstq(DisasContext *dc)
1473 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1474 LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2);
1476 cris_cc_mask(dc, CC_MASK_NZ);
1477 cris_evaluate_flags(dc);
1478 gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
1479 tcg_const_tl(dc->op1), cpu_PR[PR_CCS]);
1480 cris_alu(dc, CC_OP_MOVE,
1481 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
1482 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
1483 dc->flags_uptodate = 1;
1484 return 2;
1486 static int dec_asrq(DisasContext *dc)
1488 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1489 LOG_DIS("asrq %u, $r%d\n", dc->op1, dc->op2);
1490 cris_cc_mask(dc, CC_MASK_NZ);
1492 tcg_gen_sari_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1493 cris_alu(dc, CC_OP_MOVE,
1494 cpu_R[dc->op2],
1495 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1496 return 2;
1498 static int dec_lslq(DisasContext *dc)
1500 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1501 LOG_DIS("lslq %u, $r%d\n", dc->op1, dc->op2);
1503 cris_cc_mask(dc, CC_MASK_NZ);
1505 tcg_gen_shli_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1507 cris_alu(dc, CC_OP_MOVE,
1508 cpu_R[dc->op2],
1509 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1510 return 2;
1512 static int dec_lsrq(DisasContext *dc)
1514 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1515 LOG_DIS("lsrq %u, $r%d\n", dc->op1, dc->op2);
1517 cris_cc_mask(dc, CC_MASK_NZ);
1519 tcg_gen_shri_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1520 cris_alu(dc, CC_OP_MOVE,
1521 cpu_R[dc->op2],
1522 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1523 return 2;
1526 static int dec_move_r(DisasContext *dc)
1528 int size = memsize_zz(dc);
1530 LOG_DIS("move.%c $r%u, $r%u\n",
1531 memsize_char(size), dc->op1, dc->op2);
1533 cris_cc_mask(dc, CC_MASK_NZ);
1534 if (size == 4) {
1535 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, cpu_R[dc->op2]);
1536 cris_cc_mask(dc, CC_MASK_NZ);
1537 cris_update_cc_op(dc, CC_OP_MOVE, 4);
1538 cris_update_cc_x(dc);
1539 cris_update_result(dc, cpu_R[dc->op2]);
1541 else {
1542 TCGv t0;
1544 t0 = tcg_temp_new();
1545 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
1546 cris_alu(dc, CC_OP_MOVE,
1547 cpu_R[dc->op2],
1548 cpu_R[dc->op2], t0, size);
1549 tcg_temp_free(t0);
1551 return 2;
1554 static int dec_scc_r(DisasContext *dc)
1556 int cond = dc->op2;
1558 LOG_DIS("s%s $r%u\n",
1559 cc_name(cond), dc->op1);
1561 if (cond != CC_A)
1563 int l1;
1565 gen_tst_cc (dc, cpu_R[dc->op1], cond);
1566 l1 = gen_new_label();
1567 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_R[dc->op1], 0, l1);
1568 tcg_gen_movi_tl(cpu_R[dc->op1], 1);
1569 gen_set_label(l1);
1571 else
1572 tcg_gen_movi_tl(cpu_R[dc->op1], 1);
1574 cris_cc_mask(dc, 0);
1575 return 2;
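/* gen_tst_cc() only guarantees a non-zero value when the condition holds
   (often the raw flag bit), so scc normalizes it above: the register is
   left at 0 when the test yields 0 and forced to 1 otherwise; the
   always-true case simply writes 1. */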
1578 static inline void cris_alu_alloc_temps(DisasContext *dc, int size, TCGv *t)
1580 if (size == 4) {
1581 t[0] = cpu_R[dc->op2];
1582 t[1] = cpu_R[dc->op1];
1583 } else {
1584 t[0] = tcg_temp_new();
1585 t[1] = tcg_temp_new();
1589 static inline void cris_alu_free_temps(DisasContext *dc, int size, TCGv *t)
1591 if (size != 4) {
1592 tcg_temp_free(t[0]);
1593 tcg_temp_free(t[1]);
1597 static int dec_and_r(DisasContext *dc)
1599 TCGv t[2];
1600 int size = memsize_zz(dc);
1602 LOG_DIS("and.%c $r%u, $r%u\n",
1603 memsize_char(size), dc->op1, dc->op2);
1605 cris_cc_mask(dc, CC_MASK_NZ);
1607 cris_alu_alloc_temps(dc, size, t);
1608 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1609 cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], size);
1610 cris_alu_free_temps(dc, size, t);
1611 return 2;
1614 static int dec_lz_r(DisasContext *dc)
1616 TCGv t0;
1617 LOG_DIS("lz $r%u, $r%u\n",
1618 dc->op1, dc->op2);
1619 cris_cc_mask(dc, CC_MASK_NZ);
1620 t0 = tcg_temp_new();
1621 dec_prep_alu_r(dc, dc->op1, dc->op2, 4, 0, cpu_R[dc->op2], t0);
1622 cris_alu(dc, CC_OP_LZ, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
1623 tcg_temp_free(t0);
1624 return 2;
1627 static int dec_lsl_r(DisasContext *dc)
1629 TCGv t[2];
1630 int size = memsize_zz(dc);
1632 LOG_DIS("lsl.%c $r%u, $r%u\n",
1633 memsize_char(size), dc->op1, dc->op2);
1635 cris_cc_mask(dc, CC_MASK_NZ);
1636 cris_alu_alloc_temps(dc, size, t);
1637 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1638 tcg_gen_andi_tl(t[1], t[1], 63);
1639 cris_alu(dc, CC_OP_LSL, cpu_R[dc->op2], t[0], t[1], size);
1640 cris_alu_free_temps(dc, size, t);
1641 return 2;
1644 static int dec_lsr_r(DisasContext *dc)
1646 TCGv t[2];
1647 int size = memsize_zz(dc);
1649 LOG_DIS("lsr.%c $r%u, $r%u\n",
1650 memsize_char(size), dc->op1, dc->op2);
1652 cris_cc_mask(dc, CC_MASK_NZ);
1653 cris_alu_alloc_temps(dc, size, t);
1654 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1655 tcg_gen_andi_tl(t[1], t[1], 63);
1656 cris_alu(dc, CC_OP_LSR, cpu_R[dc->op2], t[0], t[1], size);
1657 cris_alu_free_temps(dc, size, t);
1658 return 2;
1661 static int dec_asr_r(DisasContext *dc)
1663 TCGv t[2];
1664 int size = memsize_zz(dc);
1666 LOG_DIS("asr.%c $r%u, $r%u\n",
1667 memsize_char(size), dc->op1, dc->op2);
1669 cris_cc_mask(dc, CC_MASK_NZ);
1670 cris_alu_alloc_temps(dc, size, t);
1671 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
1672 tcg_gen_andi_tl(t[1], t[1], 63);
1673 cris_alu(dc, CC_OP_ASR, cpu_R[dc->op2], t[0], t[1], size);
1674 cris_alu_free_temps(dc, size, t);
1675 return 2;
1678 static int dec_muls_r(DisasContext *dc)
1680 TCGv t[2];
1681 int size = memsize_zz(dc);
1683 LOG_DIS("muls.%c $r%u, $r%u\n",
1684 memsize_char(size), dc->op1, dc->op2);
1685 cris_cc_mask(dc, CC_MASK_NZV);
1686 cris_alu_alloc_temps(dc, size, t);
1687 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
1689 cris_alu(dc, CC_OP_MULS, cpu_R[dc->op2], t[0], t[1], 4);
1690 cris_alu_free_temps(dc, size, t);
1691 return 2;
1694 static int dec_mulu_r(DisasContext *dc)
1696 TCGv t[2];
1697 int size = memsize_zz(dc);
1699 LOG_DIS("mulu.%c $r%u, $r%u\n",
1700 memsize_char(size), dc->op1, dc->op2);
1701 cris_cc_mask(dc, CC_MASK_NZV);
1702 cris_alu_alloc_temps(dc, size, t);
1703 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1705 cris_alu(dc, CC_OP_MULU, cpu_R[dc->op2], t[0], t[1], 4);
1706 cris_alu_free_temps(dc, size, t);
1707 return 2;
1711 static int dec_dstep_r(DisasContext *dc)
1713 LOG_DIS("dstep $r%u, $r%u\n", dc->op1, dc->op2);
1714 cris_cc_mask(dc, CC_MASK_NZ);
1715 cris_alu(dc, CC_OP_DSTEP,
1716 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
1717 return 2;
1720 static int dec_xor_r(DisasContext *dc)
1722 TCGv t[2];
1723 int size = memsize_zz(dc);
1724 LOG_DIS("xor.%c $r%u, $r%u\n",
1725 memsize_char(size), dc->op1, dc->op2);
1726 BUG_ON(size != 4); /* xor is dword. */
1727 cris_cc_mask(dc, CC_MASK_NZ);
1728 cris_alu_alloc_temps(dc, size, t);
1729 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1731 cris_alu(dc, CC_OP_XOR, cpu_R[dc->op2], t[0], t[1], 4);
1732 cris_alu_free_temps(dc, size, t);
1733 return 2;
1736 static int dec_bound_r(DisasContext *dc)
1738 TCGv l0;
1739 int size = memsize_zz(dc);
1740 LOG_DIS("bound.%c $r%u, $r%u\n",
1741 memsize_char(size), dc->op1, dc->op2);
1742 cris_cc_mask(dc, CC_MASK_NZ);
1743 l0 = tcg_temp_local_new();
1744 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, l0);
1745 cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], cpu_R[dc->op2], l0, 4);
1746 tcg_temp_free(l0);
1747 return 2;
1750 static int dec_cmp_r(DisasContext *dc)
1752 TCGv t[2];
1753 int size = memsize_zz(dc);
1754 LOG_DIS("cmp.%c $r%u, $r%u\n",
1755 memsize_char(size), dc->op1, dc->op2);
1756 cris_cc_mask(dc, CC_MASK_NZVC);
1757 cris_alu_alloc_temps(dc, size, t);
1758 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1760 cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], t[0], t[1], size);
1761 cris_alu_free_temps(dc, size, t);
1762 return 2;
1765 static int dec_abs_r(DisasContext *dc)
1767 TCGv t0;
1769 LOG_DIS("abs $r%u, $r%u\n",
1770 dc->op1, dc->op2);
1771 cris_cc_mask(dc, CC_MASK_NZ);
1773 t0 = tcg_temp_new();
1774 tcg_gen_sari_tl(t0, cpu_R[dc->op1], 31);
1775 tcg_gen_xor_tl(cpu_R[dc->op2], cpu_R[dc->op1], t0);
1776 tcg_gen_sub_tl(cpu_R[dc->op2], cpu_R[dc->op2], t0);
1777 tcg_temp_free(t0);
1779 cris_alu(dc, CC_OP_MOVE,
1780 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
1781 return 2;
1784 static int dec_add_r(DisasContext *dc)
1786 TCGv t[2];
1787 int size = memsize_zz(dc);
1788 LOG_DIS("add.%c $r%u, $r%u\n",
1789 memsize_char(size), dc->op1, dc->op2);
1790 cris_cc_mask(dc, CC_MASK_NZVC);
1791 cris_alu_alloc_temps(dc, size, t);
1792 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1794 cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], size);
1795 cris_alu_free_temps(dc, size, t);
1796 return 2;
1799 static int dec_addc_r(DisasContext *dc)
1801 LOG_DIS("addc $r%u, $r%u\n",
1802 dc->op1, dc->op2);
1803 cris_evaluate_flags(dc);
1804 /* Set for this insn. */
1805 dc->flagx_known = 1;
1806 dc->flags_x = X_FLAG;
1808 cris_cc_mask(dc, CC_MASK_NZVC);
1809 cris_alu(dc, CC_OP_ADDC,
1810 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
1811 return 2;
1814 static int dec_mcp_r(DisasContext *dc)
1816 LOG_DIS("mcp $p%u, $r%u\n",
1817 dc->op2, dc->op1);
1818 cris_evaluate_flags(dc);
1819 cris_cc_mask(dc, CC_MASK_RNZV);
1820 cris_alu(dc, CC_OP_MCP,
1821 cpu_R[dc->op1], cpu_R[dc->op1], cpu_PR[dc->op2], 4);
1822 return 2;
1825 #if DISAS_CRIS
1826 static char * swapmode_name(int mode, char *modename) {
1827 int i = 0;
1828 if (mode & 8)
1829 modename[i++] = 'n';
1830 if (mode & 4)
1831 modename[i++] = 'w';
1832 if (mode & 2)
1833 modename[i++] = 'b';
1834 if (mode & 1)
1835 modename[i++] = 'r';
1836 modename[i++] = 0;
1837 return modename;
1839 #endif
1841 static int dec_swap_r(DisasContext *dc)
1843 TCGv t0;
1844 #if DISAS_CRIS
1845 char modename[8];
1846 #endif
1847 LOG_DIS("swap%s $r%u\n",
1848 swapmode_name(dc->op2, modename), dc->op1);
1850 cris_cc_mask(dc, CC_MASK_NZ);
1851 t0 = tcg_temp_new();
1852 t_gen_mov_TN_reg(t0, dc->op1);
1853 if (dc->op2 & 8)
1854 tcg_gen_not_tl(t0, t0);
1855 if (dc->op2 & 4)
1856 t_gen_swapw(t0, t0);
1857 if (dc->op2 & 2)
1858 t_gen_swapb(t0, t0);
1859 if (dc->op2 & 1)
1860 t_gen_swapr(t0, t0);
1861 cris_alu(dc, CC_OP_MOVE,
1862 cpu_R[dc->op1], cpu_R[dc->op1], t0, 4);
1863 tcg_temp_free(t0);
1864 return 2;
1867 static int dec_or_r(DisasContext *dc)
1869 TCGv t[2];
1870 int size = memsize_zz(dc);
1871 LOG_DIS("or.%c $r%u, $r%u\n",
1872 memsize_char(size), dc->op1, dc->op2);
1873 cris_cc_mask(dc, CC_MASK_NZ);
1874 cris_alu_alloc_temps(dc, size, t);
1875 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1876 cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], t[0], t[1], size);
1877 cris_alu_free_temps(dc, size, t);
1878 return 2;
1881 static int dec_addi_r(DisasContext *dc)
1883 TCGv t0;
1884 LOG_DIS("addi.%c $r%u, $r%u\n",
1885 memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
1886 cris_cc_mask(dc, 0);
1887 t0 = tcg_temp_new();
1888 tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize));
1889 tcg_gen_add_tl(cpu_R[dc->op1], cpu_R[dc->op1], t0);
1890 tcg_temp_free(t0);
1891 return 2;
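/* addi scales the index operand by the access size: the zz size field
   (0, 1 or 2) is used directly as a shift count, multiplying the index
   register by 1, 2 or 4 before the add. dec_addi_acr below is identical
   except that the sum is written to $acr. */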
1894 static int dec_addi_acr(DisasContext *dc)
1896 TCGv t0;
1897 LOG_DIS("addi.%c $r%u, $r%u, $acr\n",
1898 memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
1899 cris_cc_mask(dc, 0);
1900 t0 = tcg_temp_new();
1901 tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize));
1902 tcg_gen_add_tl(cpu_R[R_ACR], cpu_R[dc->op1], t0);
1903 tcg_temp_free(t0);
1904 return 2;
1907 static int dec_neg_r(DisasContext *dc)
1909 TCGv t[2];
1910 int size = memsize_zz(dc);
1911 LOG_DIS("neg.%c $r%u, $r%u\n",
1912 memsize_char(size), dc->op1, dc->op2);
1913 cris_cc_mask(dc, CC_MASK_NZVC);
1914 cris_alu_alloc_temps(dc, size, t);
1915 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1917 cris_alu(dc, CC_OP_NEG, cpu_R[dc->op2], t[0], t[1], size);
1918 cris_alu_free_temps(dc, size, t);
1919 return 2;
1922 static int dec_btst_r(DisasContext *dc)
1924 LOG_DIS("btst $r%u, $r%u\n",
1925 dc->op1, dc->op2);
1926 cris_cc_mask(dc, CC_MASK_NZ);
1927 cris_evaluate_flags(dc);
1928 gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
1929 cpu_R[dc->op1], cpu_PR[PR_CCS]);
1930 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2],
1931 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1932 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
1933 dc->flags_uptodate = 1;
1934 return 2;
1937 static int dec_sub_r(DisasContext *dc)
1939 TCGv t[2];
1940 int size = memsize_zz(dc);
1941 LOG_DIS("sub.%c $r%u, $r%u\n",
1942 memsize_char(size), dc->op1, dc->op2);
1943 cris_cc_mask(dc, CC_MASK_NZVC);
1944 cris_alu_alloc_temps(dc, size, t);
1945 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1946 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], size);
1947 cris_alu_free_temps(dc, size, t);
1948 return 2;
1951 /* Zero extension. From size to dword. */
1952 static int dec_movu_r(DisasContext *dc)
1954 TCGv t0;
1955 int size = memsize_z(dc);
1956 LOG_DIS("movu.%c $r%u, $r%u\n",
1957 memsize_char(size),
1958 dc->op1, dc->op2);
1960 cris_cc_mask(dc, CC_MASK_NZ);
1961 t0 = tcg_temp_new();
1962 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
1963 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
1964 tcg_temp_free(t0);
1965 return 2;
1968 /* Sign extension. From size to dword. */
1969 static int dec_movs_r(DisasContext *dc)
1971 TCGv t0;
1972 int size = memsize_z(dc);
1973 LOG_DIS("movs.%c $r%u, $r%u\n",
1974 memsize_char(size),
1975 dc->op1, dc->op2);
1977 cris_cc_mask(dc, CC_MASK_NZ);
1978 t0 = tcg_temp_new();
1979 /* Size can only be qi or hi. */
1980 t_gen_sext(t0, cpu_R[dc->op1], size);
1981 cris_alu(dc, CC_OP_MOVE,
1982 cpu_R[dc->op2], cpu_R[dc->op1], t0, 4);
1983 tcg_temp_free(t0);
1984 return 2;
1987 /* zero extension. From size to dword. */
1988 static int dec_addu_r(DisasContext *dc)
1990 TCGv t0;
1991 int size = memsize_z(dc);
1992 LOG_DIS("addu.%c $r%u, $r%u\n",
1993 memsize_char(size),
1994 dc->op1, dc->op2);
1996 cris_cc_mask(dc, CC_MASK_NZVC);
1997 t0 = tcg_temp_new();
1998 /* Size can only be qi or hi. */
1999 t_gen_zext(t0, cpu_R[dc->op1], size);
2000 cris_alu(dc, CC_OP_ADD,
2001 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2002 tcg_temp_free(t0);
2003 return 2;
2006 /* Sign extension. From size to dword. */
2007 static int dec_adds_r(DisasContext *dc)
2009 TCGv t0;
2010 int size = memsize_z(dc);
2011 LOG_DIS("adds.%c $r%u, $r%u\n",
2012 memsize_char(size),
2013 dc->op1, dc->op2);
2015 cris_cc_mask(dc, CC_MASK_NZVC);
2016 t0 = tcg_temp_new();
2017 /* Size can only be qi or hi. */
2018 t_gen_sext(t0, cpu_R[dc->op1], size);
2019 cris_alu(dc, CC_OP_ADD,
2020 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2021 tcg_temp_free(t0);
2022 return 2;
2025 /* Zero extension. From size to dword. */
2026 static int dec_subu_r(DisasContext *dc)
2028 TCGv t0;
2029 int size = memsize_z(dc);
2030 LOG_DIS("subu.%c $r%u, $r%u\n",
2031 memsize_char(size),
2032 dc->op1, dc->op2);
2034 cris_cc_mask(dc, CC_MASK_NZVC);
2035 t0 = tcg_temp_new();
2036 /* Size can only be qi or hi. */
2037 t_gen_zext(t0, cpu_R[dc->op1], size);
2038 cris_alu(dc, CC_OP_SUB,
2039 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2040 tcg_temp_free(t0);
2041 return 2;
2044 /* Sign extension. From size to dword. */
2045 static int dec_subs_r(DisasContext *dc)
2047 TCGv t0;
2048 int size = memsize_z(dc);
2049 LOG_DIS("subs.%c $r%u, $r%u\n",
2050 memsize_char(size),
2051 dc->op1, dc->op2);
2053 cris_cc_mask(dc, CC_MASK_NZVC);
2054 t0 = tcg_temp_new();
2055 /* Size can only be qi or hi. */
2056 t_gen_sext(t0, cpu_R[dc->op1], size);
2057 cris_alu(dc, CC_OP_SUB,
2058 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2059 tcg_temp_free(t0);
2060 return 2;
2063 static int dec_setclrf(DisasContext *dc)
2065 uint32_t flags;
2066 int set = (~dc->opcode >> 2) & 1;
2069 flags = (EXTRACT_FIELD(dc->ir, 12, 15) << 4)
2070 | EXTRACT_FIELD(dc->ir, 0, 3);
2071 if (set && flags == 0) {
2072 LOG_DIS("nop\n");
2073 return 2;
2074 } else if (!set && (flags & 0x20)) {
2075 LOG_DIS("di\n");
2077 else {
2078 LOG_DIS("%sf %x\n",
2079 set ? "set" : "clr",
2080 flags);
2083 /* User space is not allowed to touch these. Silently ignore. */
2084 if (dc->tb_flags & U_FLAG) {
2085 flags &= ~(S_FLAG | I_FLAG | U_FLAG);
2088 if (flags & X_FLAG) {
2089 dc->flagx_known = 1;
2090 if (set)
2091 dc->flags_x = X_FLAG;
2092 else
2093 dc->flags_x = 0;
2097 /* Break the TB if any of the SPI flags change. */
2097 if (flags & (P_FLAG | S_FLAG)) {
2098 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2099 dc->is_jmp = DISAS_UPDATE;
2100 dc->cpustate_changed = 1;
2103 /* For the I flag, only act on posedge. */
2104 if ((flags & I_FLAG)) {
2105 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2106 dc->is_jmp = DISAS_UPDATE;
2107 dc->cpustate_changed = 1;
2111 /* Simply decode the flags. */
2112 cris_evaluate_flags (dc);
2113 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
2114 cris_update_cc_x(dc);
2115 tcg_gen_movi_tl(cc_op, dc->cc_op);
2117 if (set) {
2118 if (!(dc->tb_flags & U_FLAG) && (flags & U_FLAG)) {
2119 /* Enter user mode. */
2120 t_gen_mov_env_TN(ksp, cpu_R[R_SP]);
2121 tcg_gen_mov_tl(cpu_R[R_SP], cpu_PR[PR_USP]);
2122 dc->cpustate_changed = 1;
2124 tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags);
2126 else
2127 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~flags);
2129 dc->flags_uptodate = 1;
2130 dc->clear_x = 0;
2131 return 2;
2134 static int dec_move_rs(DisasContext *dc)
2136 LOG_DIS("move $r%u, $s%u\n", dc->op1, dc->op2);
2137 cris_cc_mask(dc, 0);
2138 gen_helper_movl_sreg_reg(tcg_const_tl(dc->op2), tcg_const_tl(dc->op1));
2139 return 2;
2141 static int dec_move_sr(DisasContext *dc)
2143 LOG_DIS("move $s%u, $r%u\n", dc->op2, dc->op1);
2144 cris_cc_mask(dc, 0);
2145 gen_helper_movl_reg_sreg(tcg_const_tl(dc->op1), tcg_const_tl(dc->op2));
2146 return 2;
2149 static int dec_move_rp(DisasContext *dc)
2151 TCGv t[2];
2152 LOG_DIS("move $r%u, $p%u\n", dc->op1, dc->op2);
2153 cris_cc_mask(dc, 0);
2155 t[0] = tcg_temp_new();
2156 if (dc->op2 == PR_CCS) {
2157 cris_evaluate_flags(dc);
2158 t_gen_mov_TN_reg(t[0], dc->op1);
2159 if (dc->tb_flags & U_FLAG) {
2160 t[1] = tcg_temp_new();
2161 /* User space is not allowed to touch all flags. */
2162 tcg_gen_andi_tl(t[0], t[0], 0x39f);
2163 tcg_gen_andi_tl(t[1], cpu_PR[PR_CCS], ~0x39f);
2164 tcg_gen_or_tl(t[0], t[1], t[0]);
2165 tcg_temp_free(t[1]);
2168 else
2169 t_gen_mov_TN_reg(t[0], dc->op1);
2171 t_gen_mov_preg_TN(dc, dc->op2, t[0]);
2172 if (dc->op2 == PR_CCS) {
2173 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
2174 dc->flags_uptodate = 1;
2176 tcg_temp_free(t[0]);
2177 return 2;
2179 static int dec_move_pr(DisasContext *dc)
2181 TCGv t0;
2182 LOG_DIS("move $p%u, $r%u\n", dc->op2, dc->op1);
2183 cris_cc_mask(dc, 0);
2185 if (dc->op2 == PR_CCS)
2186 cris_evaluate_flags(dc);
2188 if (dc->op2 == PR_DZ) {
2189 tcg_gen_movi_tl(cpu_R[dc->op1], 0);
2190 } else {
2191 t0 = tcg_temp_new();
2192 t_gen_mov_TN_preg(t0, dc->op2);
2193 cris_alu(dc, CC_OP_MOVE,
2194 cpu_R[dc->op1], cpu_R[dc->op1], t0,
2195 preg_sizes[dc->op2]);
2196 tcg_temp_free(t0);
2198 return 2;
2201 static int dec_move_mr(DisasContext *dc)
2203 int memsize = memsize_zz(dc);
2204 int insn_len;
2205 LOG_DIS("move.%c [$r%u%s, $r%u\n",
2206 memsize_char(memsize),
2207 dc->op1, dc->postinc ? "+]" : "]",
2208 dc->op2);
2210 if (memsize == 4) {
2211 insn_len = dec_prep_move_m(dc, 0, 4, cpu_R[dc->op2]);
2212 cris_cc_mask(dc, CC_MASK_NZ);
2213 cris_update_cc_op(dc, CC_OP_MOVE, 4);
2214 cris_update_cc_x(dc);
2215 cris_update_result(dc, cpu_R[dc->op2]);
2217 else {
2218 TCGv t0;
2220 t0 = tcg_temp_new();
2221 insn_len = dec_prep_move_m(dc, 0, memsize, t0);
2222 cris_cc_mask(dc, CC_MASK_NZ);
2223 cris_alu(dc, CC_OP_MOVE,
2224 cpu_R[dc->op2], cpu_R[dc->op2], t0, memsize);
2225 tcg_temp_free(t0);
2227 do_postinc(dc, memsize);
2228 return insn_len;
2231 static inline void cris_alu_m_alloc_temps(TCGv *t)
2233 t[0] = tcg_temp_new();
2234 t[1] = tcg_temp_new();
2237 static inline void cris_alu_m_free_temps(TCGv *t)
2239 tcg_temp_free(t[0]);
2240 tcg_temp_free(t[1]);
2243 static int dec_movs_m(DisasContext *dc)
2245 TCGv t[2];
2246 int memsize = memsize_z(dc);
2247 int insn_len;
2248 LOG_DIS("movs.%c [$r%u%s, $r%u\n",
2249 memsize_char(memsize),
2250 dc->op1, dc->postinc ? "+]" : "]",
2251 dc->op2);
2253 cris_alu_m_alloc_temps(t);
2254 /* sign extend. */
2255 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2256 cris_cc_mask(dc, CC_MASK_NZ);
2257 cris_alu(dc, CC_OP_MOVE,
2258 cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2259 do_postinc(dc, memsize);
2260 cris_alu_m_free_temps(t);
2261 return insn_len;
2264 static int dec_addu_m(DisasContext *dc)
2266 TCGv t[2];
2267 int memsize = memsize_z(dc);
2268 int insn_len;
2269 LOG_DIS("addu.%c [$r%u%s, $r%u\n",
2270 memsize_char(memsize),
2271 dc->op1, dc->postinc ? "+]" : "]",
2272 dc->op2);
2274 cris_alu_m_alloc_temps(t);
2275 /* zero extend. */
2276 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2277 cris_cc_mask(dc, CC_MASK_NZVC);
2278 cris_alu(dc, CC_OP_ADD,
2279 cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2280 do_postinc(dc, memsize);
2281 cris_alu_m_free_temps(t);
2282 return insn_len;
2285 static int dec_adds_m(DisasContext *dc)
2287 TCGv t[2];
2288 int memsize = memsize_z(dc);
2289 int insn_len;
2290 LOG_DIS("adds.%c [$r%u%s, $r%u\n",
2291 memsize_char(memsize),
2292 dc->op1, dc->postinc ? "+]" : "]",
2293 dc->op2);
2295 cris_alu_m_alloc_temps(t);
2296 /* sign extend. */
2297 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2298 cris_cc_mask(dc, CC_MASK_NZVC);
2299 cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2300 do_postinc(dc, memsize);
2301 cris_alu_m_free_temps(t);
2302 return insn_len;
2305 static int dec_subu_m(DisasContext *dc)
2307 TCGv t[2];
2308 int memsize = memsize_z(dc);
2309 int insn_len;
2310 LOG_DIS("subu.%c [$r%u%s, $r%u\n",
2311 memsize_char(memsize),
2312 dc->op1, dc->postinc ? "+]" : "]",
2313 dc->op2);
2315 cris_alu_m_alloc_temps(t);
2316 /* zero extend. */
2317 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2318 cris_cc_mask(dc, CC_MASK_NZVC);
2319 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2320 do_postinc(dc, memsize);
2321 cris_alu_m_free_temps(t);
2322 return insn_len;
2325 static int dec_subs_m(DisasContext *dc)
2327 TCGv t[2];
2328 int memsize = memsize_z(dc);
2329 int insn_len;
2330 LOG_DIS("subs.%c [$r%u%s, $r%u\n",
2331 memsize_char(memsize),
2332 dc->op1, dc->postinc ? "+]" : "]",
2333 dc->op2);
2335 cris_alu_m_alloc_temps(t);
2336 /* sign extend. */
2337 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2338 cris_cc_mask(dc, CC_MASK_NZVC);
2339 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2340 do_postinc(dc, memsize);
2341 cris_alu_m_free_temps(t);
2342 return insn_len;
2345 static int dec_movu_m(DisasContext *dc)
2347 TCGv t[2];
2348 int memsize = memsize_z(dc);
2349 int insn_len;
2351 LOG_DIS("movu.%c [$r%u%s, $r%u\n",
2352 memsize_char(memsize),
2353 dc->op1, dc->postinc ? "+]" : "]",
2354 dc->op2);
2356 cris_alu_m_alloc_temps(t);
2357 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2358 cris_cc_mask(dc, CC_MASK_NZ);
2359 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2360 do_postinc(dc, memsize);
2361 cris_alu_m_free_temps(t);
2362 return insn_len;
2365 static int dec_cmpu_m(DisasContext *dc)
2367 TCGv t[2];
2368 int memsize = memsize_z(dc);
2369 int insn_len;
2370 LOG_DIS("cmpu.%c [$r%u%s, $r%u\n",
2371 memsize_char(memsize),
2372 dc->op1, dc->postinc ? "+]" : "]",
2373 dc->op2);
2375 cris_alu_m_alloc_temps(t);
2376 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2377 cris_cc_mask(dc, CC_MASK_NZVC);
2378 cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2379 do_postinc(dc, memsize);
2380 cris_alu_m_free_temps(t);
2381 return insn_len;
2384 static int dec_cmps_m(DisasContext *dc)
2386 TCGv t[2];
2387 int memsize = memsize_z(dc);
2388 int insn_len;
2389 LOG_DIS("cmps.%c [$r%u%s, $r%u\n",
2390 memsize_char(memsize),
2391 dc->op1, dc->postinc ? "+]" : "]",
2392 dc->op2);
2394 cris_alu_m_alloc_temps(t);
2395 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2396 cris_cc_mask(dc, CC_MASK_NZVC);
2397 cris_alu(dc, CC_OP_CMP,
2398 cpu_R[dc->op2], cpu_R[dc->op2], t[1],
2399 memsize_zz(dc));
2400 do_postinc(dc, memsize);
2401 cris_alu_m_free_temps(t);
2402 return insn_len;
2405 static int dec_cmp_m(DisasContext *dc)
2407 TCGv t[2];
2408 int memsize = memsize_zz(dc);
2409 int insn_len;
2410 LOG_DIS("cmp.%c [$r%u%s, $r%u\n",
2411 memsize_char(memsize),
2412 dc->op1, dc->postinc ? "+]" : "]",
2413 dc->op2);
2415 cris_alu_m_alloc_temps(t);
2416 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2417 cris_cc_mask(dc, CC_MASK_NZVC);
2418 cris_alu(dc, CC_OP_CMP,
2419 cpu_R[dc->op2], cpu_R[dc->op2], t[1],
2420 memsize_zz(dc));
2421 do_postinc(dc, memsize);
2422 cris_alu_m_free_temps(t);
2423 return insn_len;
2426 static int dec_test_m(DisasContext *dc)
2428 TCGv t[2];
2429 int memsize = memsize_zz(dc);
2430 int insn_len;
2431 LOG_DIS("test.%c [$r%u%s] op2=%x\n",
2432 memsize_char(memsize),
2433 dc->op1, dc->postinc ? "+]" : "]",
2434 dc->op2);
2436 cris_evaluate_flags(dc);
2438 cris_alu_m_alloc_temps(t);
2439 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2440 cris_cc_mask(dc, CC_MASK_NZ);
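/* test defines V and C as cleared: assuming the usual CRIS CCS layout
   (C in bit 0, V in bit 1), the andi below masks them off, while N and Z
   come from the compare against zero. */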
2441 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
2443 cris_alu(dc, CC_OP_CMP,
2444 cpu_R[dc->op2], t[1], tcg_const_tl(0), memsize_zz(dc));
2445 do_postinc(dc, memsize);
2446 cris_alu_m_free_temps(t);
2447 return insn_len;
2450 static int dec_and_m(DisasContext *dc)
2452 TCGv t[2];
2453 int memsize = memsize_zz(dc);
2454 int insn_len;
2455 LOG_DIS("and.%c [$r%u%s, $r%u\n",
2456 memsize_char(memsize),
2457 dc->op1, dc->postinc ? "+]" : "]",
2458 dc->op2);
2460 cris_alu_m_alloc_temps(t);
2461 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2462 cris_cc_mask(dc, CC_MASK_NZ);
2463 cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2464 do_postinc(dc, memsize);
2465 cris_alu_m_free_temps(t);
2466 return insn_len;
2469 static int dec_add_m(DisasContext *dc)
2471 TCGv t[2];
2472 int memsize = memsize_zz(dc);
2473 int insn_len;
2474 LOG_DIS("add.%c [$r%u%s, $r%u\n",
2475 memsize_char(memsize),
2476 dc->op1, dc->postinc ? "+]" : "]",
2477 dc->op2);
2479 cris_alu_m_alloc_temps(t);
2480 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2481 cris_cc_mask(dc, CC_MASK_NZVC);
2482 cris_alu(dc, CC_OP_ADD,
2483 cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2484 do_postinc(dc, memsize);
2485 cris_alu_m_free_temps(t);
2486 return insn_len;
2489 static int dec_addo_m(DisasContext *dc)
2491 TCGv t[2];
2492 int memsize = memsize_zz(dc);
2493 int insn_len;
2494 LOG_DIS("add.%c [$r%u%s, $r%u\n",
2495 memsize_char(memsize),
2496 dc->op1, dc->postinc ? "+]" : "]",
2497 dc->op2);
2499 cris_alu_m_alloc_temps(t);
2500 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2501 cris_cc_mask(dc, 0);
2502 cris_alu(dc, CC_OP_ADD, cpu_R[R_ACR], t[0], t[1], 4);
2503 do_postinc(dc, memsize);
2504 cris_alu_m_free_temps(t);
2505 return insn_len;
2508 static int dec_bound_m(DisasContext *dc)
2510 TCGv l[2];
2511 int memsize = memsize_zz(dc);
2512 int insn_len;
2513 LOG_DIS("bound.%c [$r%u%s, $r%u\n",
2514 memsize_char(memsize),
2515 dc->op1, dc->postinc ? "+]" : "]",
2516 dc->op2);
2518 l[0] = tcg_temp_local_new();
2519 l[1] = tcg_temp_local_new();
2520 insn_len = dec_prep_alu_m(dc, 0, memsize, l[0], l[1]);
2521 cris_cc_mask(dc, CC_MASK_NZ);
2522 cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4);
2523 do_postinc(dc, memsize);
2524 tcg_temp_free(l[0]);
2525 tcg_temp_free(l[1]);
2526 return insn_len;
2529 static int dec_addc_mr(DisasContext *dc)
2531 TCGv t[2];
2532 int insn_len = 2;
2533 LOG_DIS("addc [$r%u%s, $r%u\n",
2534 dc->op1, dc->postinc ? "+]" : "]",
2535 dc->op2);
2537 cris_evaluate_flags(dc);
2539 /* Treat the X (extend) flag as set for this insn; addc consumes the carry. */
2540 dc->flagx_known = 1;
2541 dc->flags_x = X_FLAG;
2543 cris_alu_m_alloc_temps(t);
2544 insn_len = dec_prep_alu_m(dc, 0, 4, t[0], t[1]);
2545 cris_cc_mask(dc, CC_MASK_NZVC);
2546 cris_alu(dc, CC_OP_ADDC, cpu_R[dc->op2], t[0], t[1], 4);
2547 do_postinc(dc, 4);
2548 cris_alu_m_free_temps(t);
2549 return insn_len;
2552 static int dec_sub_m(DisasContext *dc)
2554 TCGv t[2];
2555 int memsize = memsize_zz(dc);
2556 int insn_len;
2557 LOG_DIS("sub.%c [$r%u%s, $r%u ir=%x zz=%x\n",
2558 memsize_char(memsize),
2559 dc->op1, dc->postinc ? "+]" : "]",
2560 dc->op2, dc->ir, dc->zzsize);
2562 cris_alu_m_alloc_temps(t);
2563 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2564 cris_cc_mask(dc, CC_MASK_NZVC);
2565 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], memsize);
2566 do_postinc(dc, memsize);
2567 cris_alu_m_free_temps(t);
2568 return insn_len;
2571 static int dec_or_m(DisasContext *dc)
2573 TCGv t[2];
2574 int memsize = memsize_zz(dc);
2575 int insn_len;
2576 LOG_DIS("or.%c [$r%u%s, $r%u pc=%x\n",
2577 memsize_char(memsize),
2578 dc->op1, dc->postinc ? "+]" : "]",
2579 dc->op2, dc->pc);
2581 cris_alu_m_alloc_temps(t);
2582 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2583 cris_cc_mask(dc, CC_MASK_NZ);
2584 cris_alu(dc, CC_OP_OR,
2585 cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2586 do_postinc(dc, memsize);
2587 cris_alu_m_free_temps(t);
2588 return insn_len;
2591 static int dec_move_mp(DisasContext *dc)
2593 TCGv t[2];
2594 int memsize = memsize_zz(dc);
2595 int insn_len = 2;
2597 LOG_DIS("move.%c [$r%u%s, $p%u\n",
2598 memsize_char(memsize),
2599 dc->op1,
2600 dc->postinc ? "+]" : "]",
2601 dc->op2);
2603 cris_alu_m_alloc_temps(t);
2604 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2605 cris_cc_mask(dc, 0);
2606 if (dc->op2 == PR_CCS) {
2607 cris_evaluate_flags(dc);
2608 if (dc->tb_flags & U_FLAG) {
2609 /* User space is not allowed to touch all flags. */
2610 tcg_gen_andi_tl(t[1], t[1], 0x39f);
2611 tcg_gen_andi_tl(t[0], cpu_PR[PR_CCS], ~0x39f);
2612 tcg_gen_or_tl(t[1], t[0], t[1]);
2616 t_gen_mov_preg_TN(dc, dc->op2, t[1]);
2618 do_postinc(dc, memsize);
2619 cris_alu_m_free_temps(t);
2620 return insn_len;
2623 static int dec_move_pm(DisasContext *dc)
2625 TCGv t0;
2626 int memsize;
2628 memsize = preg_sizes[dc->op2];
2630 LOG_DIS("move.%c $p%u, [$r%u%s\n",
2631 memsize_char(memsize),
2632 dc->op2, dc->op1, dc->postinc ? "+]" : "]");
2634 /* Prepare the store: address in $r[op1], value (the preg) read into t0. */
2635 if (dc->op2 == PR_CCS)
2636 cris_evaluate_flags(dc);
2637 t0 = tcg_temp_new();
2638 t_gen_mov_TN_preg(t0, dc->op2);
2639 cris_flush_cc_state(dc);
2640 gen_store(dc, cpu_R[dc->op1], t0, memsize);
2641 tcg_temp_free(t0);
2643 cris_cc_mask(dc, 0);
2644 if (dc->postinc)
2645 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
2646 return 2;
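/* movem [$rs(+)], $rd: load registers r0..r[op2] from memory at $rs.
   Register pairs are fetched with 64-bit loads and split; a trailing odd
   register is read with a single 32-bit load. */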
2649 static int dec_movem_mr(DisasContext *dc)
2651 TCGv_i64 tmp[16];
2652 TCGv tmp32;
2653 TCGv addr;
2654 int i;
2655 int nr = dc->op2 + 1;
2657 LOG_DIS("movem [$r%u%s, $r%u\n", dc->op1,
2658 dc->postinc ? "+]" : "]", dc->op2);
2660 addr = tcg_temp_new();
2661 /* There are probably better ways of doing this. */
2662 cris_flush_cc_state(dc);
2663 for (i = 0; i < (nr >> 1); i++) {
2664 tmp[i] = tcg_temp_new_i64();
2665 tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
2666 gen_load64(dc, tmp[i], addr);
2668 if (nr & 1) {
2669 tmp32 = tcg_temp_new_i32();
2670 tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
2671 gen_load(dc, tmp32, addr, 4, 0);
2672 } else
2673 TCGV_UNUSED(tmp32);
2674 tcg_temp_free(addr);
2676 for (i = 0; i < (nr >> 1); i++) {
2677 tcg_gen_trunc_i64_i32(cpu_R[i * 2], tmp[i]);
2678 tcg_gen_shri_i64(tmp[i], tmp[i], 32);
2679 tcg_gen_trunc_i64_i32(cpu_R[i * 2 + 1], tmp[i]);
2680 tcg_temp_free_i64(tmp[i]);
2682 if (nr & 1) {
2683 tcg_gen_mov_tl(cpu_R[dc->op2], tmp32);
2684 tcg_temp_free(tmp32);
2687 /* Write back the updated pointer value. */
2688 if (dc->postinc)
2689 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], nr * 4);
2691 /* gen_load might want to evaluate the previous insn's flags. */
2692 cris_cc_mask(dc, 0);
2693 return 2;
2696 static int dec_movem_rm(DisasContext *dc)
2698 TCGv tmp;
2699 TCGv addr;
2700 int i;
2702 LOG_DIS("movem $r%u, [$r%u%s\n", dc->op2, dc->op1,
2703 dc->postinc ? "+]" : "]");
2705 cris_flush_cc_state(dc);
2707 tmp = tcg_temp_new();
2708 addr = tcg_temp_new();
2709 tcg_gen_movi_tl(tmp, 4);
2710 tcg_gen_mov_tl(addr, cpu_R[dc->op1]);
2711 for (i = 0; i <= dc->op2; i++) {
2712 /* Displace addr. */
2713 /* Perform the store. */
2714 gen_store(dc, addr, cpu_R[i], 4);
2715 tcg_gen_add_tl(addr, addr, tmp);
2717 if (dc->postinc)
2718 tcg_gen_mov_tl(cpu_R[dc->op1], addr);
2719 cris_cc_mask(dc, 0);
2720 tcg_temp_free(tmp);
2721 tcg_temp_free(addr);
2722 return 2;
2725 static int dec_move_rm(DisasContext *dc)
2727 int memsize;
2729 memsize = memsize_zz(dc);
2731 LOG_DIS("move.%c $r%u, [$r%u]\n",
2732 memsize_char(memsize), dc->op2, dc->op1);
2734 /* prepare store. */
2735 cris_flush_cc_state(dc);
2736 gen_store(dc, cpu_R[dc->op1], cpu_R[dc->op2], memsize);
2738 if (dc->postinc)
2739 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
2740 cris_cc_mask(dc, 0);
2741 return 2;
2744 static int dec_lapcq(DisasContext *dc)
2746 LOG_DIS("lapcq %x, $r%u\n",
2747 dc->pc + dc->op1*2, dc->op2);
2748 cris_cc_mask(dc, 0);
2749 tcg_gen_movi_tl(cpu_R[dc->op2], dc->pc + dc->op1 * 2);
2750 return 2;
2753 static int dec_lapc_im(DisasContext *dc)
2755 unsigned int rd;
2756 int32_t imm;
2757 int32_t pc;
2759 rd = dc->op2;
2761 cris_cc_mask(dc, 0);
2762 imm = cris_fetch(dc, dc->pc + 2, 4, 0);
2763 LOG_DIS("lapc 0x%x, $r%u\n", imm + dc->pc, dc->op2);
2765 pc = dc->pc;
2766 pc += imm;
2767 tcg_gen_movi_tl(cpu_R[rd], pc);
2768 return 6;
2771 /* Jump to special reg. */
2772 static int dec_jump_p(DisasContext *dc)
2774 LOG_DIS("jump $p%u\n", dc->op2);
2776 if (dc->op2 == PR_CCS)
2777 cris_evaluate_flags(dc);
2778 t_gen_mov_TN_preg(env_btarget, dc->op2);
2779 /* rete will often have the low bit set to indicate a delay slot. */
2780 tcg_gen_andi_tl(env_btarget, env_btarget, ~1);
2781 cris_cc_mask(dc, 0);
2782 cris_prepare_jmp(dc, JMP_INDIRECT);
2783 return 2;
2786 /* Jump and save. */
2787 static int dec_jas_r(DisasContext *dc)
2789 LOG_DIS("jas $r%u, $p%u\n", dc->op1, dc->op2);
2790 cris_cc_mask(dc, 0);
2791 /* Store the return address in Pd. */
2792 tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
2793 if (dc->op2 > 15)
2794 abort();
2795 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4));
2797 cris_prepare_jmp(dc, JMP_INDIRECT);
2798 return 2;
2801 static int dec_jas_im(DisasContext *dc)
2803 uint32_t imm;
2805 imm = cris_fetch(dc, dc->pc + 2, 4, 0);
2807 LOG_DIS("jas 0x%x\n", imm);
2808 cris_cc_mask(dc, 0);
2809 /* Store the return address in Pd. */
2810 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
2812 dc->jmp_pc = imm;
2813 cris_prepare_jmp(dc, JMP_DIRECT);
2814 return 6;
2817 static int dec_jasc_im(DisasContext *dc)
2819 uint32_t imm;
2821 imm = cris_fetch(dc, dc->pc + 2, 4, 0);
2823 LOG_DIS("jasc 0x%x\n", imm);
2824 cris_cc_mask(dc, 0);
2825 /* Store the return address in Pd. */
2826 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8 + 4));
2828 dc->jmp_pc = imm;
2829 cris_prepare_jmp(dc, JMP_DIRECT);
2830 return 6;
2833 static int dec_jasc_r(DisasContext *dc)
2835 LOG_DIS("jasc_r $r%u, $p%u\n", dc->op1, dc->op2);
2836 cris_cc_mask(dc, 0);
2837 /* Store the return address in Pd. */
2838 tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
2839 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4 + 4));
2840 cris_prepare_jmp(dc, JMP_INDIRECT);
2841 return 2;
2844 static int dec_bcc_im(DisasContext *dc)
2846 int32_t offset;
2847 uint32_t cond = dc->op2;
2849 offset = cris_fetch(dc, dc->pc + 2, 2, 1);
2851 LOG_DIS("b%s %d pc=%x dst=%x\n",
2852 cc_name(cond), offset,
2853 dc->pc, dc->pc + offset);
2855 cris_cc_mask(dc, 0);
2856 /* op2 holds the condition-code. */
2857 cris_prepare_cc_branch (dc, offset, cond);
2858 return 4;
2861 static int dec_bas_im(DisasContext *dc)
2863 int32_t simm;
2866 simm = cris_fetch(dc, dc->pc + 2, 4, 0);
2868 LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2);
2869 cris_cc_mask(dc, 0);
2870 /* Store the return address in Pd. */
2871 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
2873 dc->jmp_pc = dc->pc + simm;
2874 cris_prepare_jmp(dc, JMP_DIRECT);
2875 return 6;
2878 static int dec_basc_im(DisasContext *dc)
2880 int32_t simm;
2881 simm = cris_fetch(dc, dc->pc + 2, 4, 0);
2883 LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2);
2884 cris_cc_mask(dc, 0);
2885 /* Store the return address in Pd. */
2886 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 12));
2888 dc->jmp_pc = dc->pc + simm;
2889 cris_prepare_jmp(dc, JMP_DIRECT);
2890 return 6;
2893 static int dec_rfe_etc(DisasContext *dc)
2895 cris_cc_mask(dc, 0);
2897 if (dc->op2 == 15) {
2898 t_gen_mov_env_TN(halted, tcg_const_tl(1));
2899 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2900 t_gen_raise_exception(EXCP_HLT);
2901 return 2;
2904 switch (dc->op2 & 7) {
2905 case 2:
2906 /* rfe. */
2907 LOG_DIS("rfe\n");
2908 cris_evaluate_flags(dc);
2909 gen_helper_rfe();
2910 dc->is_jmp = DISAS_UPDATE;
2911 break;
2912 case 5:
2913 /* rfn. */
2914 LOG_DIS("rfn\n");
2915 cris_evaluate_flags(dc);
2916 gen_helper_rfn();
2917 dc->is_jmp = DISAS_UPDATE;
2918 break;
2919 case 6:
2920 LOG_DIS("break %d\n", dc->op1);
2921 cris_evaluate_flags (dc);
2922 /* break. */
2923 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2925 /* Breaks start at 16 in the exception vector. */
2926 t_gen_mov_env_TN(trap_vector,
2927 tcg_const_tl(dc->op1 + 16));
2928 t_gen_raise_exception(EXCP_BREAK);
2929 dc->is_jmp = DISAS_UPDATE;
2930 break;
2931 default:
2932 printf ("op2=%x\n", dc->op2);
2933 BUG();
2934 break;
2937 return 2;
2940 static int dec_ftag_fidx_d_m(DisasContext *dc)
2942 return 2;
2945 static int dec_ftag_fidx_i_m(DisasContext *dc)
2947 return 2;
2950 static int dec_null(DisasContext *dc)
2952 printf ("unknown insn pc=%x opc=%x op1=%x op2=%x\n",
2953 dc->pc, dc->opcode, dc->op1, dc->op2);
2954 fflush(NULL);
2955 BUG();
2956 return 2;
2959 static struct decoder_info {
2960 struct {
2961 uint32_t bits;
2962 uint32_t mask;
2964 int (*dec)(DisasContext *dc);
2965 } decinfo[] = {
2966 /* Order matters here. */
2967 {DEC_MOVEQ, dec_moveq},
2968 {DEC_BTSTQ, dec_btstq},
2969 {DEC_CMPQ, dec_cmpq},
2970 {DEC_ADDOQ, dec_addoq},
2971 {DEC_ADDQ, dec_addq},
2972 {DEC_SUBQ, dec_subq},
2973 {DEC_ANDQ, dec_andq},
2974 {DEC_ORQ, dec_orq},
2975 {DEC_ASRQ, dec_asrq},
2976 {DEC_LSLQ, dec_lslq},
2977 {DEC_LSRQ, dec_lsrq},
2978 {DEC_BCCQ, dec_bccq},
2980 {DEC_BCC_IM, dec_bcc_im},
2981 {DEC_JAS_IM, dec_jas_im},
2982 {DEC_JAS_R, dec_jas_r},
2983 {DEC_JASC_IM, dec_jasc_im},
2984 {DEC_JASC_R, dec_jasc_r},
2985 {DEC_BAS_IM, dec_bas_im},
2986 {DEC_BASC_IM, dec_basc_im},
2987 {DEC_JUMP_P, dec_jump_p},
2988 {DEC_LAPC_IM, dec_lapc_im},
2989 {DEC_LAPCQ, dec_lapcq},
2991 {DEC_RFE_ETC, dec_rfe_etc},
2992 {DEC_ADDC_MR, dec_addc_mr},
2994 {DEC_MOVE_MP, dec_move_mp},
2995 {DEC_MOVE_PM, dec_move_pm},
2996 {DEC_MOVEM_MR, dec_movem_mr},
2997 {DEC_MOVEM_RM, dec_movem_rm},
2998 {DEC_MOVE_PR, dec_move_pr},
2999 {DEC_SCC_R, dec_scc_r},
3000 {DEC_SETF, dec_setclrf},
3001 {DEC_CLEARF, dec_setclrf},
3003 {DEC_MOVE_SR, dec_move_sr},
3004 {DEC_MOVE_RP, dec_move_rp},
3005 {DEC_SWAP_R, dec_swap_r},
3006 {DEC_ABS_R, dec_abs_r},
3007 {DEC_LZ_R, dec_lz_r},
3008 {DEC_MOVE_RS, dec_move_rs},
3009 {DEC_BTST_R, dec_btst_r},
3010 {DEC_ADDC_R, dec_addc_r},
3012 {DEC_DSTEP_R, dec_dstep_r},
3013 {DEC_XOR_R, dec_xor_r},
3014 {DEC_MCP_R, dec_mcp_r},
3015 {DEC_CMP_R, dec_cmp_r},
3017 {DEC_ADDI_R, dec_addi_r},
3018 {DEC_ADDI_ACR, dec_addi_acr},
3020 {DEC_ADD_R, dec_add_r},
3021 {DEC_SUB_R, dec_sub_r},
3023 {DEC_ADDU_R, dec_addu_r},
3024 {DEC_ADDS_R, dec_adds_r},
3025 {DEC_SUBU_R, dec_subu_r},
3026 {DEC_SUBS_R, dec_subs_r},
3027 {DEC_LSL_R, dec_lsl_r},
3029 {DEC_AND_R, dec_and_r},
3030 {DEC_OR_R, dec_or_r},
3031 {DEC_BOUND_R, dec_bound_r},
3032 {DEC_ASR_R, dec_asr_r},
3033 {DEC_LSR_R, dec_lsr_r},
3035 {DEC_MOVU_R, dec_movu_r},
3036 {DEC_MOVS_R, dec_movs_r},
3037 {DEC_NEG_R, dec_neg_r},
3038 {DEC_MOVE_R, dec_move_r},
3040 {DEC_FTAG_FIDX_I_M, dec_ftag_fidx_i_m},
3041 {DEC_FTAG_FIDX_D_M, dec_ftag_fidx_d_m},
3043 {DEC_MULS_R, dec_muls_r},
3044 {DEC_MULU_R, dec_mulu_r},
3046 {DEC_ADDU_M, dec_addu_m},
3047 {DEC_ADDS_M, dec_adds_m},
3048 {DEC_SUBU_M, dec_subu_m},
3049 {DEC_SUBS_M, dec_subs_m},
3051 {DEC_CMPU_M, dec_cmpu_m},
3052 {DEC_CMPS_M, dec_cmps_m},
3053 {DEC_MOVU_M, dec_movu_m},
3054 {DEC_MOVS_M, dec_movs_m},
3056 {DEC_CMP_M, dec_cmp_m},
3057 {DEC_ADDO_M, dec_addo_m},
3058 {DEC_BOUND_M, dec_bound_m},
3059 {DEC_ADD_M, dec_add_m},
3060 {DEC_SUB_M, dec_sub_m},
3061 {DEC_AND_M, dec_and_m},
3062 {DEC_OR_M, dec_or_m},
3063 {DEC_MOVE_RM, dec_move_rm},
3064 {DEC_TEST_M, dec_test_m},
3065 {DEC_MOVE_MR, dec_move_mr},
3067 {{0, 0}, dec_null}
3070 static unsigned int crisv32_decoder(DisasContext *dc)
3072 int insn_len = 2;
3073 int i;
3075 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
3076 tcg_gen_debug_insn_start(dc->pc);
3078 /* Load a halfword into the instruction register. */
3079 dc->ir = cris_fetch(dc, dc->pc, 2, 0);
3081 /* Now decode it. */
3082 dc->opcode = EXTRACT_FIELD(dc->ir, 4, 11);
3083 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 3);
3084 dc->op2 = EXTRACT_FIELD(dc->ir, 12, 15);
3085 dc->zsize = EXTRACT_FIELD(dc->ir, 4, 4);
3086 dc->zzsize = EXTRACT_FIELD(dc->ir, 4, 5);
3087 dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10);
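/* Field layout implied by the extractions above: op1 in bits [3:0],
   zsize in bit 4, zzsize in bits [5:4], opcode in bits [11:4] (so postinc,
   bit 10, overlaps the opcode field) and op2 in bits [15:12]. */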
3089 /* Scan the decoder table and call the handler for this insn. */
3090 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
3091 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits)
3093 insn_len = decinfo[i].dec(dc);
3094 break;
3098 #if !defined(CONFIG_USER_ONLY)
3099 /* Single-stepping ? */
3100 if (dc->tb_flags & S_FLAG) {
3101 int l1;
3103 l1 = gen_new_label();
3104 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_PR[PR_SPC], dc->pc, l1);
3105 /* We treat SPC as a break with an odd trap vector. */
3106 cris_evaluate_flags (dc);
3107 t_gen_mov_env_TN(trap_vector, tcg_const_tl(3));
3108 tcg_gen_movi_tl(env_pc, dc->pc + insn_len);
3109 tcg_gen_movi_tl(cpu_PR[PR_SPC], dc->pc + insn_len);
3110 t_gen_raise_exception(EXCP_BREAK);
3111 gen_set_label(l1);
3113 #endif
3114 return insn_len;
3117 static void check_breakpoint(CPUCRISState *env, DisasContext *dc)
3119 CPUBreakpoint *bp;
3121 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3122 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3123 if (bp->pc == dc->pc) {
3124 cris_evaluate_flags (dc);
3125 tcg_gen_movi_tl(env_pc, dc->pc);
3126 t_gen_raise_exception(EXCP_DEBUG);
3127 dc->is_jmp = DISAS_UPDATE;
3133 #include "translate_v10.c"
3136 * Delay slots on QEMU/CRIS.
3138 * If an exception hits on a delay slot, the core will let ERP (the Exception
3139 * Return Pointer) point to the branch (the previous insn) and set the lsb to
3140 * give SW a hint that the exception actually hit on the dslot.
3142 * CRIS expects all PC addresses to be 16-bit aligned. The lsb is ignored by
3143 * the core and any jmp to an odd address will mask off that lsb. It is
3144 * simply there to let SW know there was an exception on a dslot.
3146 * When the software returns from an exception, the branch will re-execute.
3147 * On QEMU, care needs to be taken when a branch+delay-slot sequence is broken
3148 * and the branch and the delay slot don't share pages.
3150 * The TB containing the branch insn will set up env->btarget and evaluate
3151 * env->btaken. When the translation loop exits, we note that the branch
3152 * sequence is broken and let env->dslot be the size of the branch insn (those
3153 * vary in length).
3155 * The TB containing the delay slot will have the PC of its real insn (i.e. no
3156 * lsb set). It will also expect env->dslot to be set up with the size of the
3157 * branch insn, so that env->pc - env->dslot points to the branch insn. This TB
3158 * will execute the dslot and take the branch, either to btarget or just one
3159 * insn ahead.
3161 * When exceptions occur, we check for env->dslot in do_interrupt to detect
3162 * broken branch sequences and set up $erp accordingly (i.e. let it point to the
3163 * branch and set the lsb). Then env->dslot gets cleared so that the exception
3164 * handler can enter. When returning from exceptions (jump $erp), the lsb gets
3165 * masked off and we re-execute the branch insn.
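 *
 * A concrete sketch of the bookkeeping above (illustrative numbers): take a
 * 4-byte branch at 0x1ffc whose delay slot at 0x2000 falls on the next page.
 * The branch TB sets up env->btarget/btaken and leaves env->dslot = 4 (the
 * branch insn's size). The delay-slot TB runs with env->pc = 0x2000, so
 * env->pc - env->dslot = 0x1ffc, the branch. If an exception hits the delay
 * slot, do_interrupt sees a non-zero env->dslot, sets $erp = 0x1ffc | 1 and
 * clears env->dslot; on return (jump $erp) the lsb is masked off and the
 * branch at 0x1ffc re-executes.
 */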
3169 /* generate intermediate code for basic block 'tb'. */
3170 static void
3171 gen_intermediate_code_internal(CPUCRISState *env, TranslationBlock *tb,
3172 int search_pc)
3174 uint16_t *gen_opc_end;
3175 uint32_t pc_start;
3176 unsigned int insn_len;
3177 int j, lj;
3178 struct DisasContext ctx;
3179 struct DisasContext *dc = &ctx;
3180 uint32_t next_page_start;
3181 target_ulong npc;
3182 int num_insns;
3183 int max_insns;
3185 qemu_log_try_set_file(stderr);
3187 if (env->pregs[PR_VR] == 32) {
3188 dc->decoder = crisv32_decoder;
3189 dc->clear_locked_irq = 0;
3190 } else {
3191 dc->decoder = crisv10_decoder;
3192 dc->clear_locked_irq = 1;
3195 /* Odd PC indicates that the branch is re-executing due to an exception in
3196 * the delay slot, like in real hw.
3198 pc_start = tb->pc & ~1;
3199 dc->env = env;
3200 dc->tb = tb;
3202 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3204 dc->is_jmp = DISAS_NEXT;
3205 dc->ppc = pc_start;
3206 dc->pc = pc_start;
3207 dc->singlestep_enabled = env->singlestep_enabled;
3208 dc->flags_uptodate = 1;
3209 dc->flagx_known = 1;
3210 dc->flags_x = tb->flags & X_FLAG;
3211 dc->cc_x_uptodate = 0;
3212 dc->cc_mask = 0;
3213 dc->update_cc = 0;
3214 dc->clear_prefix = 0;
3216 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
3217 dc->cc_size_uptodate = -1;
3219 /* Decode TB flags. */
3220 dc->tb_flags = tb->flags & (S_FLAG | P_FLAG | U_FLAG \
3221 | X_FLAG | PFIX_FLAG);
3222 dc->delayed_branch = !!(tb->flags & 7);
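/* The low bits of tb->flags carry env->dslot from the previous TB (logged as
   "ds=" below); a non-zero value means this TB starts inside a branch's delay
   slot, so the pending branch must be taken after the first insn. */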
3223 if (dc->delayed_branch)
3224 dc->jmp = JMP_INDIRECT;
3225 else
3226 dc->jmp = JMP_NOJMP;
3228 dc->cpustate_changed = 0;
3230 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3231 qemu_log(
3232 "srch=%d pc=%x %x flg=%" PRIx64 " bt=%x ds=%u ccs=%x\n"
3233 "pid=%x usp=%x\n"
3234 "%x.%x.%x.%x\n"
3235 "%x.%x.%x.%x\n"
3236 "%x.%x.%x.%x\n"
3237 "%x.%x.%x.%x\n",
3238 search_pc, dc->pc, dc->ppc,
3239 (uint64_t)tb->flags,
3240 env->btarget, (unsigned)tb->flags & 7,
3241 env->pregs[PR_CCS],
3242 env->pregs[PR_PID], env->pregs[PR_USP],
3243 env->regs[0], env->regs[1], env->regs[2], env->regs[3],
3244 env->regs[4], env->regs[5], env->regs[6], env->regs[7],
3245 env->regs[8], env->regs[9],
3246 env->regs[10], env->regs[11],
3247 env->regs[12], env->regs[13],
3248 env->regs[14], env->regs[15]);
3249 qemu_log("--------------\n");
3250 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3253 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
3254 lj = -1;
3255 num_insns = 0;
3256 max_insns = tb->cflags & CF_COUNT_MASK;
3257 if (max_insns == 0)
3258 max_insns = CF_COUNT_MASK;
3260 gen_icount_start();
3263 check_breakpoint(env, dc);
3265 if (search_pc) {
3266 j = gen_opc_ptr - gen_opc_buf;
3267 if (lj < j) {
3268 lj++;
3269 while (lj < j)
3270 gen_opc_instr_start[lj++] = 0;
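/* For an insn sitting in a delay slot, record the branch's PC with the lsb
   set (rather than the insn's own PC), matching the odd-PC re-execution
   convention noted above; restore_state_to_opc hands this back to env->pc. */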
3272 if (dc->delayed_branch == 1)
3273 gen_opc_pc[lj] = dc->ppc | 1;
3274 else
3275 gen_opc_pc[lj] = dc->pc;
3276 gen_opc_instr_start[lj] = 1;
3277 gen_opc_icount[lj] = num_insns;
3280 /* Pretty disas. */
3281 LOG_DIS("%8.8x:\t", dc->pc);
3283 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3284 gen_io_start();
3285 dc->clear_x = 1;
3287 insn_len = dc->decoder(dc);
3288 dc->ppc = dc->pc;
3289 dc->pc += insn_len;
3290 if (dc->clear_x)
3291 cris_clear_x_flag(dc);
3293 num_insns++;
3294 /* Check for delayed branches here. If we do it before
3295 actually generating any host code, the simulator will just
3296 loop doing nothing on this program location. */
3297 if (dc->delayed_branch) {
3298 dc->delayed_branch--;
3299 if (dc->delayed_branch == 0)
3301 if (tb->flags & 7)
3302 t_gen_mov_env_TN(dslot,
3303 tcg_const_tl(0));
3304 if (dc->cpustate_changed || !dc->flagx_known
3305 || (dc->flags_x != (tb->flags & X_FLAG))) {
3306 cris_store_direct_jmp(dc);
3309 if (dc->clear_locked_irq) {
3310 dc->clear_locked_irq = 0;
3311 t_gen_mov_env_TN(locked_irq,
3312 tcg_const_tl(0));
3315 if (dc->jmp == JMP_DIRECT_CC) {
3316 int l1;
3318 l1 = gen_new_label();
3319 cris_evaluate_flags(dc);
3321 /* Conditional jmp. */
3322 tcg_gen_brcondi_tl(TCG_COND_EQ,
3323 env_btaken, 0, l1);
3324 gen_goto_tb(dc, 1, dc->jmp_pc);
3325 gen_set_label(l1);
3326 gen_goto_tb(dc, 0, dc->pc);
3327 dc->is_jmp = DISAS_TB_JUMP;
3328 dc->jmp = JMP_NOJMP;
3329 } else if (dc->jmp == JMP_DIRECT) {
3330 cris_evaluate_flags(dc);
3331 gen_goto_tb(dc, 0, dc->jmp_pc);
3332 dc->is_jmp = DISAS_TB_JUMP;
3333 dc->jmp = JMP_NOJMP;
3334 } else {
3335 t_gen_cc_jmp(env_btarget,
3336 tcg_const_tl(dc->pc));
3337 dc->is_jmp = DISAS_JUMP;
3339 break;
3343 /* If we are re-executing a branch due to an exception on the
3344 delay slot, don't break. */
3345 if (!(tb->pc & 1) && env->singlestep_enabled)
3346 break;
3347 } while (!dc->is_jmp && !dc->cpustate_changed
3348 && gen_opc_ptr < gen_opc_end
3349 && !singlestep
3350 && (dc->pc < next_page_start)
3351 && num_insns < max_insns);
3353 if (dc->clear_locked_irq)
3354 t_gen_mov_env_TN(locked_irq, tcg_const_tl(0));
3356 npc = dc->pc;
3358 if (tb->cflags & CF_LAST_IO)
3359 gen_io_end();
3360 /* Force an update if the per-tb cpu state has changed. */
3361 if (dc->is_jmp == DISAS_NEXT
3362 && (dc->cpustate_changed || !dc->flagx_known
3363 || (dc->flags_x != (tb->flags & X_FLAG)))) {
3364 dc->is_jmp = DISAS_UPDATE;
3365 tcg_gen_movi_tl(env_pc, npc);
3367 /* Broken branch+delayslot sequence. */
3368 if (dc->delayed_branch == 1) {
3369 /* Set env->dslot to the size of the branch insn. */
3370 t_gen_mov_env_TN(dslot, tcg_const_tl(dc->pc - dc->ppc));
3371 cris_store_direct_jmp(dc);
3374 cris_evaluate_flags (dc);
3376 if (unlikely(env->singlestep_enabled)) {
3377 if (dc->is_jmp == DISAS_NEXT)
3378 tcg_gen_movi_tl(env_pc, npc);
3379 t_gen_raise_exception(EXCP_DEBUG);
3380 } else {
3381 switch(dc->is_jmp) {
3382 case DISAS_NEXT:
3383 gen_goto_tb(dc, 1, npc);
3384 break;
3385 default:
3386 case DISAS_JUMP:
3387 case DISAS_UPDATE:
3388 /* indicate that the hash table must be used
3389 to find the next TB */
3390 tcg_gen_exit_tb(0);
3391 break;
3392 case DISAS_SWI:
3393 case DISAS_TB_JUMP:
3394 /* nothing more to generate */
3395 break;
3398 gen_icount_end(tb, num_insns);
3399 *gen_opc_ptr = INDEX_op_end;
3400 if (search_pc) {
3401 j = gen_opc_ptr - gen_opc_buf;
3402 lj++;
3403 while (lj <= j)
3404 gen_opc_instr_start[lj++] = 0;
3405 } else {
3406 tb->size = dc->pc - pc_start;
3407 tb->icount = num_insns;
3410 #ifdef DEBUG_DISAS
3411 #if !DISAS_CRIS
3412 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3413 log_target_disas(pc_start, dc->pc - pc_start,
3414 dc->env->pregs[PR_VR]);
3415 qemu_log("\nisize=%d osize=%td\n",
3416 dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
3418 #endif
3419 #endif
3422 void gen_intermediate_code (CPUCRISState *env, struct TranslationBlock *tb)
3424 gen_intermediate_code_internal(env, tb, 0);
3427 void gen_intermediate_code_pc (CPUCRISState *env, struct TranslationBlock *tb)
3429 gen_intermediate_code_internal(env, tb, 1);
3432 void cpu_dump_state (CPUCRISState *env, FILE *f, fprintf_function cpu_fprintf,
3433 int flags)
3435 int i;
3436 uint32_t srs;
3438 if (!env || !f)
3439 return;
3441 cpu_fprintf(f, "PC=%x CCS=%x btaken=%d btarget=%x\n"
3442 "cc_op=%d cc_src=%d cc_dest=%d cc_result=%x cc_mask=%x\n",
3443 env->pc, env->pregs[PR_CCS], env->btaken, env->btarget,
3444 env->cc_op,
3445 env->cc_src, env->cc_dest, env->cc_result, env->cc_mask);
3448 for (i = 0; i < 16; i++) {
3449 cpu_fprintf(f, "%s=%8.8x ",regnames[i], env->regs[i]);
3450 if ((i + 1) % 4 == 0)
3451 cpu_fprintf(f, "\n");
3453 cpu_fprintf(f, "\nspecial regs:\n");
3454 for (i = 0; i < 16; i++) {
3455 cpu_fprintf(f, "%s=%8.8x ", pregnames[i], env->pregs[i]);
3456 if ((i + 1) % 4 == 0)
3457 cpu_fprintf(f, "\n");
3459 srs = env->pregs[PR_SRS];
3460 cpu_fprintf(f, "\nsupport function regs bank %x:\n", srs);
3461 if (srs < 256) {
3462 for (i = 0; i < 16; i++) {
3463 cpu_fprintf(f, "s%2.2d=%8.8x ",
3464 i, env->sregs[srs][i]);
3465 if ((i + 1) % 4 == 0)
3466 cpu_fprintf(f, "\n");
3469 cpu_fprintf(f, "\n\n");
3473 struct
3475 uint32_t vr;
3476 const char *name;
3477 } cris_cores[] = {
3478 {8, "crisv8"},
3479 {9, "crisv9"},
3480 {10, "crisv10"},
3481 {11, "crisv11"},
3482 {32, "crisv32"},
3485 void cris_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3487 unsigned int i;
3489 (*cpu_fprintf)(f, "Available CPUs:\n");
3490 for (i = 0; i < ARRAY_SIZE(cris_cores); i++) {
3491 (*cpu_fprintf)(f, " %s\n", cris_cores[i].name);
3495 static uint32_t vr_by_name(const char *name)
3497 unsigned int i;
3498 for (i = 0; i < ARRAY_SIZE(cris_cores); i++) {
3499 if (strcmp(name, cris_cores[i].name) == 0) {
3500 return cris_cores[i].vr;
3503 return 32;
3506 CPUCRISState *cpu_cris_init (const char *cpu_model)
3508 CPUCRISState *env;
3509 static int tcg_initialized = 0;
3510 int i;
3512 env = g_malloc0(sizeof(CPUCRISState));
3514 env->pregs[PR_VR] = vr_by_name(cpu_model);
3515 cpu_exec_init(env);
3516 cpu_state_reset(env);
3517 qemu_init_vcpu(env);
3519 if (tcg_initialized)
3520 return env;
3522 tcg_initialized = 1;
3524 #define GEN_HELPER 2
3525 #include "helper.h"
3527 if (env->pregs[PR_VR] < 32) {
3528 cpu_crisv10_init(env);
3529 return env;
3533 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
3534 cc_x = tcg_global_mem_new(TCG_AREG0,
3535 offsetof(CPUCRISState, cc_x), "cc_x");
3536 cc_src = tcg_global_mem_new(TCG_AREG0,
3537 offsetof(CPUCRISState, cc_src), "cc_src");
3538 cc_dest = tcg_global_mem_new(TCG_AREG0,
3539 offsetof(CPUCRISState, cc_dest),
3540 "cc_dest");
3541 cc_result = tcg_global_mem_new(TCG_AREG0,
3542 offsetof(CPUCRISState, cc_result),
3543 "cc_result");
3544 cc_op = tcg_global_mem_new(TCG_AREG0,
3545 offsetof(CPUCRISState, cc_op), "cc_op");
3546 cc_size = tcg_global_mem_new(TCG_AREG0,
3547 offsetof(CPUCRISState, cc_size),
3548 "cc_size");
3549 cc_mask = tcg_global_mem_new(TCG_AREG0,
3550 offsetof(CPUCRISState, cc_mask),
3551 "cc_mask");
3553 env_pc = tcg_global_mem_new(TCG_AREG0,
3554 offsetof(CPUCRISState, pc),
3555 "pc");
3556 env_btarget = tcg_global_mem_new(TCG_AREG0,
3557 offsetof(CPUCRISState, btarget),
3558 "btarget");
3559 env_btaken = tcg_global_mem_new(TCG_AREG0,
3560 offsetof(CPUCRISState, btaken),
3561 "btaken");
3562 for (i = 0; i < 16; i++) {
3563 cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
3564 offsetof(CPUCRISState, regs[i]),
3565 regnames[i]);
3567 for (i = 0; i < 16; i++) {
3568 cpu_PR[i] = tcg_global_mem_new(TCG_AREG0,
3569 offsetof(CPUCRISState, pregs[i]),
3570 pregnames[i]);
3573 return env;
3576 void cpu_state_reset(CPUCRISState *env)
3578 uint32_t vr;
3580 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
3581 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
3582 log_cpu_state(env, 0);
3585 vr = env->pregs[PR_VR];
3586 memset(env, 0, offsetof(CPUCRISState, breakpoints));
3587 env->pregs[PR_VR] = vr;
3588 tlb_flush(env, 1);
3590 #if defined(CONFIG_USER_ONLY)
3591 /* start in user mode with interrupts enabled. */
3592 env->pregs[PR_CCS] |= U_FLAG | I_FLAG | P_FLAG;
3593 #else
3594 cris_mmu_init(env);
3595 env->pregs[PR_CCS] = 0;
3596 #endif
3599 void restore_state_to_opc(CPUCRISState *env, TranslationBlock *tb, int pc_pos)
3601 env->pc = gen_opc_pc[pc_pos];