1 /*
2 * CRIS emulation for qemu: main translation routines.
4 * Copyright (c) 2008 AXIS Communications AB
5 * Written by Edgar E. Iglesias.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 * FIXME:
23 * The condition code translation is in need of attention.
26 #include <stdarg.h>
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <string.h>
30 #include <inttypes.h>
32 #include "cpu.h"
33 #include "exec-all.h"
34 #include "disas.h"
35 #include "tcg-op.h"
36 #include "helper.h"
37 #include "mmu.h"
38 #include "crisv32-decode.h"
39 #include "qemu-common.h"
41 #define GEN_HELPER 1
42 #include "helper.h"
44 #define DISAS_CRIS 0
45 #if DISAS_CRIS
46 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
47 #else
48 # define LOG_DIS(...) do { } while (0)
49 #endif
51 #define D(x)
52 #define BUG() (gen_BUG(dc, __FILE__, __LINE__))
53 #define BUG_ON(x) ({if (x) BUG();})
55 #define DISAS_SWI 5
57 /* Used by the decoder. */
58 #define EXTRACT_FIELD(src, start, end) \
59 (((src) >> start) & ((1 << (end - start + 1)) - 1))
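/* For example, EXTRACT_FIELD(0xabcd, 4, 7) picks bits 4..7 inclusive and yields 0xc. */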
61 #define CC_MASK_NZ 0xc
62 #define CC_MASK_NZV 0xe
63 #define CC_MASK_NZVC 0xf
64 #define CC_MASK_RNZV 0x10e
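/* In $ccs, C/V/Z/N occupy bits 0..3; bit 8 is the R flag used by mcp,
   which is why CC_MASK_RNZV adds 0x100 on top of NZV. */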
66 static TCGv_ptr cpu_env;
67 static TCGv cpu_R[16];
68 static TCGv cpu_PR[16];
69 static TCGv cc_x;
70 static TCGv cc_src;
71 static TCGv cc_dest;
72 static TCGv cc_result;
73 static TCGv cc_op;
74 static TCGv cc_size;
75 static TCGv cc_mask;
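/* The cc_* globals record the operands and kind of the last flag-setting
   operation; $ccs itself is only recomputed lazily by cris_evaluate_flags(). */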
77 static TCGv env_btaken;
78 static TCGv env_btarget;
79 static TCGv env_pc;
81 #include "gen-icount.h"
83 /* This is the state at translation time. */
84 typedef struct DisasContext {
85 CPUState *env;
86 target_ulong pc, ppc;
88 /* Decoder. */
89 unsigned int (*decoder)(struct DisasContext *dc);
90 uint32_t ir;
91 uint32_t opcode;
92 unsigned int op1;
93 unsigned int op2;
94 unsigned int zsize, zzsize;
95 unsigned int mode;
96 unsigned int postinc;
98 unsigned int size;
99 unsigned int src;
100 unsigned int dst;
101 unsigned int cond;
103 int update_cc;
104 int cc_op;
105 int cc_size;
106 uint32_t cc_mask;
108 int cc_size_uptodate; /* -1 invalid, otherwise the last written value. */
110 int cc_x_uptodate; /* 1 - ccs, 2 - known | X_FLAG. 0 not up to date. */
111 int flags_uptodate; /* Whether or not $ccs is up to date. */
112 int flagx_known; /* Whether the value of the x flag is known at
113 translation time (it is then held in flags_x). */
114 int flags_x;
116 int clear_x; /* Clear x after this insn? */
117 int clear_prefix; /* Clear prefix after this insn? */
118 int clear_locked_irq; /* Clear the irq lockout. */
119 int cpustate_changed;
120 unsigned int tb_flags; /* tb dependent flags. */
121 int is_jmp;
123 #define JMP_NOJMP 0
124 #define JMP_DIRECT 1
125 #define JMP_INDIRECT 2
126 int jmp; /* 0=nojmp, 1=direct, 2=indirect. */
127 uint32_t jmp_pc;
129 int delayed_branch;
131 struct TranslationBlock *tb;
132 int singlestep_enabled;
133 } DisasContext;
135 static void gen_BUG(DisasContext *dc, const char *file, int line)
137 printf ("BUG: pc=%x %s %d\n", dc->pc, file, line);
138 qemu_log("BUG: pc=%x %s %d\n", dc->pc, file, line);
139 cpu_abort(dc->env, "%s:%d\n", file, line);
142 static const char *regnames[] =
144 "$r0", "$r1", "$r2", "$r3",
145 "$r4", "$r5", "$r6", "$r7",
146 "$r8", "$r9", "$r10", "$r11",
147 "$r12", "$r13", "$sp", "$acr",
149 static const char *pregnames[] =
151 "$bz", "$vr", "$pid", "$srs",
152 "$wz", "$exs", "$eda", "$mof",
153 "$dz", "$ebp", "$erp", "$srp",
154 "$nrp", "$ccs", "$usp", "$spc",
157 /* We need this table to handle preg-moves with implicit width. */
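/* Entries follow pregnames[] above; everything from $exs onwards is 32 bits wide. */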
158 static int preg_sizes[] = {
159 1, /* bz. */
160 1, /* vr. */
161 4, /* pid. */
162 1, /* srs. */
163 2, /* wz. */
164 4, 4, 4,
165 4, 4, 4, 4,
166 4, 4, 4, 4,
169 #define t_gen_mov_TN_env(tn, member) \
170 _t_gen_mov_TN_env((tn), offsetof(CPUState, member))
171 #define t_gen_mov_env_TN(member, tn) \
172 _t_gen_mov_env_TN(offsetof(CPUState, member), (tn))
174 static inline void t_gen_mov_TN_reg(TCGv tn, int r)
176 if (r < 0 || r > 15)
177 fprintf(stderr, "wrong register read $r%d\n", r);
178 tcg_gen_mov_tl(tn, cpu_R[r]);
180 static inline void t_gen_mov_reg_TN(int r, TCGv tn)
182 if (r < 0 || r > 15)
183 fprintf(stderr, "wrong register write $r%d\n", r);
184 tcg_gen_mov_tl(cpu_R[r], tn);
187 static inline void _t_gen_mov_TN_env(TCGv tn, int offset)
189 if (offset > sizeof (CPUState))
190 fprintf(stderr, "wrong load from env from off=%d\n", offset);
191 tcg_gen_ld_tl(tn, cpu_env, offset);
193 static inline void _t_gen_mov_env_TN(int offset, TCGv tn)
195 if (offset > sizeof (CPUState))
196 fprintf(stderr, "wrong store to env at off=%d\n", offset);
197 tcg_gen_st_tl(tn, cpu_env, offset);
200 static inline void t_gen_mov_TN_preg(TCGv tn, int r)
202 if (r < 0 || r > 15)
203 fprintf(stderr, "wrong register read $p%d\n", r);
204 if (r == PR_BZ || r == PR_WZ || r == PR_DZ)
205 tcg_gen_mov_tl(tn, tcg_const_tl(0));
206 else if (r == PR_VR)
207 tcg_gen_mov_tl(tn, tcg_const_tl(32));
208 else
209 tcg_gen_mov_tl(tn, cpu_PR[r]);
211 static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn)
213 if (r < 0 || r > 15)
214 fprintf(stderr, "wrong register write $p%d\n", r);
215 if (r == PR_BZ || r == PR_WZ || r == PR_DZ)
216 return;
217 else if (r == PR_SRS)
218 tcg_gen_andi_tl(cpu_PR[r], tn, 3);
219 else {
220 if (r == PR_PID)
221 gen_helper_tlb_flush_pid(tn);
222 if (dc->tb_flags & S_FLAG && r == PR_SPC)
223 gen_helper_spc_write(tn);
224 else if (r == PR_CCS)
225 dc->cpustate_changed = 1;
226 tcg_gen_mov_tl(cpu_PR[r], tn);
230 static void cris_lock_irq(DisasContext *dc)
232 dc->clear_locked_irq = 0;
233 t_gen_mov_env_TN(locked_irq, tcg_const_tl(1));
236 static inline void t_gen_raise_exception(uint32_t index)
238 TCGv_i32 tmp = tcg_const_i32(index);
239 gen_helper_raise_exception(tmp);
240 tcg_temp_free_i32(tmp);
243 static void t_gen_lsl(TCGv d, TCGv a, TCGv b)
245 TCGv t0, t_31;
247 t0 = tcg_temp_new();
248 t_31 = tcg_const_tl(31);
249 tcg_gen_shl_tl(d, a, b);
251 tcg_gen_sub_tl(t0, t_31, b);
252 tcg_gen_sar_tl(t0, t0, t_31);
253 tcg_gen_and_tl(t0, t0, d);
254 tcg_gen_xor_tl(d, d, t0);
255 tcg_temp_free(t0);
256 tcg_temp_free(t_31);
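/* Note for t_gen_lsl above: when b > 31, (31 - b) goes negative, the
   arithmetic shift turns t0 into an all-ones mask and the xor clears d,
   so shift counts of 32..63 (the decoders mask counts to 6 bits) yield zero. */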
259 static void t_gen_lsr(TCGv d, TCGv a, TCGv b)
261 TCGv t0, t_31;
263 t0 = tcg_temp_new();
264 t_31 = tcg_temp_new();
265 tcg_gen_shr_tl(d, a, b);
267 tcg_gen_movi_tl(t_31, 31);
268 tcg_gen_sub_tl(t0, t_31, b);
269 tcg_gen_sar_tl(t0, t0, t_31);
270 tcg_gen_and_tl(t0, t0, d);
271 tcg_gen_xor_tl(d, d, t0);
272 tcg_temp_free(t0);
273 tcg_temp_free(t_31);
276 static void t_gen_asr(TCGv d, TCGv a, TCGv b)
278 TCGv t0, t_31;
280 t0 = tcg_temp_new();
281 t_31 = tcg_temp_new();
282 tcg_gen_sar_tl(d, a, b);
284 tcg_gen_movi_tl(t_31, 31);
285 tcg_gen_sub_tl(t0, t_31, b);
286 tcg_gen_sar_tl(t0, t0, t_31);
287 tcg_gen_or_tl(d, d, t0);
288 tcg_temp_free(t0);
289 tcg_temp_free(t_31);
292 /* 64-bit signed mul, lower result in d and upper in d2. */
293 static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
295 TCGv_i64 t0, t1;
297 t0 = tcg_temp_new_i64();
298 t1 = tcg_temp_new_i64();
300 tcg_gen_ext_i32_i64(t0, a);
301 tcg_gen_ext_i32_i64(t1, b);
302 tcg_gen_mul_i64(t0, t0, t1);
304 tcg_gen_trunc_i64_i32(d, t0);
305 tcg_gen_shri_i64(t0, t0, 32);
306 tcg_gen_trunc_i64_i32(d2, t0);
308 tcg_temp_free_i64(t0);
309 tcg_temp_free_i64(t1);
312 /* 64-bit unsigned mul, lower result in d and upper in d2. */
313 static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
315 TCGv_i64 t0, t1;
317 t0 = tcg_temp_new_i64();
318 t1 = tcg_temp_new_i64();
320 tcg_gen_extu_i32_i64(t0, a);
321 tcg_gen_extu_i32_i64(t1, b);
322 tcg_gen_mul_i64(t0, t0, t1);
324 tcg_gen_trunc_i64_i32(d, t0);
325 tcg_gen_shri_i64(t0, t0, 32);
326 tcg_gen_trunc_i64_i32(d2, t0);
328 tcg_temp_free_i64(t0);
329 tcg_temp_free_i64(t1);
332 static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b)
334 int l1;
336 l1 = gen_new_label();
339 * d <<= 1
340 * if (d >= s)
341 * d -= s;
343 tcg_gen_shli_tl(d, a, 1);
344 tcg_gen_brcond_tl(TCG_COND_LTU, d, b, l1);
345 tcg_gen_sub_tl(d, d, b);
346 gen_set_label(l1);
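/* t_gen_cris_dstep above is effectively one iteration of a restoring
   division: shift the partial remainder left and conditionally subtract
   the divisor. */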
349 static void t_gen_cris_mstep(TCGv d, TCGv a, TCGv b, TCGv ccs)
351 TCGv t;
354 * d <<= 1
355 * if (n)
356 * d += s;
358 t = tcg_temp_new();
359 tcg_gen_shli_tl(d, a, 1);
360 tcg_gen_shli_tl(t, ccs, 31 - 3);
361 tcg_gen_sari_tl(t, t, 31);
362 tcg_gen_and_tl(t, t, b);
363 tcg_gen_add_tl(d, d, t);
364 tcg_temp_free(t);
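/* The shli/sari pair above replicates the N flag (bit 3 of ccs) across the
   whole word, turning the conditional add into a branch-free and/add. */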
367 /* Extended arithmetic on CRIS. */
368 static inline void t_gen_add_flag(TCGv d, int flag)
370 TCGv c;
372 c = tcg_temp_new();
373 t_gen_mov_TN_preg(c, PR_CCS);
374 /* Propagate carry into d. */
375 tcg_gen_andi_tl(c, c, 1 << flag);
376 if (flag)
377 tcg_gen_shri_tl(c, c, flag);
378 tcg_gen_add_tl(d, d, c);
379 tcg_temp_free(c);
382 static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
384 if (dc->flagx_known) {
385 if (dc->flags_x) {
386 TCGv c;
388 c = tcg_temp_new();
389 t_gen_mov_TN_preg(c, PR_CCS);
390 /* C flag is already at bit 0. */
391 tcg_gen_andi_tl(c, c, C_FLAG);
392 tcg_gen_add_tl(d, d, c);
393 tcg_temp_free(c);
395 } else {
396 TCGv x, c;
398 x = tcg_temp_new();
399 c = tcg_temp_new();
400 t_gen_mov_TN_preg(x, PR_CCS);
401 tcg_gen_mov_tl(c, x);
403 /* Propagate carry into d if X is set. Branch free. */
404 tcg_gen_andi_tl(c, c, C_FLAG);
405 tcg_gen_andi_tl(x, x, X_FLAG);
406 tcg_gen_shri_tl(x, x, 4);
408 tcg_gen_and_tl(x, x, c);
409 tcg_gen_add_tl(d, d, x);
410 tcg_temp_free(x);
411 tcg_temp_free(c);
415 static inline void t_gen_subx_carry(DisasContext *dc, TCGv d)
417 if (dc->flagx_known) {
418 if (dc->flags_x) {
419 TCGv c;
421 c = tcg_temp_new();
422 t_gen_mov_TN_preg(c, PR_CCS);
423 /* C flag is already at bit 0. */
424 tcg_gen_andi_tl(c, c, C_FLAG);
425 tcg_gen_sub_tl(d, d, c);
426 tcg_temp_free(c);
428 } else {
429 TCGv x, c;
431 x = tcg_temp_new();
432 c = tcg_temp_new();
433 t_gen_mov_TN_preg(x, PR_CCS);
434 tcg_gen_mov_tl(c, x);
436 /* Propagate carry into d if X is set. Branch free. */
437 tcg_gen_andi_tl(c, c, C_FLAG);
438 tcg_gen_andi_tl(x, x, X_FLAG);
439 tcg_gen_shri_tl(x, x, 4);
441 tcg_gen_and_tl(x, x, c);
442 tcg_gen_sub_tl(d, d, x);
443 tcg_temp_free(x);
444 tcg_temp_free(c);
448 /* Swap the two bytes within each half word of the s operand.
449 T0 = ((T0 << 8) & 0xff00ff00) | ((T0 >> 8) & 0x00ff00ff) */
450 static inline void t_gen_swapb(TCGv d, TCGv s)
452 TCGv t, org_s;
454 t = tcg_temp_new();
455 org_s = tcg_temp_new();
457 /* d and s may refer to the same object. */
458 tcg_gen_mov_tl(org_s, s);
459 tcg_gen_shli_tl(t, org_s, 8);
460 tcg_gen_andi_tl(d, t, 0xff00ff00);
461 tcg_gen_shri_tl(t, org_s, 8);
462 tcg_gen_andi_tl(t, t, 0x00ff00ff);
463 tcg_gen_or_tl(d, d, t);
464 tcg_temp_free(t);
465 tcg_temp_free(org_s);
468 /* Swap the halfwords of the s operand. */
469 static inline void t_gen_swapw(TCGv d, TCGv s)
471 TCGv t;
472 /* d and s may refer to the same object. */
473 t = tcg_temp_new();
474 tcg_gen_mov_tl(t, s);
475 tcg_gen_shli_tl(d, t, 16);
476 tcg_gen_shri_tl(t, t, 16);
477 tcg_gen_or_tl(d, d, t);
478 tcg_temp_free(t);
481 /* Reverse the bits within each byte.
482 T0 = (((T0 << 7) & 0x80808080) |
483 ((T0 << 5) & 0x40404040) |
484 ((T0 << 3) & 0x20202020) |
485 ((T0 << 1) & 0x10101010) |
486 ((T0 >> 1) & 0x08080808) |
487 ((T0 >> 3) & 0x04040404) |
488 ((T0 >> 5) & 0x02020202) |
489 ((T0 >> 7) & 0x01010101));
491 static inline void t_gen_swapr(TCGv d, TCGv s)
493 struct {
494 int shift; /* LSL when positive, LSR when negative. */
495 uint32_t mask;
496 } bitrev [] = {
497 {7, 0x80808080},
498 {5, 0x40404040},
499 {3, 0x20202020},
500 {1, 0x10101010},
501 {-1, 0x08080808},
502 {-3, 0x04040404},
503 {-5, 0x02020202},
504 {-7, 0x01010101}
506 int i;
507 TCGv t, org_s;
509 /* d and s may refer to the same object. */
510 t = tcg_temp_new();
511 org_s = tcg_temp_new();
512 tcg_gen_mov_tl(org_s, s);
514 tcg_gen_shli_tl(t, org_s, bitrev[0].shift);
515 tcg_gen_andi_tl(d, t, bitrev[0].mask);
516 for (i = 1; i < ARRAY_SIZE(bitrev); i++) {
517 if (bitrev[i].shift >= 0) {
518 tcg_gen_shli_tl(t, org_s, bitrev[i].shift);
519 } else {
520 tcg_gen_shri_tl(t, org_s, -bitrev[i].shift);
522 tcg_gen_andi_tl(t, t, bitrev[i].mask);
523 tcg_gen_or_tl(d, d, t);
525 tcg_temp_free(t);
526 tcg_temp_free(org_s);
529 static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false)
531 TCGv btaken;
532 int l1;
534 l1 = gen_new_label();
535 btaken = tcg_temp_new();
537 /* Conditional jmp. */
538 tcg_gen_mov_tl(btaken, env_btaken);
539 tcg_gen_mov_tl(env_pc, pc_false);
540 tcg_gen_brcondi_tl(TCG_COND_EQ, btaken, 0, l1);
541 tcg_gen_mov_tl(env_pc, pc_true);
542 gen_set_label(l1);
544 tcg_temp_free(btaken);
547 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
549 TranslationBlock *tb;
550 tb = dc->tb;
551 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
552 tcg_gen_goto_tb(n);
553 tcg_gen_movi_tl(env_pc, dest);
554 tcg_gen_exit_tb((long)tb + n);
555 } else {
556 tcg_gen_movi_tl(env_pc, dest);
557 tcg_gen_exit_tb(0);
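/* Direct TB chaining (goto_tb) is only safe when the destination lies on
   the same guest page as this TB; otherwise just set env_pc and exit so
   the next lookup re-validates the mapping. */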
561 /* Sign extend at translation time. */
562 static int sign_extend(unsigned int val, unsigned int width)
564 int sval;
566 /* LSL. */
567 val <<= 31 - width;
568 sval = val;
569 /* ASR. */
570 sval >>= 31 - width;
571 return sval;
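/* Here 'width' is the bit index of the sign bit, e.g.
   sign_extend(0x20, 5) == -32 and sign_extend(0x1f, 5) == 31. */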
574 static inline void cris_clear_x_flag(DisasContext *dc)
576 if (dc->flagx_known && dc->flags_x)
577 dc->flags_uptodate = 0;
579 dc->flagx_known = 1;
580 dc->flags_x = 0;
583 static void cris_flush_cc_state(DisasContext *dc)
585 if (dc->cc_size_uptodate != dc->cc_size) {
586 tcg_gen_movi_tl(cc_size, dc->cc_size);
587 dc->cc_size_uptodate = dc->cc_size;
589 tcg_gen_movi_tl(cc_op, dc->cc_op);
590 tcg_gen_movi_tl(cc_mask, dc->cc_mask);
593 static void cris_evaluate_flags(DisasContext *dc)
595 if (dc->flags_uptodate)
596 return;
598 cris_flush_cc_state(dc);
600 switch (dc->cc_op)
602 case CC_OP_MCP:
603 gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS],
604 cpu_PR[PR_CCS], cc_src,
605 cc_dest, cc_result);
606 break;
607 case CC_OP_MULS:
608 gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS],
609 cpu_PR[PR_CCS], cc_result,
610 cpu_PR[PR_MOF]);
611 break;
612 case CC_OP_MULU:
613 gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS],
614 cpu_PR[PR_CCS], cc_result,
615 cpu_PR[PR_MOF]);
616 break;
617 case CC_OP_MOVE:
618 case CC_OP_AND:
619 case CC_OP_OR:
620 case CC_OP_XOR:
621 case CC_OP_ASR:
622 case CC_OP_LSR:
623 case CC_OP_LSL:
624 switch (dc->cc_size)
626 case 4:
627 gen_helper_evaluate_flags_move_4(cpu_PR[PR_CCS],
628 cpu_PR[PR_CCS], cc_result);
629 break;
630 case 2:
631 gen_helper_evaluate_flags_move_2(cpu_PR[PR_CCS],
632 cpu_PR[PR_CCS], cc_result);
633 break;
634 default:
635 gen_helper_evaluate_flags();
636 break;
638 break;
639 case CC_OP_FLAGS:
640 /* live. */
641 break;
642 case CC_OP_SUB:
643 case CC_OP_CMP:
644 if (dc->cc_size == 4)
645 gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS],
646 cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
647 else
648 gen_helper_evaluate_flags();
650 break;
651 default:
652 switch (dc->cc_size)
654 case 4:
655 gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS],
656 cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
657 break;
658 default:
659 gen_helper_evaluate_flags();
660 break;
662 break;
665 if (dc->flagx_known) {
666 if (dc->flags_x)
667 tcg_gen_ori_tl(cpu_PR[PR_CCS],
668 cpu_PR[PR_CCS], X_FLAG);
669 else if (dc->cc_op == CC_OP_FLAGS)
670 tcg_gen_andi_tl(cpu_PR[PR_CCS],
671 cpu_PR[PR_CCS], ~X_FLAG);
673 dc->flags_uptodate = 1;
676 static void cris_cc_mask(DisasContext *dc, unsigned int mask)
678 uint32_t ovl;
680 if (!mask) {
681 dc->update_cc = 0;
682 return;
685 /* Check if we need to evaluate the condition codes due to
686 CC overlaying. */
687 ovl = (dc->cc_mask ^ mask) & ~mask;
688 if (ovl) {
689 /* TODO: optimize this case. It triggers all the time. */
690 cris_evaluate_flags (dc);
692 dc->cc_mask = mask;
693 dc->update_cc = 1;
696 static void cris_update_cc_op(DisasContext *dc, int op, int size)
698 dc->cc_op = op;
699 dc->cc_size = size;
700 dc->flags_uptodate = 0;
703 static inline void cris_update_cc_x(DisasContext *dc)
705 /* Save the x flag state at the time of the cc snapshot. */
706 if (dc->flagx_known) {
707 if (dc->cc_x_uptodate == (2 | dc->flags_x))
708 return;
709 tcg_gen_movi_tl(cc_x, dc->flags_x);
710 dc->cc_x_uptodate = 2 | dc->flags_x;
712 else {
713 tcg_gen_andi_tl(cc_x, cpu_PR[PR_CCS], X_FLAG);
714 dc->cc_x_uptodate = 1;
718 /* Update cc prior to executing ALU op. Needs source operands untouched. */
719 static void cris_pre_alu_update_cc(DisasContext *dc, int op,
720 TCGv dst, TCGv src, int size)
722 if (dc->update_cc) {
723 cris_update_cc_op(dc, op, size);
724 tcg_gen_mov_tl(cc_src, src);
726 if (op != CC_OP_MOVE
727 && op != CC_OP_AND
728 && op != CC_OP_OR
729 && op != CC_OP_XOR
730 && op != CC_OP_ASR
731 && op != CC_OP_LSR
732 && op != CC_OP_LSL)
733 tcg_gen_mov_tl(cc_dest, dst);
735 cris_update_cc_x(dc);
739 /* Update cc after executing ALU op. needs the result. */
740 static inline void cris_update_result(DisasContext *dc, TCGv res)
742 if (dc->update_cc)
743 tcg_gen_mov_tl(cc_result, res);
746 /* Emit the ALU insns for op; write-back of the result is handled by the caller. */
747 static void cris_alu_op_exec(DisasContext *dc, int op,
748 TCGv dst, TCGv a, TCGv b, int size)
750 /* Emit the ALU insns. */
751 switch (op)
753 case CC_OP_ADD:
754 tcg_gen_add_tl(dst, a, b);
755 /* Extended arithmetic. */
756 t_gen_addx_carry(dc, dst);
757 break;
758 case CC_OP_ADDC:
759 tcg_gen_add_tl(dst, a, b);
760 t_gen_add_flag(dst, 0); /* C_FLAG. */
761 break;
762 case CC_OP_MCP:
763 tcg_gen_add_tl(dst, a, b);
764 t_gen_add_flag(dst, 8); /* R_FLAG. */
765 break;
766 case CC_OP_SUB:
767 tcg_gen_sub_tl(dst, a, b);
768 /* Extended arithmetic. */
769 t_gen_subx_carry(dc, dst);
770 break;
771 case CC_OP_MOVE:
772 tcg_gen_mov_tl(dst, b);
773 break;
774 case CC_OP_OR:
775 tcg_gen_or_tl(dst, a, b);
776 break;
777 case CC_OP_AND:
778 tcg_gen_and_tl(dst, a, b);
779 break;
780 case CC_OP_XOR:
781 tcg_gen_xor_tl(dst, a, b);
782 break;
783 case CC_OP_LSL:
784 t_gen_lsl(dst, a, b);
785 break;
786 case CC_OP_LSR:
787 t_gen_lsr(dst, a, b);
788 break;
789 case CC_OP_ASR:
790 t_gen_asr(dst, a, b);
791 break;
792 case CC_OP_NEG:
793 tcg_gen_neg_tl(dst, b);
794 /* Extended arithmetic. */
795 t_gen_subx_carry(dc, dst);
796 break;
797 case CC_OP_LZ:
798 gen_helper_lz(dst, b);
799 break;
800 case CC_OP_MULS:
801 t_gen_muls(dst, cpu_PR[PR_MOF], a, b);
802 break;
803 case CC_OP_MULU:
804 t_gen_mulu(dst, cpu_PR[PR_MOF], a, b);
805 break;
806 case CC_OP_DSTEP:
807 t_gen_cris_dstep(dst, a, b);
808 break;
809 case CC_OP_MSTEP:
810 t_gen_cris_mstep(dst, a, b, cpu_PR[PR_CCS]);
811 break;
812 case CC_OP_BOUND:
814 int l1;
815 l1 = gen_new_label();
816 tcg_gen_mov_tl(dst, a);
817 tcg_gen_brcond_tl(TCG_COND_LEU, a, b, l1);
818 tcg_gen_mov_tl(dst, b);
819 gen_set_label(l1);
821 break;
822 case CC_OP_CMP:
823 tcg_gen_sub_tl(dst, a, b);
824 /* Extended arithmetic. */
825 t_gen_subx_carry(dc, dst);
826 break;
827 default:
828 qemu_log("illegal ALU op.\n");
829 BUG();
830 break;
833 if (size == 1)
834 tcg_gen_andi_tl(dst, dst, 0xff);
835 else if (size == 2)
836 tcg_gen_andi_tl(dst, dst, 0xffff);
839 static void cris_alu(DisasContext *dc, int op,
840 TCGv d, TCGv op_a, TCGv op_b, int size)
842 TCGv tmp;
843 int writeback;
845 writeback = 1;
847 if (op == CC_OP_CMP) {
848 tmp = tcg_temp_new();
849 writeback = 0;
850 } else if (size == 4) {
851 tmp = d;
852 writeback = 0;
853 } else
854 tmp = tcg_temp_new();
857 cris_pre_alu_update_cc(dc, op, op_a, op_b, size);
858 cris_alu_op_exec(dc, op, tmp, op_a, op_b, size);
859 cris_update_result(dc, tmp);
861 /* Writeback. */
862 if (writeback) {
863 if (size == 1)
864 tcg_gen_andi_tl(d, d, ~0xff);
865 else
866 tcg_gen_andi_tl(d, d, ~0xffff);
867 tcg_gen_or_tl(d, d, tmp);
869 if (!TCGV_EQUAL(tmp, d))
870 tcg_temp_free(tmp);
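/* For byte and word sized ops the result is merged into the low bits of
   the destination; the upper bits are preserved, mirroring how sub-dword
   CRIS operations only update the low part of the register. */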
873 static int arith_cc(DisasContext *dc)
875 if (dc->update_cc) {
876 switch (dc->cc_op) {
877 case CC_OP_ADDC: return 1;
878 case CC_OP_ADD: return 1;
879 case CC_OP_SUB: return 1;
880 case CC_OP_DSTEP: return 1;
881 case CC_OP_LSL: return 1;
882 case CC_OP_LSR: return 1;
883 case CC_OP_ASR: return 1;
884 case CC_OP_CMP: return 1;
885 case CC_OP_NEG: return 1;
886 case CC_OP_OR: return 1;
887 case CC_OP_AND: return 1;
888 case CC_OP_XOR: return 1;
889 case CC_OP_MULU: return 1;
890 case CC_OP_MULS: return 1;
891 default:
892 return 0;
895 return 0;
898 static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
900 int arith_opt, move_opt;
902 /* TODO: optimize more condition codes. */
905 * If the flags are live, we've gotta look into the bits of CCS.
906 * Otherwise, if we just did an arithmetic operation we try to
907 * evaluate the condition code faster.
909 * When this function is done, T0 should be non-zero if the condition
910 * code is true.
912 arith_opt = arith_cc(dc) && !dc->flags_uptodate;
913 move_opt = (dc->cc_op == CC_OP_MOVE);
914 switch (cond) {
915 case CC_EQ:
916 if ((arith_opt || move_opt)
917 && dc->cc_x_uptodate != (2 | X_FLAG)) {
918 /* If cc_result is zero, T0 should be
919 non-zero otherwise T0 should be zero. */
920 int l1;
921 l1 = gen_new_label();
922 tcg_gen_movi_tl(cc, 0);
923 tcg_gen_brcondi_tl(TCG_COND_NE, cc_result,
924 0, l1);
925 tcg_gen_movi_tl(cc, 1);
926 gen_set_label(l1);
928 else {
929 cris_evaluate_flags(dc);
930 tcg_gen_andi_tl(cc,
931 cpu_PR[PR_CCS], Z_FLAG);
933 break;
934 case CC_NE:
935 if ((arith_opt || move_opt)
936 && dc->cc_x_uptodate != (2 | X_FLAG)) {
937 tcg_gen_mov_tl(cc, cc_result);
938 } else {
939 cris_evaluate_flags(dc);
940 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
941 Z_FLAG);
942 tcg_gen_andi_tl(cc, cc, Z_FLAG);
944 break;
945 case CC_CS:
946 cris_evaluate_flags(dc);
947 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], C_FLAG);
948 break;
949 case CC_CC:
950 cris_evaluate_flags(dc);
951 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], C_FLAG);
952 tcg_gen_andi_tl(cc, cc, C_FLAG);
953 break;
954 case CC_VS:
955 cris_evaluate_flags(dc);
956 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], V_FLAG);
957 break;
958 case CC_VC:
959 cris_evaluate_flags(dc);
960 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
961 V_FLAG);
962 tcg_gen_andi_tl(cc, cc, V_FLAG);
963 break;
964 case CC_PL:
965 if (arith_opt || move_opt) {
966 int bits = 31;
968 if (dc->cc_size == 1)
969 bits = 7;
970 else if (dc->cc_size == 2)
971 bits = 15;
973 tcg_gen_shri_tl(cc, cc_result, bits);
974 tcg_gen_xori_tl(cc, cc, 1);
975 } else {
976 cris_evaluate_flags(dc);
977 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
978 N_FLAG);
979 tcg_gen_andi_tl(cc, cc, N_FLAG);
981 break;
982 case CC_MI:
983 if (arith_opt || move_opt) {
984 int bits = 31;
986 if (dc->cc_size == 1)
987 bits = 7;
988 else if (dc->cc_size == 2)
989 bits = 15;
991 tcg_gen_shri_tl(cc, cc_result, bits);
992 tcg_gen_andi_tl(cc, cc, 1);
994 else {
995 cris_evaluate_flags(dc);
996 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
997 N_FLAG);
999 break;
1000 case CC_LS:
1001 cris_evaluate_flags(dc);
1002 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
1003 C_FLAG | Z_FLAG);
1004 break;
1005 case CC_HI:
1006 cris_evaluate_flags(dc);
1008 TCGv tmp;
1010 tmp = tcg_temp_new();
1011 tcg_gen_xori_tl(tmp, cpu_PR[PR_CCS],
1012 C_FLAG | Z_FLAG);
1013 /* Overlay the C flag on top of the Z. */
1014 tcg_gen_shli_tl(cc, tmp, 2);
1015 tcg_gen_and_tl(cc, tmp, cc);
1016 tcg_gen_andi_tl(cc, cc, Z_FLAG);
1018 tcg_temp_free(tmp);
1020 break;
1021 case CC_GE:
1022 cris_evaluate_flags(dc);
1023 /* Overlay the V flag on top of the N. */
1024 tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
1025 tcg_gen_xor_tl(cc,
1026 cpu_PR[PR_CCS], cc);
1027 tcg_gen_andi_tl(cc, cc, N_FLAG);
1028 tcg_gen_xori_tl(cc, cc, N_FLAG);
1029 break;
1030 case CC_LT:
1031 cris_evaluate_flags(dc);
1032 /* Overlay the V flag on top of the N. */
1033 tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
1034 tcg_gen_xor_tl(cc,
1035 cpu_PR[PR_CCS], cc);
1036 tcg_gen_andi_tl(cc, cc, N_FLAG);
1037 break;
1038 case CC_GT:
1039 cris_evaluate_flags(dc);
1041 TCGv n, z;
1043 n = tcg_temp_new();
1044 z = tcg_temp_new();
1046 /* To avoid a shift we overlay everything on
1047 the V flag. */
1048 tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
1049 tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
1050 /* invert Z. */
1051 tcg_gen_xori_tl(z, z, 2);
1053 tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
1054 tcg_gen_xori_tl(n, n, 2);
1055 tcg_gen_and_tl(cc, z, n);
1056 tcg_gen_andi_tl(cc, cc, 2);
1058 tcg_temp_free(n);
1059 tcg_temp_free(z);
1061 break;
1062 case CC_LE:
1063 cris_evaluate_flags(dc);
1065 TCGv n, z;
1067 n = tcg_temp_new();
1068 z = tcg_temp_new();
1070 /* To avoid a shift we overlay everything on
1071 the V flag. */
1072 tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
1073 tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
1075 tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
1076 tcg_gen_or_tl(cc, z, n);
1077 tcg_gen_andi_tl(cc, cc, 2);
1079 tcg_temp_free(n);
1080 tcg_temp_free(z);
1082 break;
1083 case CC_P:
1084 cris_evaluate_flags(dc);
1085 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], P_FLAG);
1086 break;
1087 case CC_A:
1088 tcg_gen_movi_tl(cc, 1);
1089 break;
1090 default:
1091 BUG();
1092 break;
1096 static void cris_store_direct_jmp(DisasContext *dc)
1098 /* Store the direct jmp state into the cpu-state. */
1099 if (dc->jmp == JMP_DIRECT) {
1100 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
1101 tcg_gen_movi_tl(env_btaken, 1);
1105 static void cris_prepare_cc_branch (DisasContext *dc,
1106 int offset, int cond)
1108 /* This helps us re-schedule the micro-code to insns in delay-slots
1109 before the actual jump. */
1110 dc->delayed_branch = 2;
1111 dc->jmp_pc = dc->pc + offset;
1113 if (cond != CC_A)
1115 dc->jmp = JMP_INDIRECT;
1116 gen_tst_cc (dc, env_btaken, cond);
1117 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
1118 } else {
1119 /* Allow chaining. */
1120 dc->jmp = JMP_DIRECT;
1125 /* Prepare dynamic jumps, e.g. when the dest is in a live reg. Direct should be set
1126 when the dest addr is constant to allow tb chaining. */
1127 static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)
1129 /* This helps us re-schedule the micro-code to insns in delay-slots
1130 before the actual jump. */
1131 dc->delayed_branch = 2;
1132 dc->jmp = type;
1133 if (type == JMP_INDIRECT)
1134 tcg_gen_movi_tl(env_btaken, 1);
1137 static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
1139 int mem_index = cpu_mmu_index(dc->env);
1141 /* If we get a fault on a delayslot we must keep the jmp state in
1142 the cpu-state to be able to re-execute the jmp. */
1143 if (dc->delayed_branch == 1)
1144 cris_store_direct_jmp(dc);
1146 tcg_gen_qemu_ld64(dst, addr, mem_index);
1149 static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
1150 unsigned int size, int sign)
1152 int mem_index = cpu_mmu_index(dc->env);
1154 /* If we get a fault on a delayslot we must keep the jmp state in
1155 the cpu-state to be able to re-execute the jmp. */
1156 if (dc->delayed_branch == 1)
1157 cris_store_direct_jmp(dc);
1159 if (size == 1) {
1160 if (sign)
1161 tcg_gen_qemu_ld8s(dst, addr, mem_index);
1162 else
1163 tcg_gen_qemu_ld8u(dst, addr, mem_index);
1165 else if (size == 2) {
1166 if (sign)
1167 tcg_gen_qemu_ld16s(dst, addr, mem_index);
1168 else
1169 tcg_gen_qemu_ld16u(dst, addr, mem_index);
1171 else if (size == 4) {
1172 tcg_gen_qemu_ld32u(dst, addr, mem_index);
1174 else {
1175 abort();
1179 static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
1180 unsigned int size)
1182 int mem_index = cpu_mmu_index(dc->env);
1184 /* If we get a fault on a delayslot we must keep the jmp state in
1185 the cpu-state to be able to re-execute the jmp. */
1186 if (dc->delayed_branch == 1)
1187 cris_store_direct_jmp(dc);
1190 /* Conditional writes. We only support the kind where X and P are known
1191 at translation time. */
1192 if (dc->flagx_known && dc->flags_x && (dc->tb_flags & P_FLAG)) {
1193 dc->postinc = 0;
1194 cris_evaluate_flags(dc);
1195 tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], C_FLAG);
1196 return;
1199 if (size == 1)
1200 tcg_gen_qemu_st8(val, addr, mem_index);
1201 else if (size == 2)
1202 tcg_gen_qemu_st16(val, addr, mem_index);
1203 else
1204 tcg_gen_qemu_st32(val, addr, mem_index);
1206 if (dc->flagx_known && dc->flags_x) {
1207 cris_evaluate_flags(dc);
1208 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~C_FLAG);
1212 static inline void t_gen_sext(TCGv d, TCGv s, int size)
1214 if (size == 1)
1215 tcg_gen_ext8s_i32(d, s);
1216 else if (size == 2)
1217 tcg_gen_ext16s_i32(d, s);
1218 else if(!TCGV_EQUAL(d, s))
1219 tcg_gen_mov_tl(d, s);
1222 static inline void t_gen_zext(TCGv d, TCGv s, int size)
1224 if (size == 1)
1225 tcg_gen_ext8u_i32(d, s);
1226 else if (size == 2)
1227 tcg_gen_ext16u_i32(d, s);
1228 else if (!TCGV_EQUAL(d, s))
1229 tcg_gen_mov_tl(d, s);
1232 #if DISAS_CRIS
1233 static char memsize_char(int size)
1235 switch (size)
1237 case 1: return 'b'; break;
1238 case 2: return 'w'; break;
1239 case 4: return 'd'; break;
1240 default:
1241 return 'x';
1242 break;
1245 #endif
1247 static inline unsigned int memsize_z(DisasContext *dc)
1249 return dc->zsize + 1;
1252 static inline unsigned int memsize_zz(DisasContext *dc)
1254 switch (dc->zzsize)
1256 case 0: return 1;
1257 case 1: return 2;
1258 default:
1259 return 4;
1263 static inline void do_postinc (DisasContext *dc, int size)
1265 if (dc->postinc)
1266 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], size);
1269 static inline void dec_prep_move_r(DisasContext *dc, int rs, int rd,
1270 int size, int s_ext, TCGv dst)
1272 if (s_ext)
1273 t_gen_sext(dst, cpu_R[rs], size);
1274 else
1275 t_gen_zext(dst, cpu_R[rs], size);
1278 /* Prepare T0 and T1 for a register alu operation.
1279 s_ext decides if the operand1 should be sign-extended or zero-extended when
1280 needed. */
1281 static void dec_prep_alu_r(DisasContext *dc, int rs, int rd,
1282 int size, int s_ext, TCGv dst, TCGv src)
1284 dec_prep_move_r(dc, rs, rd, size, s_ext, src);
1286 if (s_ext)
1287 t_gen_sext(dst, cpu_R[rd], size);
1288 else
1289 t_gen_zext(dst, cpu_R[rd], size);
1292 static int dec_prep_move_m(DisasContext *dc, int s_ext, int memsize,
1293 TCGv dst)
1295 unsigned int rs, rd;
1296 uint32_t imm;
1297 int is_imm;
1298 int insn_len = 2;
1300 rs = dc->op1;
1301 rd = dc->op2;
1302 is_imm = rs == 15 && dc->postinc;
1304 /* Load [$rs] onto T1. */
1305 if (is_imm) {
1306 insn_len = 2 + memsize;
1307 if (memsize == 1)
1308 insn_len++;
1310 if (memsize != 4) {
1311 if (s_ext) {
1312 if (memsize == 1)
1313 imm = ldsb_code(dc->pc + 2);
1314 else
1315 imm = ldsw_code(dc->pc + 2);
1316 } else {
1317 if (memsize == 1)
1318 imm = ldub_code(dc->pc + 2);
1319 else
1320 imm = lduw_code(dc->pc + 2);
1322 } else
1323 imm = ldl_code(dc->pc + 2);
1325 tcg_gen_movi_tl(dst, imm);
1326 dc->postinc = 0;
1327 } else {
1328 cris_flush_cc_state(dc);
1329 gen_load(dc, dst, cpu_R[rs], memsize, 0);
1330 if (s_ext)
1331 t_gen_sext(dst, dst, memsize);
1332 else
1333 t_gen_zext(dst, dst, memsize);
1335 return insn_len;
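/* [$pc+] (rs == 15 with post-increment) is the immediate addressing mode:
   the operand comes from the instruction stream and insn_len grows by the
   operand size (byte immediates are padded to a halfword). */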
1338 /* Prepare T0 and T1 for a memory + alu operation.
1339 s_ext decides if the operand1 should be sign-extended or zero-extended when
1340 needed. */
1341 static int dec_prep_alu_m(DisasContext *dc, int s_ext, int memsize,
1342 TCGv dst, TCGv src)
1344 int insn_len;
1346 insn_len = dec_prep_move_m(dc, s_ext, memsize, src);
1347 tcg_gen_mov_tl(dst, cpu_R[dc->op2]);
1348 return insn_len;
1351 #if DISAS_CRIS
1352 static const char *cc_name(int cc)
1354 static const char *cc_names[16] = {
1355 "cc", "cs", "ne", "eq", "vc", "vs", "pl", "mi",
1356 "ls", "hi", "ge", "lt", "gt", "le", "a", "p"
1358 assert(cc < 16);
1359 return cc_names[cc];
1361 #endif
1363 /* Start of insn decoders. */
1365 static unsigned int dec_bccq(DisasContext *dc)
1367 int32_t offset;
1368 int sign;
1369 uint32_t cond = dc->op2;
1370 int tmp;
1372 offset = EXTRACT_FIELD (dc->ir, 1, 7);
1373 sign = EXTRACT_FIELD(dc->ir, 0, 0);
1375 offset *= 2;
1376 offset |= sign << 8;
1377 tmp = offset;
1378 offset = sign_extend(offset, 8);
1380 LOG_DIS("b%s %x\n", cc_name(cond), dc->pc + offset);
1382 /* op2 holds the condition-code. */
1383 cris_cc_mask(dc, 0);
1384 cris_prepare_cc_branch (dc, offset, cond);
1385 return 2;
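/* Quick bcc encoding: bits 1..7 give the offset in words and bit 0 the
   sign, i.e. an even displacement in the range -256..+254 relative to dc->pc. */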
1387 static unsigned int dec_addoq(DisasContext *dc)
1389 int32_t imm;
1391 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 7);
1392 imm = sign_extend(dc->op1, 7);
1394 LOG_DIS("addoq %d, $r%u\n", imm, dc->op2);
1395 cris_cc_mask(dc, 0);
1396 /* Fetch register operand. */
1397 tcg_gen_addi_tl(cpu_R[R_ACR], cpu_R[dc->op2], imm);
1399 return 2;
1401 static unsigned int dec_addq(DisasContext *dc)
1403 LOG_DIS("addq %u, $r%u\n", dc->op1, dc->op2);
1405 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1407 cris_cc_mask(dc, CC_MASK_NZVC);
1409 cris_alu(dc, CC_OP_ADD,
1410 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
1411 return 2;
1413 static unsigned int dec_moveq(DisasContext *dc)
1415 uint32_t imm;
1417 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1418 imm = sign_extend(dc->op1, 5);
1419 LOG_DIS("moveq %d, $r%u\n", imm, dc->op2);
1421 tcg_gen_movi_tl(cpu_R[dc->op2], imm);
1422 return 2;
1424 static unsigned int dec_subq(DisasContext *dc)
1426 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1428 LOG_DIS("subq %u, $r%u\n", dc->op1, dc->op2);
1430 cris_cc_mask(dc, CC_MASK_NZVC);
1431 cris_alu(dc, CC_OP_SUB,
1432 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
1433 return 2;
1435 static unsigned int dec_cmpq(DisasContext *dc)
1437 uint32_t imm;
1438 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1439 imm = sign_extend(dc->op1, 5);
1441 LOG_DIS("cmpq %d, $r%d\n", imm, dc->op2);
1442 cris_cc_mask(dc, CC_MASK_NZVC);
1444 cris_alu(dc, CC_OP_CMP,
1445 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1446 return 2;
1448 static unsigned int dec_andq(DisasContext *dc)
1450 uint32_t imm;
1451 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1452 imm = sign_extend(dc->op1, 5);
1454 LOG_DIS("andq %d, $r%d\n", imm, dc->op2);
1455 cris_cc_mask(dc, CC_MASK_NZ);
1457 cris_alu(dc, CC_OP_AND,
1458 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1459 return 2;
1461 static unsigned int dec_orq(DisasContext *dc)
1463 uint32_t imm;
1464 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1465 imm = sign_extend(dc->op1, 5);
1466 LOG_DIS("orq %d, $r%d\n", imm, dc->op2);
1467 cris_cc_mask(dc, CC_MASK_NZ);
1469 cris_alu(dc, CC_OP_OR,
1470 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1471 return 2;
1473 static unsigned int dec_btstq(DisasContext *dc)
1475 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1476 LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2);
1478 cris_cc_mask(dc, CC_MASK_NZ);
1479 cris_evaluate_flags(dc);
1480 gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
1481 tcg_const_tl(dc->op1), cpu_PR[PR_CCS]);
1482 cris_alu(dc, CC_OP_MOVE,
1483 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
1484 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
1485 dc->flags_uptodate = 1;
1486 return 2;
1488 static unsigned int dec_asrq(DisasContext *dc)
1490 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1491 LOG_DIS("asrq %u, $r%d\n", dc->op1, dc->op2);
1492 cris_cc_mask(dc, CC_MASK_NZ);
1494 tcg_gen_sari_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1495 cris_alu(dc, CC_OP_MOVE,
1496 cpu_R[dc->op2],
1497 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1498 return 2;
1500 static unsigned int dec_lslq(DisasContext *dc)
1502 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1503 LOG_DIS("lslq %u, $r%d\n", dc->op1, dc->op2);
1505 cris_cc_mask(dc, CC_MASK_NZ);
1507 tcg_gen_shli_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1509 cris_alu(dc, CC_OP_MOVE,
1510 cpu_R[dc->op2],
1511 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1512 return 2;
1514 static unsigned int dec_lsrq(DisasContext *dc)
1516 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1517 LOG_DIS("lsrq %u, $r%d\n", dc->op1, dc->op2);
1519 cris_cc_mask(dc, CC_MASK_NZ);
1521 tcg_gen_shri_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1522 cris_alu(dc, CC_OP_MOVE,
1523 cpu_R[dc->op2],
1524 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1525 return 2;
1528 static unsigned int dec_move_r(DisasContext *dc)
1530 int size = memsize_zz(dc);
1532 LOG_DIS("move.%c $r%u, $r%u\n",
1533 memsize_char(size), dc->op1, dc->op2);
1535 cris_cc_mask(dc, CC_MASK_NZ);
1536 if (size == 4) {
1537 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, cpu_R[dc->op2]);
1538 cris_cc_mask(dc, CC_MASK_NZ);
1539 cris_update_cc_op(dc, CC_OP_MOVE, 4);
1540 cris_update_cc_x(dc);
1541 cris_update_result(dc, cpu_R[dc->op2]);
1543 else {
1544 TCGv t0;
1546 t0 = tcg_temp_new();
1547 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
1548 cris_alu(dc, CC_OP_MOVE,
1549 cpu_R[dc->op2],
1550 cpu_R[dc->op2], t0, size);
1551 tcg_temp_free(t0);
1553 return 2;
1556 static unsigned int dec_scc_r(DisasContext *dc)
1558 int cond = dc->op2;
1560 LOG_DIS("s%s $r%u\n",
1561 cc_name(cond), dc->op1);
1563 if (cond != CC_A)
1565 int l1;
1567 gen_tst_cc (dc, cpu_R[dc->op1], cond);
1568 l1 = gen_new_label();
1569 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_R[dc->op1], 0, l1);
1570 tcg_gen_movi_tl(cpu_R[dc->op1], 1);
1571 gen_set_label(l1);
1573 else
1574 tcg_gen_movi_tl(cpu_R[dc->op1], 1);
1576 cris_cc_mask(dc, 0);
1577 return 2;
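/* scc leaves 1 in the destination register when the condition holds and 0
   otherwise; gen_tst_cc only guarantees a zero/non-zero value, hence the
   normalisation above. */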
1580 static inline void cris_alu_alloc_temps(DisasContext *dc, int size, TCGv *t)
1582 if (size == 4) {
1583 t[0] = cpu_R[dc->op2];
1584 t[1] = cpu_R[dc->op1];
1585 } else {
1586 t[0] = tcg_temp_new();
1587 t[1] = tcg_temp_new();
1591 static inline void cris_alu_free_temps(DisasContext *dc, int size, TCGv *t)
1593 if (size != 4) {
1594 tcg_temp_free(t[0]);
1595 tcg_temp_free(t[1]);
1599 static unsigned int dec_and_r(DisasContext *dc)
1601 TCGv t[2];
1602 int size = memsize_zz(dc);
1604 LOG_DIS("and.%c $r%u, $r%u\n",
1605 memsize_char(size), dc->op1, dc->op2);
1607 cris_cc_mask(dc, CC_MASK_NZ);
1609 cris_alu_alloc_temps(dc, size, t);
1610 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1611 cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], size);
1612 cris_alu_free_temps(dc, size, t);
1613 return 2;
1616 static unsigned int dec_lz_r(DisasContext *dc)
1618 TCGv t0;
1619 LOG_DIS("lz $r%u, $r%u\n",
1620 dc->op1, dc->op2);
1621 cris_cc_mask(dc, CC_MASK_NZ);
1622 t0 = tcg_temp_new();
1623 dec_prep_alu_r(dc, dc->op1, dc->op2, 4, 0, cpu_R[dc->op2], t0);
1624 cris_alu(dc, CC_OP_LZ, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
1625 tcg_temp_free(t0);
1626 return 2;
1629 static unsigned int dec_lsl_r(DisasContext *dc)
1631 TCGv t[2];
1632 int size = memsize_zz(dc);
1634 LOG_DIS("lsl.%c $r%u, $r%u\n",
1635 memsize_char(size), dc->op1, dc->op2);
1637 cris_cc_mask(dc, CC_MASK_NZ);
1638 cris_alu_alloc_temps(dc, size, t);
1639 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1640 tcg_gen_andi_tl(t[1], t[1], 63);
1641 cris_alu(dc, CC_OP_LSL, cpu_R[dc->op2], t[0], t[1], size);
1642 cris_alu_free_temps(dc, size, t);
1643 return 2;
1646 static unsigned int dec_lsr_r(DisasContext *dc)
1648 TCGv t[2];
1649 int size = memsize_zz(dc);
1651 LOG_DIS("lsr.%c $r%u, $r%u\n",
1652 memsize_char(size), dc->op1, dc->op2);
1654 cris_cc_mask(dc, CC_MASK_NZ);
1655 cris_alu_alloc_temps(dc, size, t);
1656 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1657 tcg_gen_andi_tl(t[1], t[1], 63);
1658 cris_alu(dc, CC_OP_LSR, cpu_R[dc->op2], t[0], t[1], size);
1659 cris_alu_free_temps(dc, size, t);
1660 return 2;
1663 static unsigned int dec_asr_r(DisasContext *dc)
1665 TCGv t[2];
1666 int size = memsize_zz(dc);
1668 LOG_DIS("asr.%c $r%u, $r%u\n",
1669 memsize_char(size), dc->op1, dc->op2);
1671 cris_cc_mask(dc, CC_MASK_NZ);
1672 cris_alu_alloc_temps(dc, size, t);
1673 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
1674 tcg_gen_andi_tl(t[1], t[1], 63);
1675 cris_alu(dc, CC_OP_ASR, cpu_R[dc->op2], t[0], t[1], size);
1676 cris_alu_free_temps(dc, size, t);
1677 return 2;
1680 static unsigned int dec_muls_r(DisasContext *dc)
1682 TCGv t[2];
1683 int size = memsize_zz(dc);
1685 LOG_DIS("muls.%c $r%u, $r%u\n",
1686 memsize_char(size), dc->op1, dc->op2);
1687 cris_cc_mask(dc, CC_MASK_NZV);
1688 cris_alu_alloc_temps(dc, size, t);
1689 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
1691 cris_alu(dc, CC_OP_MULS, cpu_R[dc->op2], t[0], t[1], 4);
1692 cris_alu_free_temps(dc, size, t);
1693 return 2;
1696 static unsigned int dec_mulu_r(DisasContext *dc)
1698 TCGv t[2];
1699 int size = memsize_zz(dc);
1701 LOG_DIS("mulu.%c $r%u, $r%u\n",
1702 memsize_char(size), dc->op1, dc->op2);
1703 cris_cc_mask(dc, CC_MASK_NZV);
1704 cris_alu_alloc_temps(dc, size, t);
1705 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1707 cris_alu(dc, CC_OP_MULU, cpu_R[dc->op2], t[0], t[1], 4);
1708 cris_alu_free_temps(dc, size, t);
1709 return 2;
1713 static unsigned int dec_dstep_r(DisasContext *dc)
1715 LOG_DIS("dstep $r%u, $r%u\n", dc->op1, dc->op2);
1716 cris_cc_mask(dc, CC_MASK_NZ);
1717 cris_alu(dc, CC_OP_DSTEP,
1718 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
1719 return 2;
1722 static unsigned int dec_xor_r(DisasContext *dc)
1724 TCGv t[2];
1725 int size = memsize_zz(dc);
1726 LOG_DIS("xor.%c $r%u, $r%u\n",
1727 memsize_char(size), dc->op1, dc->op2);
1728 BUG_ON(size != 4); /* xor is dword. */
1729 cris_cc_mask(dc, CC_MASK_NZ);
1730 cris_alu_alloc_temps(dc, size, t);
1731 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1733 cris_alu(dc, CC_OP_XOR, cpu_R[dc->op2], t[0], t[1], 4);
1734 cris_alu_free_temps(dc, size, t);
1735 return 2;
1738 static unsigned int dec_bound_r(DisasContext *dc)
1740 TCGv l0;
1741 int size = memsize_zz(dc);
1742 LOG_DIS("bound.%c $r%u, $r%u\n",
1743 memsize_char(size), dc->op1, dc->op2);
1744 cris_cc_mask(dc, CC_MASK_NZ);
1745 l0 = tcg_temp_local_new();
1746 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, l0);
1747 cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], cpu_R[dc->op2], l0, 4);
1748 tcg_temp_free(l0);
1749 return 2;
1752 static unsigned int dec_cmp_r(DisasContext *dc)
1754 TCGv t[2];
1755 int size = memsize_zz(dc);
1756 LOG_DIS("cmp.%c $r%u, $r%u\n",
1757 memsize_char(size), dc->op1, dc->op2);
1758 cris_cc_mask(dc, CC_MASK_NZVC);
1759 cris_alu_alloc_temps(dc, size, t);
1760 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1762 cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], t[0], t[1], size);
1763 cris_alu_free_temps(dc, size, t);
1764 return 2;
1767 static unsigned int dec_abs_r(DisasContext *dc)
1769 TCGv t0;
1771 LOG_DIS("abs $r%u, $r%u\n",
1772 dc->op1, dc->op2);
1773 cris_cc_mask(dc, CC_MASK_NZ);
1775 t0 = tcg_temp_new();
1776 tcg_gen_sari_tl(t0, cpu_R[dc->op1], 31);
1777 tcg_gen_xor_tl(cpu_R[dc->op2], cpu_R[dc->op1], t0);
1778 tcg_gen_sub_tl(cpu_R[dc->op2], cpu_R[dc->op2], t0);
1779 tcg_temp_free(t0);
1781 cris_alu(dc, CC_OP_MOVE,
1782 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
1783 return 2;
1786 static unsigned int dec_add_r(DisasContext *dc)
1788 TCGv t[2];
1789 int size = memsize_zz(dc);
1790 LOG_DIS("add.%c $r%u, $r%u\n",
1791 memsize_char(size), dc->op1, dc->op2);
1792 cris_cc_mask(dc, CC_MASK_NZVC);
1793 cris_alu_alloc_temps(dc, size, t);
1794 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1796 cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], size);
1797 cris_alu_free_temps(dc, size, t);
1798 return 2;
1801 static unsigned int dec_addc_r(DisasContext *dc)
1803 LOG_DIS("addc $r%u, $r%u\n",
1804 dc->op1, dc->op2);
1805 cris_evaluate_flags(dc);
1806 /* Set for this insn. */
1807 dc->flagx_known = 1;
1808 dc->flags_x = X_FLAG;
1810 cris_cc_mask(dc, CC_MASK_NZVC);
1811 cris_alu(dc, CC_OP_ADDC,
1812 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
1813 return 2;
1816 static unsigned int dec_mcp_r(DisasContext *dc)
1818 LOG_DIS("mcp $p%u, $r%u\n",
1819 dc->op2, dc->op1);
1820 cris_evaluate_flags(dc);
1821 cris_cc_mask(dc, CC_MASK_RNZV);
1822 cris_alu(dc, CC_OP_MCP,
1823 cpu_R[dc->op1], cpu_R[dc->op1], cpu_PR[dc->op2], 4);
1824 return 2;
1827 #if DISAS_CRIS
1828 static char * swapmode_name(int mode, char *modename) {
1829 int i = 0;
1830 if (mode & 8)
1831 modename[i++] = 'n';
1832 if (mode & 4)
1833 modename[i++] = 'w';
1834 if (mode & 2)
1835 modename[i++] = 'b';
1836 if (mode & 1)
1837 modename[i++] = 'r';
1838 modename[i++] = 0;
1839 return modename;
1841 #endif
1843 static unsigned int dec_swap_r(DisasContext *dc)
1845 TCGv t0;
1846 #if DISAS_CRIS
1847 char modename[4];
1848 #endif
1849 LOG_DIS("swap%s $r%u\n",
1850 swapmode_name(dc->op2, modename), dc->op1);
1852 cris_cc_mask(dc, CC_MASK_NZ);
1853 t0 = tcg_temp_new();
1854 t_gen_mov_TN_reg(t0, dc->op1);
1855 if (dc->op2 & 8)
1856 tcg_gen_not_tl(t0, t0);
1857 if (dc->op2 & 4)
1858 t_gen_swapw(t0, t0);
1859 if (dc->op2 & 2)
1860 t_gen_swapb(t0, t0);
1861 if (dc->op2 & 1)
1862 t_gen_swapr(t0, t0);
1863 cris_alu(dc, CC_OP_MOVE,
1864 cpu_R[dc->op1], cpu_R[dc->op1], t0, 4);
1865 tcg_temp_free(t0);
1866 return 2;
1869 static unsigned int dec_or_r(DisasContext *dc)
1871 TCGv t[2];
1872 int size = memsize_zz(dc);
1873 LOG_DIS("or.%c $r%u, $r%u\n",
1874 memsize_char(size), dc->op1, dc->op2);
1875 cris_cc_mask(dc, CC_MASK_NZ);
1876 cris_alu_alloc_temps(dc, size, t);
1877 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1878 cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], t[0], t[1], size);
1879 cris_alu_free_temps(dc, size, t);
1880 return 2;
1883 static unsigned int dec_addi_r(DisasContext *dc)
1885 TCGv t0;
1886 LOG_DIS("addi.%c $r%u, $r%u\n",
1887 memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
1888 cris_cc_mask(dc, 0);
1889 t0 = tcg_temp_new();
1890 tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize));
1891 tcg_gen_add_tl(cpu_R[dc->op1], cpu_R[dc->op1], t0);
1892 tcg_temp_free(t0);
1893 return 2;
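/* addi adds an index register scaled by the operand size; dc->zzsize is
   used directly as the shift count (0, 1 or 2 for .b, .w, .d). */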
1896 static unsigned int dec_addi_acr(DisasContext *dc)
1898 TCGv t0;
1899 LOG_DIS("addi.%c $r%u, $r%u, $acr\n",
1900 memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
1901 cris_cc_mask(dc, 0);
1902 t0 = tcg_temp_new();
1903 tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize));
1904 tcg_gen_add_tl(cpu_R[R_ACR], cpu_R[dc->op1], t0);
1905 tcg_temp_free(t0);
1906 return 2;
1909 static unsigned int dec_neg_r(DisasContext *dc)
1911 TCGv t[2];
1912 int size = memsize_zz(dc);
1913 LOG_DIS("neg.%c $r%u, $r%u\n",
1914 memsize_char(size), dc->op1, dc->op2);
1915 cris_cc_mask(dc, CC_MASK_NZVC);
1916 cris_alu_alloc_temps(dc, size, t);
1917 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1919 cris_alu(dc, CC_OP_NEG, cpu_R[dc->op2], t[0], t[1], size);
1920 cris_alu_free_temps(dc, size, t);
1921 return 2;
1924 static unsigned int dec_btst_r(DisasContext *dc)
1926 LOG_DIS("btst $r%u, $r%u\n",
1927 dc->op1, dc->op2);
1928 cris_cc_mask(dc, CC_MASK_NZ);
1929 cris_evaluate_flags(dc);
1930 gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
1931 cpu_R[dc->op1], cpu_PR[PR_CCS]);
1932 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2],
1933 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1934 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
1935 dc->flags_uptodate = 1;
1936 return 2;
1939 static unsigned int dec_sub_r(DisasContext *dc)
1941 TCGv t[2];
1942 int size = memsize_zz(dc);
1943 LOG_DIS("sub.%c $r%u, $r%u\n",
1944 memsize_char(size), dc->op1, dc->op2);
1945 cris_cc_mask(dc, CC_MASK_NZVC);
1946 cris_alu_alloc_temps(dc, size, t);
1947 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1948 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], size);
1949 cris_alu_free_temps(dc, size, t);
1950 return 2;
1953 /* Zero extension. From size to dword. */
1954 static unsigned int dec_movu_r(DisasContext *dc)
1956 TCGv t0;
1957 int size = memsize_z(dc);
1958 LOG_DIS("movu.%c $r%u, $r%u\n",
1959 memsize_char(size),
1960 dc->op1, dc->op2);
1962 cris_cc_mask(dc, CC_MASK_NZ);
1963 t0 = tcg_temp_new();
1964 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
1965 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
1966 tcg_temp_free(t0);
1967 return 2;
1970 /* Sign extension. From size to dword. */
1971 static unsigned int dec_movs_r(DisasContext *dc)
1973 TCGv t0;
1974 int size = memsize_z(dc);
1975 LOG_DIS("movs.%c $r%u, $r%u\n",
1976 memsize_char(size),
1977 dc->op1, dc->op2);
1979 cris_cc_mask(dc, CC_MASK_NZ);
1980 t0 = tcg_temp_new();
1981 /* Size can only be qi or hi. */
1982 t_gen_sext(t0, cpu_R[dc->op1], size);
1983 cris_alu(dc, CC_OP_MOVE,
1984 cpu_R[dc->op2], cpu_R[dc->op1], t0, 4);
1985 tcg_temp_free(t0);
1986 return 2;
1989 /* zero extension. From size to dword. */
1990 static unsigned int dec_addu_r(DisasContext *dc)
1992 TCGv t0;
1993 int size = memsize_z(dc);
1994 LOG_DIS("addu.%c $r%u, $r%u\n",
1995 memsize_char(size),
1996 dc->op1, dc->op2);
1998 cris_cc_mask(dc, CC_MASK_NZVC);
1999 t0 = tcg_temp_new();
2000 /* Size can only be qi or hi. */
2001 t_gen_zext(t0, cpu_R[dc->op1], size);
2002 cris_alu(dc, CC_OP_ADD,
2003 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2004 tcg_temp_free(t0);
2005 return 2;
2008 /* Sign extension. From size to dword. */
2009 static unsigned int dec_adds_r(DisasContext *dc)
2011 TCGv t0;
2012 int size = memsize_z(dc);
2013 LOG_DIS("adds.%c $r%u, $r%u\n",
2014 memsize_char(size),
2015 dc->op1, dc->op2);
2017 cris_cc_mask(dc, CC_MASK_NZVC);
2018 t0 = tcg_temp_new();
2019 /* Size can only be qi or hi. */
2020 t_gen_sext(t0, cpu_R[dc->op1], size);
2021 cris_alu(dc, CC_OP_ADD,
2022 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2023 tcg_temp_free(t0);
2024 return 2;
2027 /* Zero extension. From size to dword. */
2028 static unsigned int dec_subu_r(DisasContext *dc)
2030 TCGv t0;
2031 int size = memsize_z(dc);
2032 LOG_DIS("subu.%c $r%u, $r%u\n",
2033 memsize_char(size),
2034 dc->op1, dc->op2);
2036 cris_cc_mask(dc, CC_MASK_NZVC);
2037 t0 = tcg_temp_new();
2038 /* Size can only be qi or hi. */
2039 t_gen_zext(t0, cpu_R[dc->op1], size);
2040 cris_alu(dc, CC_OP_SUB,
2041 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2042 tcg_temp_free(t0);
2043 return 2;
2046 /* Sign extension. From size to dword. */
2047 static unsigned int dec_subs_r(DisasContext *dc)
2049 TCGv t0;
2050 int size = memsize_z(dc);
2051 LOG_DIS("subs.%c $r%u, $r%u\n",
2052 memsize_char(size),
2053 dc->op1, dc->op2);
2055 cris_cc_mask(dc, CC_MASK_NZVC);
2056 t0 = tcg_temp_new();
2057 /* Size can only be qi or hi. */
2058 t_gen_sext(t0, cpu_R[dc->op1], size);
2059 cris_alu(dc, CC_OP_SUB,
2060 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2061 tcg_temp_free(t0);
2062 return 2;
2065 static unsigned int dec_setclrf(DisasContext *dc)
2067 uint32_t flags;
2068 int set = (~dc->opcode >> 2) & 1;
2071 flags = (EXTRACT_FIELD(dc->ir, 12, 15) << 4)
2072 | EXTRACT_FIELD(dc->ir, 0, 3);
2073 if (set && flags == 0) {
2074 LOG_DIS("nop\n");
2075 return 2;
2076 } else if (!set && (flags & 0x20)) {
2077 LOG_DIS("di\n");
2079 else {
2080 LOG_DIS("%sf %x\n",
2081 set ? "set" : "clr",
2082 flags);
2085 /* User space is not allowed to touch these. Silently ignore. */
2086 if (dc->tb_flags & U_FLAG) {
2087 flags &= ~(S_FLAG | I_FLAG | U_FLAG);
2090 if (flags & X_FLAG) {
2091 dc->flagx_known = 1;
2092 if (set)
2093 dc->flags_x = X_FLAG;
2094 else
2095 dc->flags_x = 0;
2098 /* Break the TB if any of the SPI flag changes. */
2099 if (flags & (P_FLAG | S_FLAG)) {
2100 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2101 dc->is_jmp = DISAS_UPDATE;
2102 dc->cpustate_changed = 1;
2105 /* For the I flag, only act on posedge. */
2106 if ((flags & I_FLAG)) {
2107 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2108 dc->is_jmp = DISAS_UPDATE;
2109 dc->cpustate_changed = 1;
2113 /* Simply decode the flags. */
2114 cris_evaluate_flags (dc);
2115 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
2116 cris_update_cc_x(dc);
2117 tcg_gen_movi_tl(cc_op, dc->cc_op);
2119 if (set) {
2120 if (!(dc->tb_flags & U_FLAG) && (flags & U_FLAG)) {
2121 /* Enter user mode. */
2122 t_gen_mov_env_TN(ksp, cpu_R[R_SP]);
2123 tcg_gen_mov_tl(cpu_R[R_SP], cpu_PR[PR_USP]);
2124 dc->cpustate_changed = 1;
2126 tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags);
2128 else
2129 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~flags);
2131 dc->flags_uptodate = 1;
2132 dc->clear_x = 0;
2133 return 2;
2136 static unsigned int dec_move_rs(DisasContext *dc)
2138 LOG_DIS("move $r%u, $s%u\n", dc->op1, dc->op2);
2139 cris_cc_mask(dc, 0);
2140 gen_helper_movl_sreg_reg(tcg_const_tl(dc->op2), tcg_const_tl(dc->op1));
2141 return 2;
2143 static unsigned int dec_move_sr(DisasContext *dc)
2145 LOG_DIS("move $s%u, $r%u\n", dc->op2, dc->op1);
2146 cris_cc_mask(dc, 0);
2147 gen_helper_movl_reg_sreg(tcg_const_tl(dc->op1), tcg_const_tl(dc->op2));
2148 return 2;
2151 static unsigned int dec_move_rp(DisasContext *dc)
2153 TCGv t[2];
2154 LOG_DIS("move $r%u, $p%u\n", dc->op1, dc->op2);
2155 cris_cc_mask(dc, 0);
2157 t[0] = tcg_temp_new();
2158 if (dc->op2 == PR_CCS) {
2159 cris_evaluate_flags(dc);
2160 t_gen_mov_TN_reg(t[0], dc->op1);
2161 if (dc->tb_flags & U_FLAG) {
2162 t[1] = tcg_temp_new();
2163 /* User space is not allowed to touch all flags. */
2164 tcg_gen_andi_tl(t[0], t[0], 0x39f);
2165 tcg_gen_andi_tl(t[1], cpu_PR[PR_CCS], ~0x39f);
2166 tcg_gen_or_tl(t[0], t[1], t[0]);
2167 tcg_temp_free(t[1]);
2170 else
2171 t_gen_mov_TN_reg(t[0], dc->op1);
2173 t_gen_mov_preg_TN(dc, dc->op2, t[0]);
2174 if (dc->op2 == PR_CCS) {
2175 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
2176 dc->flags_uptodate = 1;
2178 tcg_temp_free(t[0]);
2179 return 2;
2181 static unsigned int dec_move_pr(DisasContext *dc)
2183 TCGv t0;
2184 LOG_DIS("move $p%u, $r%u\n", dc->op2, dc->op1);
2185 cris_cc_mask(dc, 0);
2187 if (dc->op2 == PR_CCS)
2188 cris_evaluate_flags(dc);
2190 if (dc->op2 == PR_DZ) {
2191 tcg_gen_movi_tl(cpu_R[dc->op1], 0);
2192 } else {
2193 t0 = tcg_temp_new();
2194 t_gen_mov_TN_preg(t0, dc->op2);
2195 cris_alu(dc, CC_OP_MOVE,
2196 cpu_R[dc->op1], cpu_R[dc->op1], t0,
2197 preg_sizes[dc->op2]);
2198 tcg_temp_free(t0);
2200 return 2;
2203 static unsigned int dec_move_mr(DisasContext *dc)
2205 int memsize = memsize_zz(dc);
2206 int insn_len;
2207 LOG_DIS("move.%c [$r%u%s, $r%u\n",
2208 memsize_char(memsize),
2209 dc->op1, dc->postinc ? "+]" : "]",
2210 dc->op2);
2212 if (memsize == 4) {
2213 insn_len = dec_prep_move_m(dc, 0, 4, cpu_R[dc->op2]);
2214 cris_cc_mask(dc, CC_MASK_NZ);
2215 cris_update_cc_op(dc, CC_OP_MOVE, 4);
2216 cris_update_cc_x(dc);
2217 cris_update_result(dc, cpu_R[dc->op2]);
2219 else {
2220 TCGv t0;
2222 t0 = tcg_temp_new();
2223 insn_len = dec_prep_move_m(dc, 0, memsize, t0);
2224 cris_cc_mask(dc, CC_MASK_NZ);
2225 cris_alu(dc, CC_OP_MOVE,
2226 cpu_R[dc->op2], cpu_R[dc->op2], t0, memsize);
2227 tcg_temp_free(t0);
2229 do_postinc(dc, memsize);
2230 return insn_len;
2233 static inline void cris_alu_m_alloc_temps(TCGv *t)
2235 t[0] = tcg_temp_new();
2236 t[1] = tcg_temp_new();
2239 static inline void cris_alu_m_free_temps(TCGv *t)
2241 tcg_temp_free(t[0]);
2242 tcg_temp_free(t[1]);
2245 static unsigned int dec_movs_m(DisasContext *dc)
2247 TCGv t[2];
2248 int memsize = memsize_z(dc);
2249 int insn_len;
2250 LOG_DIS("movs.%c [$r%u%s, $r%u\n",
2251 memsize_char(memsize),
2252 dc->op1, dc->postinc ? "+]" : "]",
2253 dc->op2);
2255 cris_alu_m_alloc_temps(t);
2256 /* sign extend. */
2257 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2258 cris_cc_mask(dc, CC_MASK_NZ);
2259 cris_alu(dc, CC_OP_MOVE,
2260 cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2261 do_postinc(dc, memsize);
2262 cris_alu_m_free_temps(t);
2263 return insn_len;
2266 static unsigned int dec_addu_m(DisasContext *dc)
2268 TCGv t[2];
2269 int memsize = memsize_z(dc);
2270 int insn_len;
2271 LOG_DIS("addu.%c [$r%u%s, $r%u\n",
2272 memsize_char(memsize),
2273 dc->op1, dc->postinc ? "+]" : "]",
2274 dc->op2);
2276 cris_alu_m_alloc_temps(t);
2277 /* sign extend. */
2278 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2279 cris_cc_mask(dc, CC_MASK_NZVC);
2280 cris_alu(dc, CC_OP_ADD,
2281 cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2282 do_postinc(dc, memsize);
2283 cris_alu_m_free_temps(t);
2284 return insn_len;
2287 static unsigned int dec_adds_m(DisasContext *dc)
2289 TCGv t[2];
2290 int memsize = memsize_z(dc);
2291 int insn_len;
2292 LOG_DIS("adds.%c [$r%u%s, $r%u\n",
2293 memsize_char(memsize),
2294 dc->op1, dc->postinc ? "+]" : "]",
2295 dc->op2);
2297 cris_alu_m_alloc_temps(t);
2298 /* sign extend. */
2299 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2300 cris_cc_mask(dc, CC_MASK_NZVC);
2301 cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2302 do_postinc(dc, memsize);
2303 cris_alu_m_free_temps(t);
2304 return insn_len;
2307 static unsigned int dec_subu_m(DisasContext *dc)
2309 TCGv t[2];
2310 int memsize = memsize_z(dc);
2311 int insn_len;
2312 LOG_DIS("subu.%c [$r%u%s, $r%u\n",
2313 memsize_char(memsize),
2314 dc->op1, dc->postinc ? "+]" : "]",
2315 dc->op2);
2317 cris_alu_m_alloc_temps(t);
2318 /* sign extend. */
2319 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2320 cris_cc_mask(dc, CC_MASK_NZVC);
2321 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2322 do_postinc(dc, memsize);
2323 cris_alu_m_free_temps(t);
2324 return insn_len;
2327 static unsigned int dec_subs_m(DisasContext *dc)
2329 TCGv t[2];
2330 int memsize = memsize_z(dc);
2331 int insn_len;
2332 LOG_DIS("subs.%c [$r%u%s, $r%u\n",
2333 memsize_char(memsize),
2334 dc->op1, dc->postinc ? "+]" : "]",
2335 dc->op2);
2337 cris_alu_m_alloc_temps(t);
2338 /* sign extend. */
2339 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2340 cris_cc_mask(dc, CC_MASK_NZVC);
2341 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2342 do_postinc(dc, memsize);
2343 cris_alu_m_free_temps(t);
2344 return insn_len;
2347 static unsigned int dec_movu_m(DisasContext *dc)
2349 TCGv t[2];
2350 int memsize = memsize_z(dc);
2351 int insn_len;
2353 LOG_DIS("movu.%c [$r%u%s, $r%u\n",
2354 memsize_char(memsize),
2355 dc->op1, dc->postinc ? "+]" : "]",
2356 dc->op2);
2358 cris_alu_m_alloc_temps(t);
2359 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2360 cris_cc_mask(dc, CC_MASK_NZ);
2361 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2362 do_postinc(dc, memsize);
2363 cris_alu_m_free_temps(t);
2364 return insn_len;
2367 static unsigned int dec_cmpu_m(DisasContext *dc)
2369 TCGv t[2];
2370 int memsize = memsize_z(dc);
2371 int insn_len;
2372 LOG_DIS("cmpu.%c [$r%u%s, $r%u\n",
2373 memsize_char(memsize),
2374 dc->op1, dc->postinc ? "+]" : "]",
2375 dc->op2);
2377 cris_alu_m_alloc_temps(t);
2378 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2379 cris_cc_mask(dc, CC_MASK_NZVC);
2380 cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2381 do_postinc(dc, memsize);
2382 cris_alu_m_free_temps(t);
2383 return insn_len;
2386 static unsigned int dec_cmps_m(DisasContext *dc)
2388 TCGv t[2];
2389 int memsize = memsize_z(dc);
2390 int insn_len;
2391 LOG_DIS("cmps.%c [$r%u%s, $r%u\n",
2392 memsize_char(memsize),
2393 dc->op1, dc->postinc ? "+]" : "]",
2394 dc->op2);
2396 cris_alu_m_alloc_temps(t);
2397 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2398 cris_cc_mask(dc, CC_MASK_NZVC);
2399 cris_alu(dc, CC_OP_CMP,
2400 cpu_R[dc->op2], cpu_R[dc->op2], t[1],
2401 memsize_zz(dc));
2402 do_postinc(dc, memsize);
2403 cris_alu_m_free_temps(t);
2404 return insn_len;
2407 static unsigned int dec_cmp_m(DisasContext *dc)
2409 TCGv t[2];
2410 int memsize = memsize_zz(dc);
2411 int insn_len;
2412 LOG_DIS("cmp.%c [$r%u%s, $r%u\n",
2413 memsize_char(memsize),
2414 dc->op1, dc->postinc ? "+]" : "]",
2415 dc->op2);
2417 cris_alu_m_alloc_temps(t);
2418 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2419 cris_cc_mask(dc, CC_MASK_NZVC);
2420 cris_alu(dc, CC_OP_CMP,
2421 cpu_R[dc->op2], cpu_R[dc->op2], t[1],
2422 memsize_zz(dc));
2423 do_postinc(dc, memsize);
2424 cris_alu_m_free_temps(t);
2425 return insn_len;
2428 static unsigned int dec_test_m(DisasContext *dc)
2430 TCGv t[2];
2431 int memsize = memsize_zz(dc);
2432 int insn_len;
2433 LOG_DIS("test.%c [$r%u%s op2=%x\n",
2434 memsize_char(memsize),
2435 dc->op1, dc->postinc ? "+]" : "]",
2436 dc->op2);
2438 cris_evaluate_flags(dc);
2440 cris_alu_m_alloc_temps(t);
2441 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2442 cris_cc_mask(dc, CC_MASK_NZ);
2443 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
2445 cris_alu(dc, CC_OP_CMP,
2446 cpu_R[dc->op2], t[1], tcg_const_tl(0), memsize_zz(dc));
2447 do_postinc(dc, memsize);
2448 cris_alu_m_free_temps(t);
2449 return insn_len;
2452 static unsigned int dec_and_m(DisasContext *dc)
2454 TCGv t[2];
2455 int memsize = memsize_zz(dc);
2456 int insn_len;
2457 LOG_DIS("and.%c [$r%u%s, $r%u\n",
2458 memsize_char(memsize),
2459 dc->op1, dc->postinc ? "+]" : "]",
2460 dc->op2);
2462 cris_alu_m_alloc_temps(t);
2463 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2464 cris_cc_mask(dc, CC_MASK_NZ);
2465 cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2466 do_postinc(dc, memsize);
2467 cris_alu_m_free_temps(t);
2468 return insn_len;
2471 static unsigned int dec_add_m(DisasContext *dc)
2473 TCGv t[2];
2474 int memsize = memsize_zz(dc);
2475 int insn_len;
2476 LOG_DIS("add.%c [$r%u%s, $r%u\n",
2477 memsize_char(memsize),
2478 dc->op1, dc->postinc ? "+]" : "]",
2479 dc->op2);
2481 cris_alu_m_alloc_temps(t);
2482 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2483 cris_cc_mask(dc, CC_MASK_NZVC);
2484 cris_alu(dc, CC_OP_ADD,
2485 cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2486 do_postinc(dc, memsize);
2487 cris_alu_m_free_temps(t);
2488 return insn_len;
2491 static unsigned int dec_addo_m(DisasContext *dc)
2493 TCGv t[2];
2494 int memsize = memsize_zz(dc);
2495 int insn_len;
2496 LOG_DIS("addo.%c [$r%u%s, $r%u\n",
2497 memsize_char(memsize),
2498 dc->op1, dc->postinc ? "+]" : "]",
2499 dc->op2);
2501 cris_alu_m_alloc_temps(t);
2502 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2503 cris_cc_mask(dc, 0);
2504 cris_alu(dc, CC_OP_ADD, cpu_R[R_ACR], t[0], t[1], 4);
2505 do_postinc(dc, memsize);
2506 cris_alu_m_free_temps(t);
2507 return insn_len;
2510 static unsigned int dec_bound_m(DisasContext *dc)
2512 TCGv l[2];
2513 int memsize = memsize_zz(dc);
2514 int insn_len;
2515 LOG_DIS("bound.%c [$r%u%s, $r%u\n",
2516 memsize_char(memsize),
2517 dc->op1, dc->postinc ? "+]" : "]",
2518 dc->op2);
2520 l[0] = tcg_temp_local_new();
2521 l[1] = tcg_temp_local_new();
2522 insn_len = dec_prep_alu_m(dc, 0, memsize, l[0], l[1]);
2523 cris_cc_mask(dc, CC_MASK_NZ);
2524 cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4);
2525 do_postinc(dc, memsize);
2526 tcg_temp_free(l[0]);
2527 tcg_temp_free(l[1]);
2528 return insn_len;
2531 static unsigned int dec_addc_mr(DisasContext *dc)
2533 TCGv t[2];
2534 int insn_len = 2;
2535 LOG_DIS("addc [$r%u%s, $r%u\n",
2536 dc->op1, dc->postinc ? "+]" : "]",
2537 dc->op2);
2539 cris_evaluate_flags(dc);
2542 /* The X flag is known to be set for this insn. */
2542 dc->flagx_known = 1;
2543 dc->flags_x = X_FLAG;
2545 cris_alu_m_alloc_temps(t);
2546 insn_len = dec_prep_alu_m(dc, 0, 4, t[0], t[1]);
2547 cris_cc_mask(dc, CC_MASK_NZVC);
2548 cris_alu(dc, CC_OP_ADDC, cpu_R[dc->op2], t[0], t[1], 4);
2549 do_postinc(dc, 4);
2550 cris_alu_m_free_temps(t);
2551 return insn_len;
2554 static unsigned int dec_sub_m(DisasContext *dc)
2556 TCGv t[2];
2557 int memsize = memsize_zz(dc);
2558 int insn_len;
2559 LOG_DIS("sub.%c [$r%u%s, $r%u ir=%x zz=%x\n",
2560 memsize_char(memsize),
2561 dc->op1, dc->postinc ? "+]" : "]",
2562 dc->op2, dc->ir, dc->zzsize);
2564 cris_alu_m_alloc_temps(t);
2565 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2566 cris_cc_mask(dc, CC_MASK_NZVC);
2567 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], memsize);
2568 do_postinc(dc, memsize);
2569 cris_alu_m_free_temps(t);
2570 return insn_len;
2573 static unsigned int dec_or_m(DisasContext *dc)
2575 TCGv t[2];
2576 int memsize = memsize_zz(dc);
2577 int insn_len;
2578 LOG_DIS("or.%c [$r%u%s, $r%u pc=%x\n",
2579 memsize_char(memsize),
2580 dc->op1, dc->postinc ? "+]" : "]",
2581 dc->op2, dc->pc);
2583 cris_alu_m_alloc_temps(t);
2584 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2585 cris_cc_mask(dc, CC_MASK_NZ);
2586 cris_alu(dc, CC_OP_OR,
2587 cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2588 do_postinc(dc, memsize);
2589 cris_alu_m_free_temps(t);
2590 return insn_len;
2593 static unsigned int dec_move_mp(DisasContext *dc)
2595 TCGv t[2];
2596 int memsize = memsize_zz(dc);
2597 int insn_len = 2;
2599 LOG_DIS("move.%c [$r%u%s, $p%u\n",
2600 memsize_char(memsize),
2601 dc->op1,
2602 dc->postinc ? "+]" : "]",
2603 dc->op2);
2605 cris_alu_m_alloc_temps(t);
2606 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2607 cris_cc_mask(dc, 0);
2608 if (dc->op2 == PR_CCS) {
2609 cris_evaluate_flags(dc);
2610 if (dc->tb_flags & U_FLAG) {
2611 /* User space is not allowed to touch all flags. */
2612 tcg_gen_andi_tl(t[1], t[1], 0x39f);
2613 tcg_gen_andi_tl(t[0], cpu_PR[PR_CCS], ~0x39f);
2614 tcg_gen_or_tl(t[1], t[0], t[1]);
2618 t_gen_mov_preg_TN(dc, dc->op2, t[1]);
2620 do_postinc(dc, memsize);
2621 cris_alu_m_free_temps(t);
2622 return insn_len;
2625 static unsigned int dec_move_pm(DisasContext *dc)
2627 TCGv t0;
2628 int memsize;
2630 memsize = preg_sizes[dc->op2];
2632 LOG_DIS("move.%c $p%u, [$r%u%s\n",
2633 memsize_char(memsize),
2634 dc->op2, dc->op1, dc->postinc ? "+]" : "]");
2636 /* prepare store. Address in $r(op1), value read from the special reg into t0. */
2637 if (dc->op2 == PR_CCS)
2638 cris_evaluate_flags(dc);
2639 t0 = tcg_temp_new();
2640 t_gen_mov_TN_preg(t0, dc->op2);
2641 cris_flush_cc_state(dc);
2642 gen_store(dc, cpu_R[dc->op1], t0, memsize);
2643 tcg_temp_free(t0);
2645 cris_cc_mask(dc, 0);
2646 if (dc->postinc)
2647 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
2648 return 2;
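/* movem [$r(op1)], $r(op2): load registers r0..r(op2) from memory, using
   64-bit loads for register pairs and a final 32-bit load when the count is
   odd; the pointer is written back if post-increment is set. */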
2651 static unsigned int dec_movem_mr(DisasContext *dc)
2653 TCGv_i64 tmp[16];
2654 TCGv tmp32;
2655 TCGv addr;
2656 int i;
2657 int nr = dc->op2 + 1;
2659 LOG_DIS("movem [$r%u%s, $r%u\n", dc->op1,
2660 dc->postinc ? "+]" : "]", dc->op2);
2662 addr = tcg_temp_new();
2663 /* There are probably better ways of doing this. */
2664 cris_flush_cc_state(dc);
2665 for (i = 0; i < (nr >> 1); i++) {
2666 tmp[i] = tcg_temp_new_i64();
2667 tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
2668 gen_load64(dc, tmp[i], addr);
2670 if (nr & 1) {
2671 tmp32 = tcg_temp_new_i32();
2672 tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
2673 gen_load(dc, tmp32, addr, 4, 0);
2674 } else
2675 TCGV_UNUSED(tmp32);
2676 tcg_temp_free(addr);
2678 for (i = 0; i < (nr >> 1); i++) {
2679 tcg_gen_trunc_i64_i32(cpu_R[i * 2], tmp[i]);
2680 tcg_gen_shri_i64(tmp[i], tmp[i], 32);
2681 tcg_gen_trunc_i64_i32(cpu_R[i * 2 + 1], tmp[i]);
2682 tcg_temp_free_i64(tmp[i]);
2684 if (nr & 1) {
2685 tcg_gen_mov_tl(cpu_R[dc->op2], tmp32);
2686 tcg_temp_free(tmp32);
2689 /* writeback the updated pointer value. */
2690 if (dc->postinc)
2691 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], nr * 4);
2693 /* gen_load might want to evaluate the previous insn's flags. */
2694 cris_cc_mask(dc, 0);
2695 return 2;
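/* movem $r(op2), [$r(op1)]: store registers r0..r(op2) to consecutive 32-bit
   words starting at [$r(op1)], optionally writing the final address back to
   $r(op1). */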
2698 static unsigned int dec_movem_rm(DisasContext *dc)
2700 TCGv tmp;
2701 TCGv addr;
2702 int i;
2704 LOG_DIS("movem $r%u, [$r%u%s\n", dc->op2, dc->op1,
2705 dc->postinc ? "+]" : "]");
2707 cris_flush_cc_state(dc);
2709 tmp = tcg_temp_new();
2710 addr = tcg_temp_new();
2711 tcg_gen_movi_tl(tmp, 4);
2712 tcg_gen_mov_tl(addr, cpu_R[dc->op1]);
2713 for (i = 0; i <= dc->op2; i++) {
2714 /* Displace addr. */
2715 /* Perform the store. */
2716 gen_store(dc, addr, cpu_R[i], 4);
2717 tcg_gen_add_tl(addr, addr, tmp);
2719 if (dc->postinc)
2720 tcg_gen_mov_tl(cpu_R[dc->op1], addr);
2721 cris_cc_mask(dc, 0);
2722 tcg_temp_free(tmp);
2723 tcg_temp_free(addr);
2724 return 2;
2727 static unsigned int dec_move_rm(DisasContext *dc)
2729 int memsize;
2731 memsize = memsize_zz(dc);
2733 LOG_DIS("move.%c $r%u, [$r%u]\n",
2734 memsize_char(memsize), dc->op2, dc->op1);
2736 /* prepare store. */
2737 cris_flush_cc_state(dc);
2738 gen_store(dc, cpu_R[dc->op1], cpu_R[dc->op2], memsize);
2740 if (dc->postinc)
2741 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
2742 cris_cc_mask(dc, 0);
2743 return 2;
2746 static unsigned int dec_lapcq(DisasContext *dc)
2748 LOG_DIS("lapcq %x, $r%u\n",
2749 dc->pc + dc->op1*2, dc->op2);
2750 cris_cc_mask(dc, 0);
2751 tcg_gen_movi_tl(cpu_R[dc->op2], dc->pc + dc->op1 * 2);
2752 return 2;
2755 static unsigned int dec_lapc_im(DisasContext *dc)
2757 unsigned int rd;
2758 int32_t imm;
2759 int32_t pc;
2761 rd = dc->op2;
2763 cris_cc_mask(dc, 0);
2764 imm = ldl_code(dc->pc + 2);
2765 LOG_DIS("lapc 0x%x, $r%u\n", imm + dc->pc, dc->op2);
2767 pc = dc->pc;
2768 pc += imm;
2769 tcg_gen_movi_tl(cpu_R[rd], pc);
2770 return 6;
2773 /* Jump to special reg. */
2774 static unsigned int dec_jump_p(DisasContext *dc)
2776 LOG_DIS("jump $p%u\n", dc->op2);
2778 if (dc->op2 == PR_CCS)
2779 cris_evaluate_flags(dc);
2780 t_gen_mov_TN_preg(env_btarget, dc->op2);
2781 /* rete will often have low bit set to indicate delayslot. */
2782 tcg_gen_andi_tl(env_btarget, env_btarget, ~1);
2783 cris_cc_mask(dc, 0);
2784 cris_prepare_jmp(dc, JMP_INDIRECT);
2785 return 2;
2788 /* Jump and save. */
2789 static unsigned int dec_jas_r(DisasContext *dc)
2791 LOG_DIS("jas $r%u, $p%u\n", dc->op1, dc->op2);
2792 cris_cc_mask(dc, 0);
2793 /* Store the return address in Pd. */
2794 tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
2795 if (dc->op2 > 15)
2796 abort();
2797 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4));
2799 cris_prepare_jmp(dc, JMP_INDIRECT);
2800 return 2;
2803 static unsigned int dec_jas_im(DisasContext *dc)
2805 uint32_t imm;
2807 imm = ldl_code(dc->pc + 2);
2809 LOG_DIS("jas 0x%x\n", imm);
2810 cris_cc_mask(dc, 0);
2811 /* Store the return address in Pd. */
2812 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
2814 dc->jmp_pc = imm;
2815 cris_prepare_jmp(dc, JMP_DIRECT);
2816 return 6;
2819 static unsigned int dec_jasc_im(DisasContext *dc)
2821 uint32_t imm;
2823 imm = ldl_code(dc->pc + 2);
2825 LOG_DIS("jasc 0x%x\n", imm);
2826 cris_cc_mask(dc, 0);
2827 /* Store the return address in Pd. */
2828 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8 + 4));
2830 dc->jmp_pc = imm;
2831 cris_prepare_jmp(dc, JMP_DIRECT);
2832 return 6;
2835 static unsigned int dec_jasc_r(DisasContext *dc)
2837 LOG_DIS("jasc_r $r%u, $p%u\n", dc->op1, dc->op2);
2838 cris_cc_mask(dc, 0);
2839 /* Store the return address in Pd. */
2840 tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
2841 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4 + 4));
2842 cris_prepare_jmp(dc, JMP_INDIRECT);
2843 return 2;
2846 static unsigned int dec_bcc_im(DisasContext *dc)
2848 int32_t offset;
2849 uint32_t cond = dc->op2;
2851 offset = ldsw_code(dc->pc + 2);
2853 LOG_DIS("b%s %d pc=%x dst=%x\n",
2854 cc_name(cond), offset,
2855 dc->pc, dc->pc + offset);
2857 cris_cc_mask(dc, 0);
2858 /* op2 holds the condition-code. */
2859 cris_prepare_cc_branch (dc, offset, cond);
2860 return 4;
2863 static unsigned int dec_bas_im(DisasContext *dc)
2865 int32_t simm;
2868 simm = ldl_code(dc->pc + 2);
2870 LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2);
2871 cris_cc_mask(dc, 0);
2872 /* Store the return address in Pd. */
2873 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
2875 dc->jmp_pc = dc->pc + simm;
2876 cris_prepare_jmp(dc, JMP_DIRECT);
2877 return 6;
2880 static unsigned int dec_basc_im(DisasContext *dc)
2882 int32_t simm;
2883 simm = ldl_code(dc->pc + 2);
2885 LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2);
2886 cris_cc_mask(dc, 0);
2887 /* Store the return address in Pd. */
2888 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 12));
2890 dc->jmp_pc = dc->pc + simm;
2891 cris_prepare_jmp(dc, JMP_DIRECT);
2892 return 6;
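/* rfe/rfn/break/halt group. op2 == 15 halts the CPU; otherwise op2 & 7
   selects rfe (2), rfn (5) or break (6), the latter raising EXCP_BREAK with
   trap vector op1 + 16. */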
2895 static unsigned int dec_rfe_etc(DisasContext *dc)
2897 cris_cc_mask(dc, 0);
2899 if (dc->op2 == 15) {
2900 t_gen_mov_env_TN(halted, tcg_const_tl(1));
2901 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2902 t_gen_raise_exception(EXCP_HLT);
2903 return 2;
2906 switch (dc->op2 & 7) {
2907 case 2:
2908 /* rfe. */
2909 LOG_DIS("rfe\n");
2910 cris_evaluate_flags(dc);
2911 gen_helper_rfe();
2912 dc->is_jmp = DISAS_UPDATE;
2913 break;
2914 case 5:
2915 /* rfn. */
2916 LOG_DIS("rfn\n");
2917 cris_evaluate_flags(dc);
2918 gen_helper_rfn();
2919 dc->is_jmp = DISAS_UPDATE;
2920 break;
2921 case 6:
2922 LOG_DIS("break %d\n", dc->op1);
2923 cris_evaluate_flags (dc);
2924 /* break. */
2925 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2927 /* Breaks start at 16 in the exception vector. */
2928 t_gen_mov_env_TN(trap_vector,
2929 tcg_const_tl(dc->op1 + 16));
2930 t_gen_raise_exception(EXCP_BREAK);
2931 dc->is_jmp = DISAS_UPDATE;
2932 break;
2933 default:
2934 printf ("op2=%x\n", dc->op2);
2935 BUG();
2936 break;
2939 return 2;
2942 static unsigned int dec_ftag_fidx_d_m(DisasContext *dc)
2944 return 2;
2947 static unsigned int dec_ftag_fidx_i_m(DisasContext *dc)
2949 return 2;
2952 static unsigned int dec_null(DisasContext *dc)
2954 printf ("unknown insn pc=%x opc=%x op1=%x op2=%x\n",
2955 dc->pc, dc->opcode, dc->op1, dc->op2);
2956 fflush(NULL);
2957 BUG();
2958 return 2;
2961 static struct decoder_info {
2962 struct {
2963 uint32_t bits;
2964 uint32_t mask;
2966 unsigned int (*dec)(DisasContext *dc);
2967 } decinfo[] = {
2968 /* Order matters here. */
2969 {DEC_MOVEQ, dec_moveq},
2970 {DEC_BTSTQ, dec_btstq},
2971 {DEC_CMPQ, dec_cmpq},
2972 {DEC_ADDOQ, dec_addoq},
2973 {DEC_ADDQ, dec_addq},
2974 {DEC_SUBQ, dec_subq},
2975 {DEC_ANDQ, dec_andq},
2976 {DEC_ORQ, dec_orq},
2977 {DEC_ASRQ, dec_asrq},
2978 {DEC_LSLQ, dec_lslq},
2979 {DEC_LSRQ, dec_lsrq},
2980 {DEC_BCCQ, dec_bccq},
2982 {DEC_BCC_IM, dec_bcc_im},
2983 {DEC_JAS_IM, dec_jas_im},
2984 {DEC_JAS_R, dec_jas_r},
2985 {DEC_JASC_IM, dec_jasc_im},
2986 {DEC_JASC_R, dec_jasc_r},
2987 {DEC_BAS_IM, dec_bas_im},
2988 {DEC_BASC_IM, dec_basc_im},
2989 {DEC_JUMP_P, dec_jump_p},
2990 {DEC_LAPC_IM, dec_lapc_im},
2991 {DEC_LAPCQ, dec_lapcq},
2993 {DEC_RFE_ETC, dec_rfe_etc},
2994 {DEC_ADDC_MR, dec_addc_mr},
2996 {DEC_MOVE_MP, dec_move_mp},
2997 {DEC_MOVE_PM, dec_move_pm},
2998 {DEC_MOVEM_MR, dec_movem_mr},
2999 {DEC_MOVEM_RM, dec_movem_rm},
3000 {DEC_MOVE_PR, dec_move_pr},
3001 {DEC_SCC_R, dec_scc_r},
3002 {DEC_SETF, dec_setclrf},
3003 {DEC_CLEARF, dec_setclrf},
3005 {DEC_MOVE_SR, dec_move_sr},
3006 {DEC_MOVE_RP, dec_move_rp},
3007 {DEC_SWAP_R, dec_swap_r},
3008 {DEC_ABS_R, dec_abs_r},
3009 {DEC_LZ_R, dec_lz_r},
3010 {DEC_MOVE_RS, dec_move_rs},
3011 {DEC_BTST_R, dec_btst_r},
3012 {DEC_ADDC_R, dec_addc_r},
3014 {DEC_DSTEP_R, dec_dstep_r},
3015 {DEC_XOR_R, dec_xor_r},
3016 {DEC_MCP_R, dec_mcp_r},
3017 {DEC_CMP_R, dec_cmp_r},
3019 {DEC_ADDI_R, dec_addi_r},
3020 {DEC_ADDI_ACR, dec_addi_acr},
3022 {DEC_ADD_R, dec_add_r},
3023 {DEC_SUB_R, dec_sub_r},
3025 {DEC_ADDU_R, dec_addu_r},
3026 {DEC_ADDS_R, dec_adds_r},
3027 {DEC_SUBU_R, dec_subu_r},
3028 {DEC_SUBS_R, dec_subs_r},
3029 {DEC_LSL_R, dec_lsl_r},
3031 {DEC_AND_R, dec_and_r},
3032 {DEC_OR_R, dec_or_r},
3033 {DEC_BOUND_R, dec_bound_r},
3034 {DEC_ASR_R, dec_asr_r},
3035 {DEC_LSR_R, dec_lsr_r},
3037 {DEC_MOVU_R, dec_movu_r},
3038 {DEC_MOVS_R, dec_movs_r},
3039 {DEC_NEG_R, dec_neg_r},
3040 {DEC_MOVE_R, dec_move_r},
3042 {DEC_FTAG_FIDX_I_M, dec_ftag_fidx_i_m},
3043 {DEC_FTAG_FIDX_D_M, dec_ftag_fidx_d_m},
3045 {DEC_MULS_R, dec_muls_r},
3046 {DEC_MULU_R, dec_mulu_r},
3048 {DEC_ADDU_M, dec_addu_m},
3049 {DEC_ADDS_M, dec_adds_m},
3050 {DEC_SUBU_M, dec_subu_m},
3051 {DEC_SUBS_M, dec_subs_m},
3053 {DEC_CMPU_M, dec_cmpu_m},
3054 {DEC_CMPS_M, dec_cmps_m},
3055 {DEC_MOVU_M, dec_movu_m},
3056 {DEC_MOVS_M, dec_movs_m},
3058 {DEC_CMP_M, dec_cmp_m},
3059 {DEC_ADDO_M, dec_addo_m},
3060 {DEC_BOUND_M, dec_bound_m},
3061 {DEC_ADD_M, dec_add_m},
3062 {DEC_SUB_M, dec_sub_m},
3063 {DEC_AND_M, dec_and_m},
3064 {DEC_OR_M, dec_or_m},
3065 {DEC_MOVE_RM, dec_move_rm},
3066 {DEC_TEST_M, dec_test_m},
3067 {DEC_MOVE_MR, dec_move_mr},
3069 {{0, 0}, dec_null}
3072 static unsigned int crisv32_decoder(DisasContext *dc)
3074 unsigned int insn_len = 2;
3075 int i;
3077 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
3078 tcg_gen_debug_insn_start(dc->pc);
3080 /* Load a halfword into the instruction register. */
3081 dc->ir = lduw_code(dc->pc);
3083 /* Now decode it. */
3084 dc->opcode = EXTRACT_FIELD(dc->ir, 4, 11);
3085 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 3);
3086 dc->op2 = EXTRACT_FIELD(dc->ir, 12, 15);
3087 dc->zsize = EXTRACT_FIELD(dc->ir, 4, 4);
3088 dc->zzsize = EXTRACT_FIELD(dc->ir, 4, 5);
3089 dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10);
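/* Field layout as extracted above: op2 in bits [15:12], opcode in bits
   [11:4] and op1 in bits [3:0]; the size fields (zsize, zzsize) and the
   post-increment bit overlap the opcode field. */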
3091 /* Look the insn up in the decoder table. */
3092 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
3093 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits)
3095 insn_len = decinfo[i].dec(dc);
3096 break;
3100 #if !defined(CONFIG_USER_ONLY)
3101 /* Single-stepping? */
3102 if (dc->tb_flags & S_FLAG) {
3103 int l1;
3105 l1 = gen_new_label();
3106 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_PR[PR_SPC], dc->pc, l1);
3107 /* We treat SPC as a break with an odd trap vector. */
3108 cris_evaluate_flags (dc);
3109 t_gen_mov_env_TN(trap_vector, tcg_const_tl(3));
3110 tcg_gen_movi_tl(env_pc, dc->pc + insn_len);
3111 tcg_gen_movi_tl(cpu_PR[PR_SPC], dc->pc + insn_len);
3112 t_gen_raise_exception(EXCP_BREAK);
3113 gen_set_label(l1);
3115 #endif
3116 return insn_len;
3119 static void check_breakpoint(CPUState *env, DisasContext *dc)
3121 CPUBreakpoint *bp;
3123 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3124 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3125 if (bp->pc == dc->pc) {
3126 cris_evaluate_flags (dc);
3127 tcg_gen_movi_tl(env_pc, dc->pc);
3128 t_gen_raise_exception(EXCP_DEBUG);
3129 dc->is_jmp = DISAS_UPDATE;
3135 #include "translate_v10.c"
3138 * Delay slots on QEMU/CRIS.
3140 * If an exception hits in a delay slot, the core will let ERP (the Exception
3141 * Return Pointer) point to the branch (the previous) insn and set the lsb
3142 * to give SW a hint that the exception actually hit in the delay slot.
3144 * CRIS expects all PC addresses to be 16-bit aligned. The lsb is ignored by
3145 * the core and any jmp to an odd address will mask off that lsb. It is
3146 * simply there to let sw know there was an exception on a dslot.
3148 * When the software returns from an exception, the branch will re-execute.
3149 * In QEMU, care needs to be taken when a branch+delayslot sequence is broken
3150 * and the branch and delayslot don't share pages.
3152 * The TB containing the branch insn will set up env->btarget and evaluate
3153 * env->btaken. When the translation loop exits we will note that the branch
3154 * sequence is broken and let env->dslot be the size of the branch insn (those
3155 * vary in length).
3157 * The TB containing the delayslot will have the PC of its real insn (i.e. no lsb
3158 * set). It will also expect to have env->dslot set up with the size of the
3159 * delay slot so that env->pc - env->dslot points to the branch insn. This TB
3160 * will execute the dslot and take the branch, either to btarget or just one
3161 * insn ahead.
3163 * When exceptions occur, we check for env->dslot in do_interrupt to detect
3164 * broken branch sequences and set up $erp accordingly (i.e. let it point to the
3165 * branch and set lsb). Then env->dslot gets cleared so that the exception
3166 * handler can enter. When returning from exceptions (jump $erp) the lsb gets
3167 * masked off and we will re-execute the branch insn.
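 *
 * Illustration (made-up addresses): a 2-byte branch at 0x0ffe has its delay
 * slot at 0x1000, on the next page. The TB holding the branch stops after it
 * and sets env->dslot to the branch length (2). The next TB starts at 0x1000,
 * executes the dslot and takes the branch; if an exception hits there,
 * do_interrupt sees env->dslot, points $erp at 0x0ffe and sets its lsb so the
 * branch re-executes on return.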
3171 /* generate intermediate code for basic block 'tb'. */
3172 static void
3173 gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
3174 int search_pc)
3176 uint16_t *gen_opc_end;
3177 uint32_t pc_start;
3178 unsigned int insn_len, orig_flags;
3179 int j, lj;
3180 struct DisasContext ctx;
3181 struct DisasContext *dc = &ctx;
3182 uint32_t next_page_start;
3183 target_ulong npc;
3184 int num_insns;
3185 int max_insns;
3187 qemu_log_try_set_file(stderr);
3189 if (env->pregs[PR_VR] == 32)
3190 dc->decoder = crisv32_decoder;
3191 else
3192 dc->decoder = crisv10_decoder;
3194 /* Odd PC indicates that the branch is re-executing due to an exception in the
3195 * delay slot, like in real hw.
3197 pc_start = tb->pc & ~1;
3198 dc->env = env;
3199 dc->tb = tb;
3201 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3203 dc->is_jmp = DISAS_NEXT;
3204 dc->ppc = pc_start;
3205 dc->pc = pc_start;
3206 dc->singlestep_enabled = env->singlestep_enabled;
3207 dc->flags_uptodate = 1;
3208 dc->flagx_known = 1;
3209 dc->flags_x = tb->flags & X_FLAG;
3210 dc->cc_x_uptodate = 0;
3211 dc->cc_mask = 0;
3212 dc->update_cc = 0;
3213 dc->clear_prefix = 0;
3214 dc->clear_locked_irq = 1;
3216 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
3217 dc->cc_size_uptodate = -1;
3219 /* Decode TB flags. */
3220 orig_flags = dc->tb_flags = tb->flags & (S_FLAG | P_FLAG | U_FLAG \
3221 | X_FLAG | PFIX_FLAG);
3222 dc->delayed_branch = !!(tb->flags & 7);
3223 if (dc->delayed_branch)
3224 dc->jmp = JMP_INDIRECT;
3225 else
3226 dc->jmp = JMP_NOJMP;
3228 dc->cpustate_changed = 0;
3230 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3231 qemu_log(
3232 "srch=%d pc=%x %x flg=%llx bt=%x ds=%u ccs=%x\n"
3233 "pid=%x usp=%x\n"
3234 "%x.%x.%x.%x\n"
3235 "%x.%x.%x.%x\n"
3236 "%x.%x.%x.%x\n"
3237 "%x.%x.%x.%x\n",
3238 search_pc, dc->pc, dc->ppc,
3239 (unsigned long long)tb->flags,
3240 env->btarget, (unsigned)tb->flags & 7,
3241 env->pregs[PR_CCS],
3242 env->pregs[PR_PID], env->pregs[PR_USP],
3243 env->regs[0], env->regs[1], env->regs[2], env->regs[3],
3244 env->regs[4], env->regs[5], env->regs[6], env->regs[7],
3245 env->regs[8], env->regs[9],
3246 env->regs[10], env->regs[11],
3247 env->regs[12], env->regs[13],
3248 env->regs[14], env->regs[15]);
3249 qemu_log("--------------\n");
3250 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3253 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
3254 lj = -1;
3255 num_insns = 0;
3256 max_insns = tb->cflags & CF_COUNT_MASK;
3257 if (max_insns == 0)
3258 max_insns = CF_COUNT_MASK;
3260 gen_icount_start();
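/* Main translation loop: translate insns until the decoder signals a jump or
   a cpu-state change, the opcode buffer fills up, single-stepping is
   requested, we reach the next page or hit the icount limit (see the loop
   condition below). */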
3263 check_breakpoint(env, dc);
3265 if (search_pc) {
3266 j = gen_opc_ptr - gen_opc_buf;
3267 if (lj < j) {
3268 lj++;
3269 while (lj < j)
3270 gen_opc_instr_start[lj++] = 0;
3272 if (dc->delayed_branch == 1)
3273 gen_opc_pc[lj] = dc->ppc | 1;
3274 else
3275 gen_opc_pc[lj] = dc->pc;
3276 gen_opc_instr_start[lj] = 1;
3277 gen_opc_icount[lj] = num_insns;
3280 /* Pretty disas. */
3281 LOG_DIS("%8.8x:\t", dc->pc);
3283 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3284 gen_io_start();
3285 dc->clear_x = 1;
3287 insn_len = dc->decoder(dc);
3288 dc->ppc = dc->pc;
3289 dc->pc += insn_len;
3290 if (dc->clear_x)
3291 cris_clear_x_flag(dc);
3293 num_insns++;
3294 /* Check for delayed branches here. If we do it before
3295 actually generating any host code, the simulator will just
3296 loop doing nothing on this program location. */
3297 if (dc->delayed_branch) {
3298 dc->delayed_branch--;
3299 if (dc->delayed_branch == 0)
3301 if (tb->flags & 7)
3302 t_gen_mov_env_TN(dslot,
3303 tcg_const_tl(0));
3304 if (dc->jmp == JMP_DIRECT) {
3305 dc->is_jmp = DISAS_NEXT;
3306 } else {
3307 t_gen_cc_jmp(env_btarget,
3308 tcg_const_tl(dc->pc));
3309 dc->is_jmp = DISAS_JUMP;
3311 break;
3315 /* If we are re-executing a branch due to exceptions on
3316 delay slots, don't break. */
3317 if (!(tb->pc & 1) && env->singlestep_enabled)
3318 break;
3319 } while (!dc->is_jmp && !dc->cpustate_changed
3320 && gen_opc_ptr < gen_opc_end
3321 && !singlestep
3322 && (dc->pc < next_page_start)
3323 && num_insns < max_insns);
3325 if (dc->tb_flags != orig_flags) {
3326 dc->cpustate_changed = 1;
3329 if (dc->clear_locked_irq)
3330 t_gen_mov_env_TN(locked_irq, tcg_const_tl(0));
3332 npc = dc->pc;
3333 if (dc->jmp == JMP_DIRECT && !dc->delayed_branch)
3334 npc = dc->jmp_pc;
3336 if (tb->cflags & CF_LAST_IO)
3337 gen_io_end();
3338 /* Force an update if the per-tb cpu state has changed. */
3339 if (dc->is_jmp == DISAS_NEXT
3340 && (dc->cpustate_changed || !dc->flagx_known
3341 || (dc->flags_x != (tb->flags & X_FLAG)))) {
3342 dc->is_jmp = DISAS_UPDATE;
3343 tcg_gen_movi_tl(env_pc, npc);
3345 /* Broken branch+delayslot sequence. */
3346 if (dc->delayed_branch == 1) {
3347 /* Set env->dslot to the size of the branch insn. */
3348 t_gen_mov_env_TN(dslot, tcg_const_tl(dc->pc - dc->ppc));
3349 cris_store_direct_jmp(dc);
3352 cris_evaluate_flags (dc);
3354 if (unlikely(env->singlestep_enabled)) {
3355 if (dc->is_jmp == DISAS_NEXT)
3356 tcg_gen_movi_tl(env_pc, npc);
3357 t_gen_raise_exception(EXCP_DEBUG);
3358 } else {
3359 switch(dc->is_jmp) {
3360 case DISAS_NEXT:
3361 gen_goto_tb(dc, 1, npc);
3362 break;
3363 default:
3364 case DISAS_JUMP:
3365 case DISAS_UPDATE:
3366 /* indicate that the hash table must be used
3367 to find the next TB */
3368 tcg_gen_exit_tb(0);
3369 break;
3370 case DISAS_SWI:
3371 case DISAS_TB_JUMP:
3372 /* nothing more to generate */
3373 break;
3376 gen_icount_end(tb, num_insns);
3377 *gen_opc_ptr = INDEX_op_end;
3378 if (search_pc) {
3379 j = gen_opc_ptr - gen_opc_buf;
3380 lj++;
3381 while (lj <= j)
3382 gen_opc_instr_start[lj++] = 0;
3383 } else {
3384 tb->size = dc->pc - pc_start;
3385 tb->icount = num_insns;
3388 #ifdef DEBUG_DISAS
3389 #if !DISAS_CRIS
3390 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3391 log_target_disas(pc_start, dc->pc - pc_start,
3392 dc->env->pregs[PR_VR]);
3393 qemu_log("\nisize=%d osize=%zd\n",
3394 dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
3396 #endif
3397 #endif
3400 void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
3402 gen_intermediate_code_internal(env, tb, 0);
3405 void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
3407 gen_intermediate_code_internal(env, tb, 1);
3410 void cpu_dump_state (CPUState *env, FILE *f,
3411 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
3412 int flags)
3414 int i;
3415 uint32_t srs;
3417 if (!env || !f)
3418 return;
3420 cpu_fprintf(f, "PC=%x CCS=%x btaken=%d btarget=%x\n"
3421 "cc_op=%d cc_src=%d cc_dest=%d cc_result=%x cc_mask=%x\n",
3422 env->pc, env->pregs[PR_CCS], env->btaken, env->btarget,
3423 env->cc_op,
3424 env->cc_src, env->cc_dest, env->cc_result, env->cc_mask);
3427 for (i = 0; i < 16; i++) {
3428 cpu_fprintf(f, "%s=%8.8x ",regnames[i], env->regs[i]);
3429 if ((i + 1) % 4 == 0)
3430 cpu_fprintf(f, "\n");
3432 cpu_fprintf(f, "\nspecial regs:\n");
3433 for (i = 0; i < 16; i++) {
3434 cpu_fprintf(f, "%s=%8.8x ", pregnames[i], env->pregs[i]);
3435 if ((i + 1) % 4 == 0)
3436 cpu_fprintf(f, "\n");
3438 srs = env->pregs[PR_SRS];
3439 cpu_fprintf(f, "\nsupport function regs bank %x:\n", srs);
3440 if (srs < 256) {
3441 for (i = 0; i < 16; i++) {
3442 cpu_fprintf(f, "s%2.2d=%8.8x ",
3443 i, env->sregs[srs][i]);
3444 if ((i + 1) % 4 == 0)
3445 cpu_fprintf(f, "\n");
3448 cpu_fprintf(f, "\n\n");
3452 struct
3454 uint32_t vr;
3455 const char *name;
3456 } cris_cores[] = {
3457 {8, "crisv8"},
3458 {9, "crisv9"},
3459 {10, "crisv10"},
3460 {11, "crisv11"},
3461 {32, "crisv32"},
3464 void cris_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3466 unsigned int i;
3468 (*cpu_fprintf)(f, "Available CPUs:\n");
3469 for (i = 0; i < ARRAY_SIZE(cris_cores); i++) {
3470 (*cpu_fprintf)(f, " %s\n", cris_cores[i].name);
3474 static uint32_t vr_by_name(const char *name)
3476 unsigned int i;
3477 for (i = 0; i < ARRAY_SIZE(cris_cores); i++) {
3478 if (strcmp(name, cris_cores[i].name) == 0) {
3479 return cris_cores[i].vr;
3482 return 32;
3485 CPUCRISState *cpu_cris_init (const char *cpu_model)
3487 CPUCRISState *env;
3488 static int tcg_initialized = 0;
3489 int i;
3491 env = qemu_mallocz(sizeof(CPUCRISState));
3493 env->pregs[PR_VR] = vr_by_name(cpu_model);
3494 cpu_exec_init(env);
3495 cpu_reset(env);
3496 qemu_init_vcpu(env);
3498 if (tcg_initialized)
3499 return env;
3501 tcg_initialized = 1;
3503 #define GEN_HELPER 2
3504 #include "helper.h"
3506 if (env->pregs[PR_VR] < 32) {
3507 cpu_crisv10_init(env);
3508 return env;
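/* Register the TCG globals backing the translator's view of CPUState:
   the condition-code scratch values, pc, the branch target/taken state and
   the 16 general plus 16 special registers. */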
3512 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
3513 cc_x = tcg_global_mem_new(TCG_AREG0,
3514 offsetof(CPUState, cc_x), "cc_x");
3515 cc_src = tcg_global_mem_new(TCG_AREG0,
3516 offsetof(CPUState, cc_src), "cc_src");
3517 cc_dest = tcg_global_mem_new(TCG_AREG0,
3518 offsetof(CPUState, cc_dest),
3519 "cc_dest");
3520 cc_result = tcg_global_mem_new(TCG_AREG0,
3521 offsetof(CPUState, cc_result),
3522 "cc_result");
3523 cc_op = tcg_global_mem_new(TCG_AREG0,
3524 offsetof(CPUState, cc_op), "cc_op");
3525 cc_size = tcg_global_mem_new(TCG_AREG0,
3526 offsetof(CPUState, cc_size),
3527 "cc_size");
3528 cc_mask = tcg_global_mem_new(TCG_AREG0,
3529 offsetof(CPUState, cc_mask),
3530 "cc_mask");
3532 env_pc = tcg_global_mem_new(TCG_AREG0,
3533 offsetof(CPUState, pc),
3534 "pc");
3535 env_btarget = tcg_global_mem_new(TCG_AREG0,
3536 offsetof(CPUState, btarget),
3537 "btarget");
3538 env_btaken = tcg_global_mem_new(TCG_AREG0,
3539 offsetof(CPUState, btaken),
3540 "btaken");
3541 for (i = 0; i < 16; i++) {
3542 cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
3543 offsetof(CPUState, regs[i]),
3544 regnames[i]);
3546 for (i = 0; i < 16; i++) {
3547 cpu_PR[i] = tcg_global_mem_new(TCG_AREG0,
3548 offsetof(CPUState, pregs[i]),
3549 pregnames[i]);
3552 return env;
3555 void cpu_reset (CPUCRISState *env)
3557 uint32_t vr;
3559 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
3560 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
3561 log_cpu_state(env, 0);
3564 vr = env->pregs[PR_VR];
3565 memset(env, 0, offsetof(CPUCRISState, breakpoints));
3566 env->pregs[PR_VR] = vr;
3567 tlb_flush(env, 1);
3569 #if defined(CONFIG_USER_ONLY)
3570 /* start in user mode with interrupts enabled. */
3571 env->pregs[PR_CCS] |= U_FLAG | I_FLAG | P_FLAG;
3572 #else
3573 cris_mmu_init(env);
3574 env->pregs[PR_CCS] = 0;
3575 #endif
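/* Recover the guest PC for a given position in the generated code, using the
   per-insn table built during the search_pc pass (presumably used when CPU
   state must be re-derived for a faulting insn). */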
3578 void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
3579 unsigned long searched_pc, int pc_pos, void *puc)
3581 env->pc = gen_opc_pc[pc_pos];