target-cris/translate.c
1 /*
2 * CRIS emulation for qemu: main translation routines.
4 * Copyright (c) 2008 AXIS Communications AB
5 * Written by Edgar E. Iglesias.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
23 * FIXME:
24 * The condition code translation is in need of attention.
27 #include <stdarg.h>
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <string.h>
31 #include <inttypes.h>
32 #include <assert.h>
34 #include "cpu.h"
35 #include "exec-all.h"
36 #include "disas.h"
37 #include "tcg-op.h"
38 #include "helper.h"
39 #include "mmu.h"
40 #include "crisv32-decode.h"
41 #include "qemu-common.h"
43 #define GEN_HELPER 1
44 #include "helper.h"
46 #define DISAS_CRIS 0
47 #if DISAS_CRIS
48 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
49 #else
50 # define LOG_DIS(...) do { } while (0)
51 #endif
53 #define D(x)
54 #define BUG() (gen_BUG(dc, __FILE__, __LINE__))
55 #define BUG_ON(x) ({if (x) BUG();})
57 #define DISAS_SWI 5
59 /* Used by the decoder. */
60 #define EXTRACT_FIELD(src, start, end) \
61 (((src) >> start) & ((1 << (end - start + 1)) - 1))
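/* For example, EXTRACT_FIELD(0x05b4, 4, 7) picks bits 4..7 of the value and
   yields 0xb; the insn decoders below use it to pull operand, size and
   condition fields out of dc->ir. */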
63 #define CC_MASK_NZ 0xc
64 #define CC_MASK_NZV 0xe
65 #define CC_MASK_NZVC 0xf
66 #define CC_MASK_RNZV 0x10e
68 static TCGv_ptr cpu_env;
69 static TCGv cpu_R[16];
70 static TCGv cpu_PR[16];
71 static TCGv cc_x;
72 static TCGv cc_src;
73 static TCGv cc_dest;
74 static TCGv cc_result;
75 static TCGv cc_op;
76 static TCGv cc_size;
77 static TCGv cc_mask;
79 static TCGv env_btaken;
80 static TCGv env_btarget;
81 static TCGv env_pc;
83 #include "gen-icount.h"
85 /* This is the state at translation time. */
86 typedef struct DisasContext {
87 CPUState *env;
88 target_ulong pc, ppc;
90 /* Decoder. */
91 uint32_t ir;
92 uint32_t opcode;
93 unsigned int op1;
94 unsigned int op2;
95 unsigned int zsize, zzsize;
96 unsigned int mode;
97 unsigned int postinc;
99 int update_cc;
100 int cc_op;
101 int cc_size;
102 uint32_t cc_mask;
104 int cc_size_uptodate; /* -1 invalid or last written value. */
106 int cc_x_uptodate; /* 1 - ccs, 2 - known | X_FLAG. 0 not uptodate. */
107 int flags_uptodate; /* Whether or not $ccs is uptodate. */
108 int flagx_known; /* Whether or not flags_x has the x flag known at
109 translation time. */
110 int flags_x;
112 int clear_x; /* Clear x after this insn? */
113 int cpustate_changed;
114 unsigned int tb_flags; /* tb dependent flags. */
115 int is_jmp;
117 #define JMP_NOJMP 0
118 #define JMP_DIRECT 1
119 #define JMP_INDIRECT 2
120 int jmp; /* 0=nojmp, 1=direct, 2=indirect. */
121 uint32_t jmp_pc;
123 int delayed_branch;
125 struct TranslationBlock *tb;
126 int singlestep_enabled;
127 } DisasContext;
129 static void gen_BUG(DisasContext *dc, const char *file, int line)
131 printf ("BUG: pc=%x %s %d\n", dc->pc, file, line);
132 qemu_log("BUG: pc=%x %s %d\n", dc->pc, file, line);
133 cpu_abort(dc->env, "%s:%d\n", file, line);
136 static const char *regnames[] =
138 "$r0", "$r1", "$r2", "$r3",
139 "$r4", "$r5", "$r6", "$r7",
140 "$r8", "$r9", "$r10", "$r11",
141 "$r12", "$r13", "$sp", "$acr",
143 static const char *pregnames[] =
145 "$bz", "$vr", "$pid", "$srs",
146 "$wz", "$exs", "$eda", "$mof",
147 "$dz", "$ebp", "$erp", "$srp",
148 "$nrp", "$ccs", "$usp", "$spc",
151 /* We need this table to handle preg-moves with implicit width. */
152 static int preg_sizes[] = {
153 1, /* bz. */
154 1, /* vr. */
155 4, /* pid. */
156 1, /* srs. */
157 2, /* wz. */
158 4, 4, 4,
159 4, 4, 4, 4,
160 4, 4, 4, 4,
163 #define t_gen_mov_TN_env(tn, member) \
164 _t_gen_mov_TN_env((tn), offsetof(CPUState, member))
165 #define t_gen_mov_env_TN(member, tn) \
166 _t_gen_mov_env_TN(offsetof(CPUState, member), (tn))
168 static inline void t_gen_mov_TN_reg(TCGv tn, int r)
170 if (r < 0 || r > 15)
171 fprintf(stderr, "wrong register read $r%d\n", r);
172 tcg_gen_mov_tl(tn, cpu_R[r]);
174 static inline void t_gen_mov_reg_TN(int r, TCGv tn)
176 if (r < 0 || r > 15)
177 fprintf(stderr, "wrong register write $r%d\n", r);
178 tcg_gen_mov_tl(cpu_R[r], tn);
181 static inline void _t_gen_mov_TN_env(TCGv tn, int offset)
183 if (offset > sizeof (CPUState))
184 fprintf(stderr, "wrong load from env from off=%d\n", offset);
185 tcg_gen_ld_tl(tn, cpu_env, offset);
187 static inline void _t_gen_mov_env_TN(int offset, TCGv tn)
189 if (offset > sizeof (CPUState))
190 fprintf(stderr, "wrong store to env at off=%d\n", offset);
191 tcg_gen_st_tl(tn, cpu_env, offset);
194 static inline void t_gen_mov_TN_preg(TCGv tn, int r)
196 if (r < 0 || r > 15)
197 fprintf(stderr, "wrong register read $p%d\n", r);
198 if (r == PR_BZ || r == PR_WZ || r == PR_DZ)
199 tcg_gen_mov_tl(tn, tcg_const_tl(0));
200 else if (r == PR_VR)
201 tcg_gen_mov_tl(tn, tcg_const_tl(32));
202 else if (r == PR_EDA) {
203 printf("read from EDA!\n");
204 tcg_gen_mov_tl(tn, cpu_PR[r]);
206 else
207 tcg_gen_mov_tl(tn, cpu_PR[r]);
209 static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn)
211 if (r < 0 || r > 15)
212 fprintf(stderr, "wrong register write $p%d\n", r);
213 if (r == PR_BZ || r == PR_WZ || r == PR_DZ)
214 return;
215 else if (r == PR_SRS)
216 tcg_gen_andi_tl(cpu_PR[r], tn, 3);
217 else {
218 if (r == PR_PID)
219 gen_helper_tlb_flush_pid(tn);
220 if (dc->tb_flags & S_FLAG && r == PR_SPC)
221 gen_helper_spc_write(tn);
222 else if (r == PR_CCS)
223 dc->cpustate_changed = 1;
224 tcg_gen_mov_tl(cpu_PR[r], tn);
228 static inline void t_gen_raise_exception(uint32_t index)
230 TCGv_i32 tmp = tcg_const_i32(index);
231 gen_helper_raise_exception(tmp);
232 tcg_temp_free_i32(tmp);
235 static void t_gen_lsl(TCGv d, TCGv a, TCGv b)
237 TCGv t0, t_31;
239 t0 = tcg_temp_new();
240 t_31 = tcg_const_tl(31);
241 tcg_gen_shl_tl(d, a, b);
243 tcg_gen_sub_tl(t0, t_31, b);
244 tcg_gen_sar_tl(t0, t0, t_31);
245 tcg_gen_and_tl(t0, t0, d);
246 tcg_gen_xor_tl(d, d, t0);
247 tcg_temp_free(t0);
248 tcg_temp_free(t_31);
251 static void t_gen_lsr(TCGv d, TCGv a, TCGv b)
253 TCGv t0, t_31;
255 t0 = tcg_temp_new();
256 t_31 = tcg_temp_new();
257 tcg_gen_shr_tl(d, a, b);
259 tcg_gen_movi_tl(t_31, 31);
260 tcg_gen_sub_tl(t0, t_31, b);
261 tcg_gen_sar_tl(t0, t0, t_31);
262 tcg_gen_and_tl(t0, t0, d);
263 tcg_gen_xor_tl(d, d, t0);
264 tcg_temp_free(t0);
265 tcg_temp_free(t_31);
268 static void t_gen_asr(TCGv d, TCGv a, TCGv b)
270 TCGv t0, t_31;
272 t0 = tcg_temp_new();
273 t_31 = tcg_temp_new();
274 tcg_gen_sar_tl(d, a, b);
276 tcg_gen_movi_tl(t_31, 31);
277 tcg_gen_sub_tl(t0, t_31, b);
278 tcg_gen_sar_tl(t0, t0, t_31);
279 tcg_gen_or_tl(d, d, t0);
280 tcg_temp_free(t0);
281 tcg_temp_free(t_31);
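/* The three shift helpers above guard against shift counts larger than 31:
   the register-form decoders mask the count with 63, and TCG shifts are
   undefined for counts >= 32.  (31 - b) >> 31, done with an arithmetic
   shift, is all-ones when b > 31 and zero otherwise; lsl/lsr use it to
   clear the result for oversized counts, while asr ORs it in to produce
   the sign-fill pattern. */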
284 /* 64-bit signed mul, lower result in d and upper in d2. */
285 static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
287 TCGv_i64 t0, t1;
289 t0 = tcg_temp_new_i64();
290 t1 = tcg_temp_new_i64();
292 tcg_gen_ext_i32_i64(t0, a);
293 tcg_gen_ext_i32_i64(t1, b);
294 tcg_gen_mul_i64(t0, t0, t1);
296 tcg_gen_trunc_i64_i32(d, t0);
297 tcg_gen_shri_i64(t0, t0, 32);
298 tcg_gen_trunc_i64_i32(d2, t0);
300 tcg_temp_free_i64(t0);
301 tcg_temp_free_i64(t1);
304 /* 64-bit unsigned mul, lower result in d and upper in d2. */
305 static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
307 TCGv_i64 t0, t1;
309 t0 = tcg_temp_new_i64();
310 t1 = tcg_temp_new_i64();
312 tcg_gen_extu_i32_i64(t0, a);
313 tcg_gen_extu_i32_i64(t1, b);
314 tcg_gen_mul_i64(t0, t0, t1);
316 tcg_gen_trunc_i64_i32(d, t0);
317 tcg_gen_shri_i64(t0, t0, 32);
318 tcg_gen_trunc_i64_i32(d2, t0);
320 tcg_temp_free_i64(t0);
321 tcg_temp_free_i64(t1);
324 static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b)
326 int l1;
328 l1 = gen_new_label();
331 * d <<= 1
332 * if (d >= s)
333 * d -= s;
335 tcg_gen_shli_tl(d, a, 1);
336 tcg_gen_brcond_tl(TCG_COND_LTU, d, b, l1);
337 tcg_gen_sub_tl(d, d, b);
338 gen_set_label(l1);
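/* dstep is effectively one iteration of a shift-and-subtract division: the
   dividend is shifted left one bit and the divisor subtracted when it fits.
   Guest code typically repeats it 32 times to implement unsigned division. */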
341 /* Extended arithmetics on CRIS. */
342 static inline void t_gen_add_flag(TCGv d, int flag)
344 TCGv c;
346 c = tcg_temp_new();
347 t_gen_mov_TN_preg(c, PR_CCS);
348 /* Propagate carry into d. */
349 tcg_gen_andi_tl(c, c, 1 << flag);
350 if (flag)
351 tcg_gen_shri_tl(c, c, flag);
352 tcg_gen_add_tl(d, d, c);
353 tcg_temp_free(c);
356 static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
358 if (dc->flagx_known) {
359 if (dc->flags_x) {
360 TCGv c;
362 c = tcg_temp_new();
363 t_gen_mov_TN_preg(c, PR_CCS);
364 /* C flag is already at bit 0. */
365 tcg_gen_andi_tl(c, c, C_FLAG);
366 tcg_gen_add_tl(d, d, c);
367 tcg_temp_free(c);
369 } else {
370 TCGv x, c;
372 x = tcg_temp_new();
373 c = tcg_temp_new();
374 t_gen_mov_TN_preg(x, PR_CCS);
375 tcg_gen_mov_tl(c, x);
377 /* Propagate carry into d if X is set. Branch free. */
378 tcg_gen_andi_tl(c, c, C_FLAG);
379 tcg_gen_andi_tl(x, x, X_FLAG);
380 tcg_gen_shri_tl(x, x, 4);
382 tcg_gen_and_tl(x, x, c);
383 tcg_gen_add_tl(d, d, x);
384 tcg_temp_free(x);
385 tcg_temp_free(c);
389 static inline void t_gen_subx_carry(DisasContext *dc, TCGv d)
391 if (dc->flagx_known) {
392 if (dc->flags_x) {
393 TCGv c;
395 c = tcg_temp_new();
396 t_gen_mov_TN_preg(c, PR_CCS);
397 /* C flag is already at bit 0. */
398 tcg_gen_andi_tl(c, c, C_FLAG);
399 tcg_gen_sub_tl(d, d, c);
400 tcg_temp_free(c);
402 } else {
403 TCGv x, c;
405 x = tcg_temp_new();
406 c = tcg_temp_new();
407 t_gen_mov_TN_preg(x, PR_CCS);
408 tcg_gen_mov_tl(c, x);
410 /* Propagate carry into d if X is set. Branch free. */
411 tcg_gen_andi_tl(c, c, C_FLAG);
412 tcg_gen_andi_tl(x, x, X_FLAG);
413 tcg_gen_shri_tl(x, x, 4);
415 tcg_gen_and_tl(x, x, c);
416 tcg_gen_sub_tl(d, d, x);
417 tcg_temp_free(x);
418 tcg_temp_free(c);
422 /* Swap the two bytes within each half word of the s operand.
423 T0 = ((T0 << 8) & 0xff00ff00) | ((T0 >> 8) & 0x00ff00ff) */
424 static inline void t_gen_swapb(TCGv d, TCGv s)
426 TCGv t, org_s;
428 t = tcg_temp_new();
429 org_s = tcg_temp_new();
431 /* d and s may refer to the same object. */
432 tcg_gen_mov_tl(org_s, s);
433 tcg_gen_shli_tl(t, org_s, 8);
434 tcg_gen_andi_tl(d, t, 0xff00ff00);
435 tcg_gen_shri_tl(t, org_s, 8);
436 tcg_gen_andi_tl(t, t, 0x00ff00ff);
437 tcg_gen_or_tl(d, d, t);
438 tcg_temp_free(t);
439 tcg_temp_free(org_s);
442 /* Swap the halfwords of the s operand. */
443 static inline void t_gen_swapw(TCGv d, TCGv s)
445 TCGv t;
446 /* d and s may refer to the same object. */
447 t = tcg_temp_new();
448 tcg_gen_mov_tl(t, s);
449 tcg_gen_shli_tl(d, t, 16);
450 tcg_gen_shri_tl(t, t, 16);
451 tcg_gen_or_tl(d, d, t);
452 tcg_temp_free(t);
455 /* Reverse the bits within each byte.
456 T0 = (((T0 << 7) & 0x80808080) |
457 ((T0 << 5) & 0x40404040) |
458 ((T0 << 3) & 0x20202020) |
459 ((T0 << 1) & 0x10101010) |
460 ((T0 >> 1) & 0x08080808) |
461 ((T0 >> 3) & 0x04040404) |
462 ((T0 >> 5) & 0x02020202) |
463 ((T0 >> 7) & 0x01010101));
465 static inline void t_gen_swapr(TCGv d, TCGv s)
467 struct {
468 int shift; /* LSL when positive, LSR when negative. */
469 uint32_t mask;
470 } bitrev [] = {
471 {7, 0x80808080},
472 {5, 0x40404040},
473 {3, 0x20202020},
474 {1, 0x10101010},
475 {-1, 0x08080808},
476 {-3, 0x04040404},
477 {-5, 0x02020202},
478 {-7, 0x01010101}
480 int i;
481 TCGv t, org_s;
483 /* d and s may refer to the same object. */
484 t = tcg_temp_new();
485 org_s = tcg_temp_new();
486 tcg_gen_mov_tl(org_s, s);
488 tcg_gen_shli_tl(t, org_s, bitrev[0].shift);
489 tcg_gen_andi_tl(d, t, bitrev[0].mask);
490 for (i = 1; i < ARRAY_SIZE(bitrev); i++) {
491 if (bitrev[i].shift >= 0) {
492 tcg_gen_shli_tl(t, org_s, bitrev[i].shift);
493 } else {
494 tcg_gen_shri_tl(t, org_s, -bitrev[i].shift);
496 tcg_gen_andi_tl(t, t, bitrev[i].mask);
497 tcg_gen_or_tl(d, d, t);
499 tcg_temp_free(t);
500 tcg_temp_free(org_s);
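/* Each table entry above moves one bit position within every byte, so a
   byte 0x12 (00010010) comes out as 0x48 (01001000).  Combined with
   swapb/swapw this provides the "r" (bit-reverse) mode of the swap insn. */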
503 static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false)
505 TCGv btaken;
506 int l1;
508 l1 = gen_new_label();
509 btaken = tcg_temp_new();
511 /* Conditional jmp. */
512 tcg_gen_mov_tl(btaken, env_btaken);
513 tcg_gen_mov_tl(env_pc, pc_false);
514 tcg_gen_brcondi_tl(TCG_COND_EQ, btaken, 0, l1);
515 tcg_gen_mov_tl(env_pc, pc_true);
516 gen_set_label(l1);
518 tcg_temp_free(btaken);
521 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
523 TranslationBlock *tb;
524 tb = dc->tb;
525 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
526 tcg_gen_goto_tb(n);
527 tcg_gen_movi_tl(env_pc, dest);
528 tcg_gen_exit_tb((long)tb + n);
529 } else {
530 tcg_gen_movi_tl(env_pc, dest);
531 tcg_gen_exit_tb(0);
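/* Direct TB chaining (goto_tb plus exit_tb with a non-zero return value) is
   only used when the destination lies on the same guest page as the current
   TB; otherwise we just update env_pc and return to the main loop. */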
535 /* Sign extend at translation time. */
536 static int sign_extend(unsigned int val, unsigned int width)
538 int sval;
540 /* LSL. */
541 val <<= 31 - width;
542 sval = val;
543 /* ASR. */
544 sval >>= 31 - width;
545 return sval;
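/* "width" is the bit position of the sign bit, i.e. the field is width + 1
   bits wide: sign_extend(0x3f, 5) == -1 while sign_extend(0x1f, 5) == 31.
   The quick-immediate decoders (moveq, cmpq, ...) rely on this. */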
548 static inline void cris_clear_x_flag(DisasContext *dc)
550 if (dc->flagx_known && dc->flags_x)
551 dc->flags_uptodate = 0;
553 dc->flagx_known = 1;
554 dc->flags_x = 0;
557 static void cris_flush_cc_state(DisasContext *dc)
559 if (dc->cc_size_uptodate != dc->cc_size) {
560 tcg_gen_movi_tl(cc_size, dc->cc_size);
561 dc->cc_size_uptodate = dc->cc_size;
563 tcg_gen_movi_tl(cc_op, dc->cc_op);
564 tcg_gen_movi_tl(cc_mask, dc->cc_mask);
567 static void cris_evaluate_flags(DisasContext *dc)
569 if (dc->flags_uptodate)
570 return;
572 cris_flush_cc_state(dc);
574 switch (dc->cc_op)
576 case CC_OP_MCP:
577 gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS],
578 cpu_PR[PR_CCS], cc_src,
579 cc_dest, cc_result);
580 break;
581 case CC_OP_MULS:
582 gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS],
583 cpu_PR[PR_CCS], cc_result,
584 cpu_PR[PR_MOF]);
585 break;
586 case CC_OP_MULU:
587 gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS],
588 cpu_PR[PR_CCS], cc_result,
589 cpu_PR[PR_MOF]);
590 break;
591 case CC_OP_MOVE:
592 case CC_OP_AND:
593 case CC_OP_OR:
594 case CC_OP_XOR:
595 case CC_OP_ASR:
596 case CC_OP_LSR:
597 case CC_OP_LSL:
598 switch (dc->cc_size)
600 case 4:
601 gen_helper_evaluate_flags_move_4(cpu_PR[PR_CCS],
602 cpu_PR[PR_CCS], cc_result);
603 break;
604 case 2:
605 gen_helper_evaluate_flags_move_2(cpu_PR[PR_CCS],
606 cpu_PR[PR_CCS], cc_result);
607 break;
608 default:
609 gen_helper_evaluate_flags();
610 break;
612 break;
613 case CC_OP_FLAGS:
614 /* live. */
615 break;
616 case CC_OP_SUB:
617 case CC_OP_CMP:
618 if (dc->cc_size == 4)
619 gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS],
620 cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
621 else
622 gen_helper_evaluate_flags();
624 break;
625 default:
626 switch (dc->cc_size)
628 case 4:
629 gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS],
630 cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
631 break;
632 default:
633 gen_helper_evaluate_flags();
634 break;
636 break;
639 if (dc->flagx_known) {
640 if (dc->flags_x)
641 tcg_gen_ori_tl(cpu_PR[PR_CCS],
642 cpu_PR[PR_CCS], X_FLAG);
643 else
644 tcg_gen_andi_tl(cpu_PR[PR_CCS],
645 cpu_PR[PR_CCS], ~X_FLAG);
647 dc->flags_uptodate = 1;
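/* Flags are evaluated lazily: cc_op/cc_size/cc_src/cc_dest/cc_result record
   the last flag-setting operation, and the real $ccs value is only
   materialized here, when something actually needs it (conditional
   branches, moves to/from $ccs, exceptions). */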
650 static void cris_cc_mask(DisasContext *dc, unsigned int mask)
652 uint32_t ovl;
654 if (!mask) {
655 dc->update_cc = 0;
656 return;
659 /* Check if we need to evaluate the condition codes due to
660 CC overlaying. */
661 ovl = (dc->cc_mask ^ mask) & ~mask;
662 if (ovl) {
663 /* TODO: optimize this case. It triggers all the time. */
664 cris_evaluate_flags (dc);
666 dc->cc_mask = mask;
667 dc->update_cc = 1;
670 static void cris_update_cc_op(DisasContext *dc, int op, int size)
672 dc->cc_op = op;
673 dc->cc_size = size;
674 dc->flags_uptodate = 0;
677 static inline void cris_update_cc_x(DisasContext *dc)
679 /* Save the x flag state at the time of the cc snapshot. */
680 if (dc->flagx_known) {
681 if (dc->cc_x_uptodate == (2 | dc->flags_x))
682 return;
683 tcg_gen_movi_tl(cc_x, dc->flags_x);
684 dc->cc_x_uptodate = 2 | dc->flags_x;
686 else {
687 tcg_gen_andi_tl(cc_x, cpu_PR[PR_CCS], X_FLAG);
688 dc->cc_x_uptodate = 1;
692 /* Update cc prior to executing ALU op. Needs source operands untouched. */
693 static void cris_pre_alu_update_cc(DisasContext *dc, int op,
694 TCGv dst, TCGv src, int size)
696 if (dc->update_cc) {
697 cris_update_cc_op(dc, op, size);
698 tcg_gen_mov_tl(cc_src, src);
700 if (op != CC_OP_MOVE
701 && op != CC_OP_AND
702 && op != CC_OP_OR
703 && op != CC_OP_XOR
704 && op != CC_OP_ASR
705 && op != CC_OP_LSR
706 && op != CC_OP_LSL)
707 tcg_gen_mov_tl(cc_dest, dst);
709 cris_update_cc_x(dc);
713 /* Update cc after executing ALU op. needs the result. */
714 static inline void cris_update_result(DisasContext *dc, TCGv res)
716 if (dc->update_cc)
717 tcg_gen_mov_tl(cc_result, res);
720 /* Emit the code for one ALU operation; the result ends up in dst. */
721 static void cris_alu_op_exec(DisasContext *dc, int op,
722 TCGv dst, TCGv a, TCGv b, int size)
724 /* Emit the ALU insns. */
725 switch (op)
727 case CC_OP_ADD:
728 tcg_gen_add_tl(dst, a, b);
729 /* Extended arithmetics. */
730 t_gen_addx_carry(dc, dst);
731 break;
732 case CC_OP_ADDC:
733 tcg_gen_add_tl(dst, a, b);
734 t_gen_add_flag(dst, 0); /* C_FLAG. */
735 break;
736 case CC_OP_MCP:
737 tcg_gen_add_tl(dst, a, b);
738 t_gen_add_flag(dst, 8); /* R_FLAG. */
739 break;
740 case CC_OP_SUB:
741 tcg_gen_sub_tl(dst, a, b);
742 /* Extended arithmetics. */
743 t_gen_subx_carry(dc, dst);
744 break;
745 case CC_OP_MOVE:
746 tcg_gen_mov_tl(dst, b);
747 break;
748 case CC_OP_OR:
749 tcg_gen_or_tl(dst, a, b);
750 break;
751 case CC_OP_AND:
752 tcg_gen_and_tl(dst, a, b);
753 break;
754 case CC_OP_XOR:
755 tcg_gen_xor_tl(dst, a, b);
756 break;
757 case CC_OP_LSL:
758 t_gen_lsl(dst, a, b);
759 break;
760 case CC_OP_LSR:
761 t_gen_lsr(dst, a, b);
762 break;
763 case CC_OP_ASR:
764 t_gen_asr(dst, a, b);
765 break;
766 case CC_OP_NEG:
767 tcg_gen_neg_tl(dst, b);
768 /* Extended arithmetics. */
769 t_gen_subx_carry(dc, dst);
770 break;
771 case CC_OP_LZ:
772 gen_helper_lz(dst, b);
773 break;
774 case CC_OP_MULS:
775 t_gen_muls(dst, cpu_PR[PR_MOF], a, b);
776 break;
777 case CC_OP_MULU:
778 t_gen_mulu(dst, cpu_PR[PR_MOF], a, b);
779 break;
780 case CC_OP_DSTEP:
781 t_gen_cris_dstep(dst, a, b);
782 break;
783 case CC_OP_BOUND:
785 int l1;
786 l1 = gen_new_label();
787 tcg_gen_mov_tl(dst, a);
788 tcg_gen_brcond_tl(TCG_COND_LEU, a, b, l1);
789 tcg_gen_mov_tl(dst, b);
790 gen_set_label(l1);
792 break;
793 case CC_OP_CMP:
794 tcg_gen_sub_tl(dst, a, b);
795 /* Extended arithmetics. */
796 t_gen_subx_carry(dc, dst);
797 break;
798 default:
799 qemu_log("illegal ALU op.\n");
800 BUG();
801 break;
804 if (size == 1)
805 tcg_gen_andi_tl(dst, dst, 0xff);
806 else if (size == 2)
807 tcg_gen_andi_tl(dst, dst, 0xffff);
810 static void cris_alu(DisasContext *dc, int op,
811 TCGv d, TCGv op_a, TCGv op_b, int size)
813 TCGv tmp;
814 int writeback;
816 writeback = 1;
818 if (op == CC_OP_CMP) {
819 tmp = tcg_temp_new();
820 writeback = 0;
821 } else if (size == 4) {
822 tmp = d;
823 writeback = 0;
824 } else
825 tmp = tcg_temp_new();
828 cris_pre_alu_update_cc(dc, op, op_a, op_b, size);
829 cris_alu_op_exec(dc, op, tmp, op_a, op_b, size);
830 cris_update_result(dc, tmp);
832 /* Writeback. */
833 if (writeback) {
834 if (size == 1)
835 tcg_gen_andi_tl(d, d, ~0xff);
836 else
837 tcg_gen_andi_tl(d, d, ~0xffff);
838 tcg_gen_or_tl(d, d, tmp);
840 if (!TCGV_EQUAL(tmp, d))
841 tcg_temp_free(tmp);
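/* For byte and word sized operations only the low 8 or 16 bits of the
   destination register are written, so the result is computed into a
   temporary and merged back into the untouched upper bits; dword results
   go straight into the destination and compares skip the writeback. */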
844 static int arith_cc(DisasContext *dc)
846 if (dc->update_cc) {
847 switch (dc->cc_op) {
848 case CC_OP_ADDC: return 1;
849 case CC_OP_ADD: return 1;
850 case CC_OP_SUB: return 1;
851 case CC_OP_DSTEP: return 1;
852 case CC_OP_LSL: return 1;
853 case CC_OP_LSR: return 1;
854 case CC_OP_ASR: return 1;
855 case CC_OP_CMP: return 1;
856 case CC_OP_NEG: return 1;
857 case CC_OP_OR: return 1;
858 case CC_OP_AND: return 1;
859 case CC_OP_XOR: return 1;
860 case CC_OP_MULU: return 1;
861 case CC_OP_MULS: return 1;
862 default:
863 return 0;
866 return 0;
869 static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
871 int arith_opt, move_opt;
873 /* TODO: optimize more condition codes. */
876 * If the flags are live, we've gotta look into the bits of CCS.
877 * Otherwise, if we just did an arithmetic operation we try to
878 * evaluate the condition code faster.
880 * When this function is done, T0 should be non-zero if the condition
881 * code is true.
883 arith_opt = arith_cc(dc) && !dc->flags_uptodate;
884 move_opt = (dc->cc_op == CC_OP_MOVE);
885 switch (cond) {
886 case CC_EQ:
887 if (arith_opt || move_opt) {
888 /* If cc_result is zero, T0 should be
889 non-zero otherwise T0 should be zero. */
890 int l1;
891 l1 = gen_new_label();
892 tcg_gen_movi_tl(cc, 0);
893 tcg_gen_brcondi_tl(TCG_COND_NE, cc_result,
894 0, l1);
895 tcg_gen_movi_tl(cc, 1);
896 gen_set_label(l1);
898 else {
899 cris_evaluate_flags(dc);
900 tcg_gen_andi_tl(cc,
901 cpu_PR[PR_CCS], Z_FLAG);
903 break;
904 case CC_NE:
905 if (arith_opt || move_opt)
906 tcg_gen_mov_tl(cc, cc_result);
907 else {
908 cris_evaluate_flags(dc);
909 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
910 Z_FLAG);
911 tcg_gen_andi_tl(cc, cc, Z_FLAG);
913 break;
914 case CC_CS:
915 cris_evaluate_flags(dc);
916 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], C_FLAG);
917 break;
918 case CC_CC:
919 cris_evaluate_flags(dc);
920 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], C_FLAG);
921 tcg_gen_andi_tl(cc, cc, C_FLAG);
922 break;
923 case CC_VS:
924 cris_evaluate_flags(dc);
925 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], V_FLAG);
926 break;
927 case CC_VC:
928 cris_evaluate_flags(dc);
929 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
930 V_FLAG);
931 tcg_gen_andi_tl(cc, cc, V_FLAG);
932 break;
933 case CC_PL:
934 if (arith_opt || move_opt) {
935 int bits = 31;
937 if (dc->cc_size == 1)
938 bits = 7;
939 else if (dc->cc_size == 2)
940 bits = 15;
942 tcg_gen_shri_tl(cc, cc_result, bits);
943 tcg_gen_xori_tl(cc, cc, 1);
944 } else {
945 cris_evaluate_flags(dc);
946 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
947 N_FLAG);
948 tcg_gen_andi_tl(cc, cc, N_FLAG);
950 break;
951 case CC_MI:
952 if (arith_opt || move_opt) {
953 int bits = 31;
955 if (dc->cc_size == 1)
956 bits = 7;
957 else if (dc->cc_size == 2)
958 bits = 15;
960 tcg_gen_shri_tl(cc, cc_result, bits);
961 tcg_gen_andi_tl(cc, cc, 1);
963 else {
964 cris_evaluate_flags(dc);
965 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
966 N_FLAG);
968 break;
969 case CC_LS:
970 cris_evaluate_flags(dc);
971 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
972 C_FLAG | Z_FLAG);
973 break;
974 case CC_HI:
975 cris_evaluate_flags(dc);
977 TCGv tmp;
979 tmp = tcg_temp_new();
980 tcg_gen_xori_tl(tmp, cpu_PR[PR_CCS],
981 C_FLAG | Z_FLAG);
982 /* Overlay the C flag on top of the Z. */
983 tcg_gen_shli_tl(cc, tmp, 2);
984 tcg_gen_and_tl(cc, tmp, cc);
985 tcg_gen_andi_tl(cc, cc, Z_FLAG);
987 tcg_temp_free(tmp);
989 break;
990 case CC_GE:
991 cris_evaluate_flags(dc);
992 /* Overlay the V flag on top of the N. */
993 tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
994 tcg_gen_xor_tl(cc,
995 cpu_PR[PR_CCS], cc);
996 tcg_gen_andi_tl(cc, cc, N_FLAG);
997 tcg_gen_xori_tl(cc, cc, N_FLAG);
998 break;
999 case CC_LT:
1000 cris_evaluate_flags(dc);
1001 /* Overlay the V flag on top of the N. */
1002 tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
1003 tcg_gen_xor_tl(cc,
1004 cpu_PR[PR_CCS], cc);
1005 tcg_gen_andi_tl(cc, cc, N_FLAG);
1006 break;
1007 case CC_GT:
1008 cris_evaluate_flags(dc);
1010 TCGv n, z;
1012 n = tcg_temp_new();
1013 z = tcg_temp_new();
1015 /* To avoid a shift we overlay everything on
1016 the V flag. */
1017 tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
1018 tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
1019 /* invert Z. */
1020 tcg_gen_xori_tl(z, z, 2);
1022 tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
1023 tcg_gen_xori_tl(n, n, 2);
1024 tcg_gen_and_tl(cc, z, n);
1025 tcg_gen_andi_tl(cc, cc, 2);
1027 tcg_temp_free(n);
1028 tcg_temp_free(z);
1030 break;
1031 case CC_LE:
1032 cris_evaluate_flags(dc);
1034 TCGv n, z;
1036 n = tcg_temp_new();
1037 z = tcg_temp_new();
1039 /* To avoid a shift we overlay everything on
1040 the V flag. */
1041 tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
1042 tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
1044 tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
1045 tcg_gen_or_tl(cc, z, n);
1046 tcg_gen_andi_tl(cc, cc, 2);
1048 tcg_temp_free(n);
1049 tcg_temp_free(z);
1051 break;
1052 case CC_P:
1053 cris_evaluate_flags(dc);
1054 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], P_FLAG);
1055 break;
1056 case CC_A:
1057 tcg_gen_movi_tl(cc, 1);
1058 break;
1059 default:
1060 BUG();
1061 break;
1065 static void cris_store_direct_jmp(DisasContext *dc)
1067 /* Store the direct jmp state into the cpu-state. */
1068 if (dc->jmp == JMP_DIRECT) {
1069 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
1070 tcg_gen_movi_tl(env_btaken, 1);
1074 static void cris_prepare_cc_branch (DisasContext *dc,
1075 int offset, int cond)
1077 /* This helps us re-schedule the micro-code to insns in delay-slots
1078 before the actual jump. */
1079 dc->delayed_branch = 2;
1080 dc->jmp_pc = dc->pc + offset;
1082 if (cond != CC_A)
1084 dc->jmp = JMP_INDIRECT;
1085 gen_tst_cc (dc, env_btaken, cond);
1086 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
1087 } else {
1088 /* Allow chaining. */
1089 dc->jmp = JMP_DIRECT;
1094 /* Prepare a jump: indirect when the dest is in a live reg, for example.
1095 Direct should be set when the dest addr is constant to allow tb chaining. */
1096 static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)
1098 /* This helps us re-schedule the micro-code to insns in delay-slots
1099 before the actual jump. */
1100 dc->delayed_branch = 2;
1101 dc->jmp = type;
1102 if (type == JMP_INDIRECT)
1103 tcg_gen_movi_tl(env_btaken, 1);
1106 static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
1108 int mem_index = cpu_mmu_index(dc->env);
1110 /* If we get a fault on a delayslot we must keep the jmp state in
1111 the cpu-state to be able to re-execute the jmp. */
1112 if (dc->delayed_branch == 1)
1113 cris_store_direct_jmp(dc);
1115 tcg_gen_qemu_ld64(dst, addr, mem_index);
1118 static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
1119 unsigned int size, int sign)
1121 int mem_index = cpu_mmu_index(dc->env);
1123 /* If we get a fault on a delayslot we must keep the jmp state in
1124 the cpu-state to be able to re-execute the jmp. */
1125 if (dc->delayed_branch == 1)
1126 cris_store_direct_jmp(dc);
1128 if (size == 1) {
1129 if (sign)
1130 tcg_gen_qemu_ld8s(dst, addr, mem_index);
1131 else
1132 tcg_gen_qemu_ld8u(dst, addr, mem_index);
1134 else if (size == 2) {
1135 if (sign)
1136 tcg_gen_qemu_ld16s(dst, addr, mem_index);
1137 else
1138 tcg_gen_qemu_ld16u(dst, addr, mem_index);
1140 else if (size == 4) {
1141 tcg_gen_qemu_ld32u(dst, addr, mem_index);
1143 else {
1144 abort();
1148 static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
1149 unsigned int size)
1151 int mem_index = cpu_mmu_index(dc->env);
1153 /* If we get a fault on a delayslot we must keep the jmp state in
1154 the cpu-state to be able to re-execute the jmp. */
1155 if (dc->delayed_branch == 1)
1156 cris_store_direct_jmp(dc);
1159 /* Conditional writes. We only support the kind where X and P are known
1160 at translation time. */
1161 if (dc->flagx_known && dc->flags_x && (dc->tb_flags & P_FLAG)) {
1162 dc->postinc = 0;
1163 cris_evaluate_flags(dc);
1164 tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], C_FLAG);
1165 return;
1168 if (size == 1)
1169 tcg_gen_qemu_st8(val, addr, mem_index);
1170 else if (size == 2)
1171 tcg_gen_qemu_st16(val, addr, mem_index);
1172 else
1173 tcg_gen_qemu_st32(val, addr, mem_index);
1175 if (dc->flagx_known && dc->flags_x) {
1176 cris_evaluate_flags(dc);
1177 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~C_FLAG);
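/* Stores honour the CRIS conditional-write (X/P) semantics: when both the
   X and P flags are known to be set at translation time the store is
   suppressed and the C flag is set instead, while a normal store with X
   known set clears C afterwards. */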
1181 static inline void t_gen_sext(TCGv d, TCGv s, int size)
1183 if (size == 1)
1184 tcg_gen_ext8s_i32(d, s);
1185 else if (size == 2)
1186 tcg_gen_ext16s_i32(d, s);
1187 else if(!TCGV_EQUAL(d, s))
1188 tcg_gen_mov_tl(d, s);
1191 static inline void t_gen_zext(TCGv d, TCGv s, int size)
1193 if (size == 1)
1194 tcg_gen_ext8u_i32(d, s);
1195 else if (size == 2)
1196 tcg_gen_ext16u_i32(d, s);
1197 else if (!TCGV_EQUAL(d, s))
1198 tcg_gen_mov_tl(d, s);
1201 #if DISAS_CRIS
1202 static char memsize_char(int size)
1204 switch (size)
1206 case 1: return 'b'; break;
1207 case 2: return 'w'; break;
1208 case 4: return 'd'; break;
1209 default:
1210 return 'x';
1211 break;
1214 #endif
1216 static inline unsigned int memsize_z(DisasContext *dc)
1218 return dc->zsize + 1;
1221 static inline unsigned int memsize_zz(DisasContext *dc)
1223 switch (dc->zzsize)
1225 case 0: return 1;
1226 case 1: return 2;
1227 default:
1228 return 4;
1232 static inline void do_postinc (DisasContext *dc, int size)
1234 if (dc->postinc)
1235 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], size);
1238 static inline void dec_prep_move_r(DisasContext *dc, int rs, int rd,
1239 int size, int s_ext, TCGv dst)
1241 if (s_ext)
1242 t_gen_sext(dst, cpu_R[rs], size);
1243 else
1244 t_gen_zext(dst, cpu_R[rs], size);
1247 /* Prepare T0 and T1 for a register alu operation.
1248 s_ext decides if the operand1 should be sign-extended or zero-extended when
1249 needed. */
1250 static void dec_prep_alu_r(DisasContext *dc, int rs, int rd,
1251 int size, int s_ext, TCGv dst, TCGv src)
1253 dec_prep_move_r(dc, rs, rd, size, s_ext, src);
1255 if (s_ext)
1256 t_gen_sext(dst, cpu_R[rd], size);
1257 else
1258 t_gen_zext(dst, cpu_R[rd], size);
1261 static int dec_prep_move_m(DisasContext *dc, int s_ext, int memsize,
1262 TCGv dst)
1264 unsigned int rs, rd;
1265 uint32_t imm;
1266 int is_imm;
1267 int insn_len = 2;
1269 rs = dc->op1;
1270 rd = dc->op2;
1271 is_imm = rs == 15 && dc->postinc;
1273 /* Load [$rs] onto T1. */
1274 if (is_imm) {
1275 insn_len = 2 + memsize;
1276 if (memsize == 1)
1277 insn_len++;
1279 if (memsize != 4) {
1280 if (s_ext) {
1281 if (memsize == 1)
1282 imm = ldsb_code(dc->pc + 2);
1283 else
1284 imm = ldsw_code(dc->pc + 2);
1285 } else {
1286 if (memsize == 1)
1287 imm = ldub_code(dc->pc + 2);
1288 else
1289 imm = lduw_code(dc->pc + 2);
1291 } else
1292 imm = ldl_code(dc->pc + 2);
1294 tcg_gen_movi_tl(dst, imm);
1295 dc->postinc = 0;
1296 } else {
1297 cris_flush_cc_state(dc);
1298 gen_load(dc, dst, cpu_R[rs], memsize, 0);
1299 if (s_ext)
1300 t_gen_sext(dst, dst, memsize);
1301 else
1302 t_gen_zext(dst, dst, memsize);
1304 return insn_len;
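/* An operand-register field of 15 together with post-increment denotes the
   immediate addressing mode: the operand is fetched from the insn stream
   right after the opcode word, insn_len grows by the operand size (rounded
   up to a whole word), and the post-increment is cancelled because the pc
   advance past the insn already consumes the data. */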
1307 /* Prepare T0 and T1 for a memory + alu operation.
1308 s_ext decides if the operand1 should be sign-extended or zero-extended when
1309 needed. */
1310 static int dec_prep_alu_m(DisasContext *dc, int s_ext, int memsize,
1311 TCGv dst, TCGv src)
1313 int insn_len;
1315 insn_len = dec_prep_move_m(dc, s_ext, memsize, src);
1316 tcg_gen_mov_tl(dst, cpu_R[dc->op2]);
1317 return insn_len;
1320 #if DISAS_CRIS
1321 static const char *cc_name(int cc)
1323 static const char *cc_names[16] = {
1324 "cc", "cs", "ne", "eq", "vc", "vs", "pl", "mi",
1325 "ls", "hi", "ge", "lt", "gt", "le", "a", "p"
1327 assert(cc < 16);
1328 return cc_names[cc];
1330 #endif
1332 /* Start of insn decoders. */
1334 static unsigned int dec_bccq(DisasContext *dc)
1336 int32_t offset;
1337 int sign;
1338 uint32_t cond = dc->op2;
1339 int tmp;
1341 offset = EXTRACT_FIELD (dc->ir, 1, 7);
1342 sign = EXTRACT_FIELD(dc->ir, 0, 0);
1344 offset *= 2;
1345 offset |= sign << 8;
1346 tmp = offset;
1347 offset = sign_extend(offset, 8);
1349 LOG_DIS("b%s %x\n", cc_name(cond), dc->pc + offset);
1351 /* op2 holds the condition-code. */
1352 cris_cc_mask(dc, 0);
1353 cris_prepare_cc_branch (dc, offset, cond);
1354 return 2;
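/* The quick branch encodes a signed, word-aligned displacement: bits 7..1
   give the magnitude in steps of two bytes and bit 0 the sign, for a range
   of -256..+254 bytes relative to the branch insn itself. */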
1356 static unsigned int dec_addoq(DisasContext *dc)
1358 int32_t imm;
1360 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 7);
1361 imm = sign_extend(dc->op1, 7);
1363 LOG_DIS("addoq %d, $r%u\n", imm, dc->op2);
1364 cris_cc_mask(dc, 0);
1365 /* Fetch register operand. */
1366 tcg_gen_addi_tl(cpu_R[R_ACR], cpu_R[dc->op2], imm);
1368 return 2;
1370 static unsigned int dec_addq(DisasContext *dc)
1372 LOG_DIS("addq %u, $r%u\n", dc->op1, dc->op2);
1374 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1376 cris_cc_mask(dc, CC_MASK_NZVC);
1378 cris_alu(dc, CC_OP_ADD,
1379 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
1380 return 2;
1382 static unsigned int dec_moveq(DisasContext *dc)
1384 uint32_t imm;
1386 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1387 imm = sign_extend(dc->op1, 5);
1388 LOG_DIS("moveq %d, $r%u\n", imm, dc->op2);
1390 tcg_gen_mov_tl(cpu_R[dc->op2], tcg_const_tl(imm));
1391 return 2;
1393 static unsigned int dec_subq(DisasContext *dc)
1395 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1397 LOG_DIS("subq %u, $r%u\n", dc->op1, dc->op2);
1399 cris_cc_mask(dc, CC_MASK_NZVC);
1400 cris_alu(dc, CC_OP_SUB,
1401 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
1402 return 2;
1404 static unsigned int dec_cmpq(DisasContext *dc)
1406 uint32_t imm;
1407 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1408 imm = sign_extend(dc->op1, 5);
1410 LOG_DIS("cmpq %d, $r%d\n", imm, dc->op2);
1411 cris_cc_mask(dc, CC_MASK_NZVC);
1413 cris_alu(dc, CC_OP_CMP,
1414 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1415 return 2;
1417 static unsigned int dec_andq(DisasContext *dc)
1419 uint32_t imm;
1420 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1421 imm = sign_extend(dc->op1, 5);
1423 LOG_DIS("andq %d, $r%d\n", imm, dc->op2);
1424 cris_cc_mask(dc, CC_MASK_NZ);
1426 cris_alu(dc, CC_OP_AND,
1427 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1428 return 2;
1430 static unsigned int dec_orq(DisasContext *dc)
1432 uint32_t imm;
1433 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1434 imm = sign_extend(dc->op1, 5);
1435 LOG_DIS("orq %d, $r%d\n", imm, dc->op2);
1436 cris_cc_mask(dc, CC_MASK_NZ);
1438 cris_alu(dc, CC_OP_OR,
1439 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1440 return 2;
1442 static unsigned int dec_btstq(DisasContext *dc)
1444 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1445 LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2);
1447 cris_cc_mask(dc, CC_MASK_NZ);
1448 cris_evaluate_flags(dc);
1449 gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
1450 tcg_const_tl(dc->op1), cpu_PR[PR_CCS]);
1451 cris_alu(dc, CC_OP_MOVE,
1452 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
1453 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
1454 dc->flags_uptodate = 1;
1455 return 2;
1457 static unsigned int dec_asrq(DisasContext *dc)
1459 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1460 LOG_DIS("asrq %u, $r%d\n", dc->op1, dc->op2);
1461 cris_cc_mask(dc, CC_MASK_NZ);
1463 tcg_gen_sari_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1464 cris_alu(dc, CC_OP_MOVE,
1465 cpu_R[dc->op2],
1466 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1467 return 2;
1469 static unsigned int dec_lslq(DisasContext *dc)
1471 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1472 LOG_DIS("lslq %u, $r%d\n", dc->op1, dc->op2);
1474 cris_cc_mask(dc, CC_MASK_NZ);
1476 tcg_gen_shli_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1478 cris_alu(dc, CC_OP_MOVE,
1479 cpu_R[dc->op2],
1480 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1481 return 2;
1483 static unsigned int dec_lsrq(DisasContext *dc)
1485 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1486 LOG_DIS("lsrq %u, $r%d\n", dc->op1, dc->op2);
1488 cris_cc_mask(dc, CC_MASK_NZ);
1490 tcg_gen_shri_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1491 cris_alu(dc, CC_OP_MOVE,
1492 cpu_R[dc->op2],
1493 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1494 return 2;
1497 static unsigned int dec_move_r(DisasContext *dc)
1499 int size = memsize_zz(dc);
1501 LOG_DIS("move.%c $r%u, $r%u\n",
1502 memsize_char(size), dc->op1, dc->op2);
1504 cris_cc_mask(dc, CC_MASK_NZ);
1505 if (size == 4) {
1506 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, cpu_R[dc->op2]);
1507 cris_cc_mask(dc, CC_MASK_NZ);
1508 cris_update_cc_op(dc, CC_OP_MOVE, 4);
1509 cris_update_cc_x(dc);
1510 cris_update_result(dc, cpu_R[dc->op2]);
1512 else {
1513 TCGv t0;
1515 t0 = tcg_temp_new();
1516 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
1517 cris_alu(dc, CC_OP_MOVE,
1518 cpu_R[dc->op2],
1519 cpu_R[dc->op2], t0, size);
1520 tcg_temp_free(t0);
1522 return 2;
1525 static unsigned int dec_scc_r(DisasContext *dc)
1527 int cond = dc->op2;
1529 LOG_DIS("s%s $r%u\n",
1530 cc_name(cond), dc->op1);
1532 if (cond != CC_A)
1534 int l1;
1536 gen_tst_cc (dc, cpu_R[dc->op1], cond);
1537 l1 = gen_new_label();
1538 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_R[dc->op1], 0, l1);
1539 tcg_gen_movi_tl(cpu_R[dc->op1], 1);
1540 gen_set_label(l1);
1542 else
1543 tcg_gen_movi_tl(cpu_R[dc->op1], 1);
1545 cris_cc_mask(dc, 0);
1546 return 2;
1549 static inline void cris_alu_alloc_temps(DisasContext *dc, int size, TCGv *t)
1551 if (size == 4) {
1552 t[0] = cpu_R[dc->op2];
1553 t[1] = cpu_R[dc->op1];
1554 } else {
1555 t[0] = tcg_temp_new();
1556 t[1] = tcg_temp_new();
1560 static inline void cris_alu_free_temps(DisasContext *dc, int size, TCGv *t)
1562 if (size != 4) {
1563 tcg_temp_free(t[0]);
1564 tcg_temp_free(t[1]);
1568 static unsigned int dec_and_r(DisasContext *dc)
1570 TCGv t[2];
1571 int size = memsize_zz(dc);
1573 LOG_DIS("and.%c $r%u, $r%u\n",
1574 memsize_char(size), dc->op1, dc->op2);
1576 cris_cc_mask(dc, CC_MASK_NZ);
1578 cris_alu_alloc_temps(dc, size, t);
1579 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1580 cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], size);
1581 cris_alu_free_temps(dc, size, t);
1582 return 2;
1585 static unsigned int dec_lz_r(DisasContext *dc)
1587 TCGv t0;
1588 LOG_DIS("lz $r%u, $r%u\n",
1589 dc->op1, dc->op2);
1590 cris_cc_mask(dc, CC_MASK_NZ);
1591 t0 = tcg_temp_new();
1592 dec_prep_alu_r(dc, dc->op1, dc->op2, 4, 0, cpu_R[dc->op2], t0);
1593 cris_alu(dc, CC_OP_LZ, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
1594 tcg_temp_free(t0);
1595 return 2;
1598 static unsigned int dec_lsl_r(DisasContext *dc)
1600 TCGv t[2];
1601 int size = memsize_zz(dc);
1603 LOG_DIS("lsl.%c $r%u, $r%u\n",
1604 memsize_char(size), dc->op1, dc->op2);
1606 cris_cc_mask(dc, CC_MASK_NZ);
1607 cris_alu_alloc_temps(dc, size, t);
1608 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1609 tcg_gen_andi_tl(t[1], t[1], 63);
1610 cris_alu(dc, CC_OP_LSL, cpu_R[dc->op2], t[0], t[1], size);
1611 cris_alu_free_temps(dc, size, t);
1612 return 2;
1615 static unsigned int dec_lsr_r(DisasContext *dc)
1617 TCGv t[2];
1618 int size = memsize_zz(dc);
1620 LOG_DIS("lsr.%c $r%u, $r%u\n",
1621 memsize_char(size), dc->op1, dc->op2);
1623 cris_cc_mask(dc, CC_MASK_NZ);
1624 cris_alu_alloc_temps(dc, size, t);
1625 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1626 tcg_gen_andi_tl(t[1], t[1], 63);
1627 cris_alu(dc, CC_OP_LSR, cpu_R[dc->op2], t[0], t[1], size);
1628 cris_alu_free_temps(dc, size, t);
1629 return 2;
1632 static unsigned int dec_asr_r(DisasContext *dc)
1634 TCGv t[2];
1635 int size = memsize_zz(dc);
1637 LOG_DIS("asr.%c $r%u, $r%u\n",
1638 memsize_char(size), dc->op1, dc->op2);
1640 cris_cc_mask(dc, CC_MASK_NZ);
1641 cris_alu_alloc_temps(dc, size, t);
1642 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
1643 tcg_gen_andi_tl(t[1], t[1], 63);
1644 cris_alu(dc, CC_OP_ASR, cpu_R[dc->op2], t[0], t[1], size);
1645 cris_alu_free_temps(dc, size, t);
1646 return 2;
1649 static unsigned int dec_muls_r(DisasContext *dc)
1651 TCGv t[2];
1652 int size = memsize_zz(dc);
1654 LOG_DIS("muls.%c $r%u, $r%u\n",
1655 memsize_char(size), dc->op1, dc->op2);
1656 cris_cc_mask(dc, CC_MASK_NZV);
1657 cris_alu_alloc_temps(dc, size, t);
1658 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
1660 cris_alu(dc, CC_OP_MULS, cpu_R[dc->op2], t[0], t[1], 4);
1661 cris_alu_free_temps(dc, size, t);
1662 return 2;
1665 static unsigned int dec_mulu_r(DisasContext *dc)
1667 TCGv t[2];
1668 int size = memsize_zz(dc);
1670 LOG_DIS("mulu.%c $r%u, $r%u\n",
1671 memsize_char(size), dc->op1, dc->op2);
1672 cris_cc_mask(dc, CC_MASK_NZV);
1673 cris_alu_alloc_temps(dc, size, t);
1674 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1676 cris_alu(dc, CC_OP_MULU, cpu_R[dc->op2], t[0], t[1], 4);
1677 cris_alu_free_temps(dc, size, t);
1678 return 2;
1682 static unsigned int dec_dstep_r(DisasContext *dc)
1684 LOG_DIS("dstep $r%u, $r%u\n", dc->op1, dc->op2);
1685 cris_cc_mask(dc, CC_MASK_NZ);
1686 cris_alu(dc, CC_OP_DSTEP,
1687 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
1688 return 2;
1691 static unsigned int dec_xor_r(DisasContext *dc)
1693 TCGv t[2];
1694 int size = memsize_zz(dc);
1695 LOG_DIS("xor.%c $r%u, $r%u\n",
1696 memsize_char(size), dc->op1, dc->op2);
1697 BUG_ON(size != 4); /* xor is dword. */
1698 cris_cc_mask(dc, CC_MASK_NZ);
1699 cris_alu_alloc_temps(dc, size, t);
1700 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1702 cris_alu(dc, CC_OP_XOR, cpu_R[dc->op2], t[0], t[1], 4);
1703 cris_alu_free_temps(dc, size, t);
1704 return 2;
1707 static unsigned int dec_bound_r(DisasContext *dc)
1709 TCGv l0;
1710 int size = memsize_zz(dc);
1711 LOG_DIS("bound.%c $r%u, $r%u\n",
1712 memsize_char(size), dc->op1, dc->op2);
1713 cris_cc_mask(dc, CC_MASK_NZ);
1714 l0 = tcg_temp_local_new();
1715 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, l0);
1716 cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], cpu_R[dc->op2], l0, 4);
1717 tcg_temp_free(l0);
1718 return 2;
1721 static unsigned int dec_cmp_r(DisasContext *dc)
1723 TCGv t[2];
1724 int size = memsize_zz(dc);
1725 LOG_DIS("cmp.%c $r%u, $r%u\n",
1726 memsize_char(size), dc->op1, dc->op2);
1727 cris_cc_mask(dc, CC_MASK_NZVC);
1728 cris_alu_alloc_temps(dc, size, t);
1729 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1731 cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], t[0], t[1], size);
1732 cris_alu_free_temps(dc, size, t);
1733 return 2;
1736 static unsigned int dec_abs_r(DisasContext *dc)
1738 TCGv t0;
1740 LOG_DIS("abs $r%u, $r%u\n",
1741 dc->op1, dc->op2);
1742 cris_cc_mask(dc, CC_MASK_NZ);
1744 t0 = tcg_temp_new();
1745 tcg_gen_sari_tl(t0, cpu_R[dc->op1], 31);
1746 tcg_gen_xor_tl(cpu_R[dc->op2], cpu_R[dc->op1], t0);
1747 tcg_gen_sub_tl(cpu_R[dc->op2], cpu_R[dc->op2], t0);
1748 tcg_temp_free(t0);
1750 cris_alu(dc, CC_OP_MOVE,
1751 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
1752 return 2;
1755 static unsigned int dec_add_r(DisasContext *dc)
1757 TCGv t[2];
1758 int size = memsize_zz(dc);
1759 LOG_DIS("add.%c $r%u, $r%u\n",
1760 memsize_char(size), dc->op1, dc->op2);
1761 cris_cc_mask(dc, CC_MASK_NZVC);
1762 cris_alu_alloc_temps(dc, size, t);
1763 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1765 cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], size);
1766 cris_alu_free_temps(dc, size, t);
1767 return 2;
1770 static unsigned int dec_addc_r(DisasContext *dc)
1772 LOG_DIS("addc $r%u, $r%u\n",
1773 dc->op1, dc->op2);
1774 cris_evaluate_flags(dc);
1775 /* Set for this insn. */
1776 dc->flagx_known = 1;
1777 dc->flags_x = X_FLAG;
1779 cris_cc_mask(dc, CC_MASK_NZVC);
1780 cris_alu(dc, CC_OP_ADDC,
1781 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
1782 return 2;
1785 static unsigned int dec_mcp_r(DisasContext *dc)
1787 LOG_DIS("mcp $p%u, $r%u\n",
1788 dc->op2, dc->op1);
1789 cris_evaluate_flags(dc);
1790 cris_cc_mask(dc, CC_MASK_RNZV);
1791 cris_alu(dc, CC_OP_MCP,
1792 cpu_R[dc->op1], cpu_R[dc->op1], cpu_PR[dc->op2], 4);
1793 return 2;
1796 #if DISAS_CRIS
1797 static char * swapmode_name(int mode, char *modename) {
1798 int i = 0;
1799 if (mode & 8)
1800 modename[i++] = 'n';
1801 if (mode & 4)
1802 modename[i++] = 'w';
1803 if (mode & 2)
1804 modename[i++] = 'b';
1805 if (mode & 1)
1806 modename[i++] = 'r';
1807 modename[i++] = 0;
1808 return modename;
1810 #endif
1812 static unsigned int dec_swap_r(DisasContext *dc)
1814 TCGv t0;
1815 #if DISAS_CRIS
1816 char modename[5];
1817 #endif
1818 LOG_DIS("swap%s $r%u\n",
1819 swapmode_name(dc->op2, modename), dc->op1);
1821 cris_cc_mask(dc, CC_MASK_NZ);
1822 t0 = tcg_temp_new();
1823 t_gen_mov_TN_reg(t0, dc->op1);
1824 if (dc->op2 & 8)
1825 tcg_gen_not_tl(t0, t0);
1826 if (dc->op2 & 4)
1827 t_gen_swapw(t0, t0);
1828 if (dc->op2 & 2)
1829 t_gen_swapb(t0, t0);
1830 if (dc->op2 & 1)
1831 t_gen_swapr(t0, t0);
1832 cris_alu(dc, CC_OP_MOVE,
1833 cpu_R[dc->op1], cpu_R[dc->op1], t0, 4);
1834 tcg_temp_free(t0);
1835 return 2;
1838 static unsigned int dec_or_r(DisasContext *dc)
1840 TCGv t[2];
1841 int size = memsize_zz(dc);
1842 LOG_DIS("or.%c $r%u, $r%u\n",
1843 memsize_char(size), dc->op1, dc->op2);
1844 cris_cc_mask(dc, CC_MASK_NZ);
1845 cris_alu_alloc_temps(dc, size, t);
1846 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1847 cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], t[0], t[1], size);
1848 cris_alu_free_temps(dc, size, t);
1849 return 2;
1852 static unsigned int dec_addi_r(DisasContext *dc)
1854 TCGv t0;
1855 LOG_DIS("addi.%c $r%u, $r%u\n",
1856 memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
1857 cris_cc_mask(dc, 0);
1858 t0 = tcg_temp_new();
1859 tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize));
1860 tcg_gen_add_tl(cpu_R[dc->op1], cpu_R[dc->op1], t0);
1861 tcg_temp_free(t0);
1862 return 2;
1865 static unsigned int dec_addi_acr(DisasContext *dc)
1867 TCGv t0;
1868 LOG_DIS("addi.%c $r%u, $r%u, $acr\n",
1869 memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
1870 cris_cc_mask(dc, 0);
1871 t0 = tcg_temp_new();
1872 tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize));
1873 tcg_gen_add_tl(cpu_R[R_ACR], cpu_R[dc->op1], t0);
1874 tcg_temp_free(t0);
1875 return 2;
1878 static unsigned int dec_neg_r(DisasContext *dc)
1880 TCGv t[2];
1881 int size = memsize_zz(dc);
1882 LOG_DIS("neg.%c $r%u, $r%u\n",
1883 memsize_char(size), dc->op1, dc->op2);
1884 cris_cc_mask(dc, CC_MASK_NZVC);
1885 cris_alu_alloc_temps(dc, size, t);
1886 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1888 cris_alu(dc, CC_OP_NEG, cpu_R[dc->op2], t[0], t[1], size);
1889 cris_alu_free_temps(dc, size, t);
1890 return 2;
1893 static unsigned int dec_btst_r(DisasContext *dc)
1895 LOG_DIS("btst $r%u, $r%u\n",
1896 dc->op1, dc->op2);
1897 cris_cc_mask(dc, CC_MASK_NZ);
1898 cris_evaluate_flags(dc);
1899 gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
1900 cpu_R[dc->op1], cpu_PR[PR_CCS]);
1901 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2],
1902 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1903 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
1904 dc->flags_uptodate = 1;
1905 return 2;
1908 static unsigned int dec_sub_r(DisasContext *dc)
1910 TCGv t[2];
1911 int size = memsize_zz(dc);
1912 LOG_DIS("sub.%c $r%u, $r%u\n",
1913 memsize_char(size), dc->op1, dc->op2);
1914 cris_cc_mask(dc, CC_MASK_NZVC);
1915 cris_alu_alloc_temps(dc, size, t);
1916 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1917 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], size);
1918 cris_alu_free_temps(dc, size, t);
1919 return 2;
1922 /* Zero extension. From size to dword. */
1923 static unsigned int dec_movu_r(DisasContext *dc)
1925 TCGv t0;
1926 int size = memsize_z(dc);
1927 LOG_DIS("movu.%c $r%u, $r%u\n",
1928 memsize_char(size),
1929 dc->op1, dc->op2);
1931 cris_cc_mask(dc, CC_MASK_NZ);
1932 t0 = tcg_temp_new();
1933 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
1934 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
1935 tcg_temp_free(t0);
1936 return 2;
1939 /* Sign extension. From size to dword. */
1940 static unsigned int dec_movs_r(DisasContext *dc)
1942 TCGv t0;
1943 int size = memsize_z(dc);
1944 LOG_DIS("movs.%c $r%u, $r%u\n",
1945 memsize_char(size),
1946 dc->op1, dc->op2);
1948 cris_cc_mask(dc, CC_MASK_NZ);
1949 t0 = tcg_temp_new();
1950 /* Size can only be qi or hi. */
1951 t_gen_sext(t0, cpu_R[dc->op1], size);
1952 cris_alu(dc, CC_OP_MOVE,
1953 cpu_R[dc->op2], cpu_R[dc->op1], t0, 4);
1954 tcg_temp_free(t0);
1955 return 2;
1958 /* zero extension. From size to dword. */
1959 static unsigned int dec_addu_r(DisasContext *dc)
1961 TCGv t0;
1962 int size = memsize_z(dc);
1963 LOG_DIS("addu.%c $r%u, $r%u\n",
1964 memsize_char(size),
1965 dc->op1, dc->op2);
1967 cris_cc_mask(dc, CC_MASK_NZVC);
1968 t0 = tcg_temp_new();
1969 /* Size can only be qi or hi. */
1970 t_gen_zext(t0, cpu_R[dc->op1], size);
1971 cris_alu(dc, CC_OP_ADD,
1972 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
1973 tcg_temp_free(t0);
1974 return 2;
1977 /* Sign extension. From size to dword. */
1978 static unsigned int dec_adds_r(DisasContext *dc)
1980 TCGv t0;
1981 int size = memsize_z(dc);
1982 LOG_DIS("adds.%c $r%u, $r%u\n",
1983 memsize_char(size),
1984 dc->op1, dc->op2);
1986 cris_cc_mask(dc, CC_MASK_NZVC);
1987 t0 = tcg_temp_new();
1988 /* Size can only be qi or hi. */
1989 t_gen_sext(t0, cpu_R[dc->op1], size);
1990 cris_alu(dc, CC_OP_ADD,
1991 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
1992 tcg_temp_free(t0);
1993 return 2;
1996 /* Zero extension. From size to dword. */
1997 static unsigned int dec_subu_r(DisasContext *dc)
1999 TCGv t0;
2000 int size = memsize_z(dc);
2001 LOG_DIS("subu.%c $r%u, $r%u\n",
2002 memsize_char(size),
2003 dc->op1, dc->op2);
2005 cris_cc_mask(dc, CC_MASK_NZVC);
2006 t0 = tcg_temp_new();
2007 /* Size can only be qi or hi. */
2008 t_gen_zext(t0, cpu_R[dc->op1], size);
2009 cris_alu(dc, CC_OP_SUB,
2010 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2011 tcg_temp_free(t0);
2012 return 2;
2015 /* Sign extension. From size to dword. */
2016 static unsigned int dec_subs_r(DisasContext *dc)
2018 TCGv t0;
2019 int size = memsize_z(dc);
2020 LOG_DIS("subs.%c $r%u, $r%u\n",
2021 memsize_char(size),
2022 dc->op1, dc->op2);
2024 cris_cc_mask(dc, CC_MASK_NZVC);
2025 t0 = tcg_temp_new();
2026 /* Size can only be qi or hi. */
2027 t_gen_sext(t0, cpu_R[dc->op1], size);
2028 cris_alu(dc, CC_OP_SUB,
2029 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2030 tcg_temp_free(t0);
2031 return 2;
2034 static unsigned int dec_setclrf(DisasContext *dc)
2036 uint32_t flags;
2037 int set = (~dc->opcode >> 2) & 1;
2040 flags = (EXTRACT_FIELD(dc->ir, 12, 15) << 4)
2041 | EXTRACT_FIELD(dc->ir, 0, 3);
2042 if (set && flags == 0) {
2043 LOG_DIS("nop\n");
2044 return 2;
2045 } else if (!set && (flags & 0x20)) {
2046 LOG_DIS("di\n");
2048 else {
2049 LOG_DIS("%sf %x\n",
2050 set ? "set" : "clr",
2051 flags);
2054 /* User space is not allowed to touch these. Silently ignore. */
2055 if (dc->tb_flags & U_FLAG) {
2056 flags &= ~(S_FLAG | I_FLAG | U_FLAG);
2059 if (flags & X_FLAG) {
2060 dc->flagx_known = 1;
2061 if (set)
2062 dc->flags_x = X_FLAG;
2063 else
2064 dc->flags_x = 0;
2067 /* Break the TB if the P flag changes. */
2068 if (flags & P_FLAG) {
2069 if ((set && !(dc->tb_flags & P_FLAG))
2070 || (!set && (dc->tb_flags & P_FLAG))) {
2071 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2072 dc->is_jmp = DISAS_UPDATE;
2073 dc->cpustate_changed = 1;
2076 if (flags & S_FLAG) {
2077 dc->cpustate_changed = 1;
2081 /* Simply decode the flags. */
2082 cris_evaluate_flags (dc);
2083 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
2084 cris_update_cc_x(dc);
2085 tcg_gen_movi_tl(cc_op, dc->cc_op);
2087 if (set) {
2088 if (!(dc->tb_flags & U_FLAG) && (flags & U_FLAG)) {
2089 /* Enter user mode. */
2090 t_gen_mov_env_TN(ksp, cpu_R[R_SP]);
2091 tcg_gen_mov_tl(cpu_R[R_SP], cpu_PR[PR_USP]);
2092 dc->cpustate_changed = 1;
2094 tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags);
2096 else
2097 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~flags);
2099 dc->flags_uptodate = 1;
2100 dc->clear_x = 0;
2101 return 2;
2104 static unsigned int dec_move_rs(DisasContext *dc)
2106 LOG_DIS("move $r%u, $s%u\n", dc->op1, dc->op2);
2107 cris_cc_mask(dc, 0);
2108 gen_helper_movl_sreg_reg(tcg_const_tl(dc->op2), tcg_const_tl(dc->op1));
2109 return 2;
2111 static unsigned int dec_move_sr(DisasContext *dc)
2113 LOG_DIS("move $s%u, $r%u\n", dc->op2, dc->op1);
2114 cris_cc_mask(dc, 0);
2115 gen_helper_movl_reg_sreg(tcg_const_tl(dc->op1), tcg_const_tl(dc->op2));
2116 return 2;
2119 static unsigned int dec_move_rp(DisasContext *dc)
2121 TCGv t[2];
2122 LOG_DIS("move $r%u, $p%u\n", dc->op1, dc->op2);
2123 cris_cc_mask(dc, 0);
2125 t[0] = tcg_temp_new();
2126 if (dc->op2 == PR_CCS) {
2127 cris_evaluate_flags(dc);
2128 t_gen_mov_TN_reg(t[0], dc->op1);
2129 if (dc->tb_flags & U_FLAG) {
2130 t[1] = tcg_temp_new();
2131 /* User space is not allowed to touch all flags. */
2132 tcg_gen_andi_tl(t[0], t[0], 0x39f);
2133 tcg_gen_andi_tl(t[1], cpu_PR[PR_CCS], ~0x39f);
2134 tcg_gen_or_tl(t[0], t[1], t[0]);
2135 tcg_temp_free(t[1]);
2138 else
2139 t_gen_mov_TN_reg(t[0], dc->op1);
2141 t_gen_mov_preg_TN(dc, dc->op2, t[0]);
2142 if (dc->op2 == PR_CCS) {
2143 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
2144 dc->flags_uptodate = 1;
2146 tcg_temp_free(t[0]);
2147 return 2;
2149 static unsigned int dec_move_pr(DisasContext *dc)
2151 TCGv t0;
2152 LOG_DIS("move $p%u, $r%u\n", dc->op1, dc->op2);
2153 cris_cc_mask(dc, 0);
2155 if (dc->op2 == PR_CCS)
2156 cris_evaluate_flags(dc);
2158 t0 = tcg_temp_new();
2159 t_gen_mov_TN_preg(t0, dc->op2);
2160 cris_alu(dc, CC_OP_MOVE,
2161 cpu_R[dc->op1], cpu_R[dc->op1], t0, preg_sizes[dc->op2]);
2162 tcg_temp_free(t0);
2163 return 2;
2166 static unsigned int dec_move_mr(DisasContext *dc)
2168 int memsize = memsize_zz(dc);
2169 int insn_len;
2170 LOG_DIS("move.%c [$r%u%s, $r%u\n",
2171 memsize_char(memsize),
2172 dc->op1, dc->postinc ? "+]" : "]",
2173 dc->op2);
2175 if (memsize == 4) {
2176 insn_len = dec_prep_move_m(dc, 0, 4, cpu_R[dc->op2]);
2177 cris_cc_mask(dc, CC_MASK_NZ);
2178 cris_update_cc_op(dc, CC_OP_MOVE, 4);
2179 cris_update_cc_x(dc);
2180 cris_update_result(dc, cpu_R[dc->op2]);
2182 else {
2183 TCGv t0;
2185 t0 = tcg_temp_new();
2186 insn_len = dec_prep_move_m(dc, 0, memsize, t0);
2187 cris_cc_mask(dc, CC_MASK_NZ);
2188 cris_alu(dc, CC_OP_MOVE,
2189 cpu_R[dc->op2], cpu_R[dc->op2], t0, memsize);
2190 tcg_temp_free(t0);
2192 do_postinc(dc, memsize);
2193 return insn_len;
2196 static inline void cris_alu_m_alloc_temps(TCGv *t)
2198 t[0] = tcg_temp_new();
2199 t[1] = tcg_temp_new();
2202 static inline void cris_alu_m_free_temps(TCGv *t)
2204 tcg_temp_free(t[0]);
2205 tcg_temp_free(t[1]);
2208 static unsigned int dec_movs_m(DisasContext *dc)
2210 TCGv t[2];
2211 int memsize = memsize_z(dc);
2212 int insn_len;
2213 LOG_DIS("movs.%c [$r%u%s, $r%u\n",
2214 memsize_char(memsize),
2215 dc->op1, dc->postinc ? "+]" : "]",
2216 dc->op2);
2218 cris_alu_m_alloc_temps(t);
2219 /* sign extend. */
2220 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2221 cris_cc_mask(dc, CC_MASK_NZ);
2222 cris_alu(dc, CC_OP_MOVE,
2223 cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2224 do_postinc(dc, memsize);
2225 cris_alu_m_free_temps(t);
2226 return insn_len;
2229 static unsigned int dec_addu_m(DisasContext *dc)
2231 TCGv t[2];
2232 int memsize = memsize_z(dc);
2233 int insn_len;
2234 LOG_DIS("addu.%c [$r%u%s, $r%u\n",
2235 memsize_char(memsize),
2236 dc->op1, dc->postinc ? "+]" : "]",
2237 dc->op2);
2239 cris_alu_m_alloc_temps(t);
2240 /* Zero extend. */
2241 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2242 cris_cc_mask(dc, CC_MASK_NZVC);
2243 cris_alu(dc, CC_OP_ADD,
2244 cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2245 do_postinc(dc, memsize);
2246 cris_alu_m_free_temps(t);
2247 return insn_len;
2250 static unsigned int dec_adds_m(DisasContext *dc)
2252 TCGv t[2];
2253 int memsize = memsize_z(dc);
2254 int insn_len;
2255 LOG_DIS("adds.%c [$r%u%s, $r%u\n",
2256 memsize_char(memsize),
2257 dc->op1, dc->postinc ? "+]" : "]",
2258 dc->op2);
2260 cris_alu_m_alloc_temps(t);
2261 /* sign extend. */
2262 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2263 cris_cc_mask(dc, CC_MASK_NZVC);
2264 cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2265 do_postinc(dc, memsize);
2266 cris_alu_m_free_temps(t);
2267 return insn_len;
2270 static unsigned int dec_subu_m(DisasContext *dc)
2272 TCGv t[2];
2273 int memsize = memsize_z(dc);
2274 int insn_len;
2275 LOG_DIS("subu.%c [$r%u%s, $r%u\n",
2276 memsize_char(memsize),
2277 dc->op1, dc->postinc ? "+]" : "]",
2278 dc->op2);
2280 cris_alu_m_alloc_temps(t);
2281 /* Zero extend. */
2282 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2283 cris_cc_mask(dc, CC_MASK_NZVC);
2284 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2285 do_postinc(dc, memsize);
2286 cris_alu_m_free_temps(t);
2287 return insn_len;
2290 static unsigned int dec_subs_m(DisasContext *dc)
2292 TCGv t[2];
2293 int memsize = memsize_z(dc);
2294 int insn_len;
2295 LOG_DIS("subs.%c [$r%u%s, $r%u\n",
2296 memsize_char(memsize),
2297 dc->op1, dc->postinc ? "+]" : "]",
2298 dc->op2);
2300 cris_alu_m_alloc_temps(t);
2301 /* sign extend. */
2302 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2303 cris_cc_mask(dc, CC_MASK_NZVC);
2304 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2305 do_postinc(dc, memsize);
2306 cris_alu_m_free_temps(t);
2307 return insn_len;
2310 static unsigned int dec_movu_m(DisasContext *dc)
2312 TCGv t[2];
2313 int memsize = memsize_z(dc);
2314 int insn_len;
2316 LOG_DIS("movu.%c [$r%u%s, $r%u\n",
2317 memsize_char(memsize),
2318 dc->op1, dc->postinc ? "+]" : "]",
2319 dc->op2);
2321 cris_alu_m_alloc_temps(t);
2322 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2323 cris_cc_mask(dc, CC_MASK_NZ);
2324 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2325 do_postinc(dc, memsize);
2326 cris_alu_m_free_temps(t);
2327 return insn_len;
2330 static unsigned int dec_cmpu_m(DisasContext *dc)
2332 TCGv t[2];
2333 int memsize = memsize_z(dc);
2334 int insn_len;
2335 LOG_DIS("cmpu.%c [$r%u%s, $r%u\n",
2336 memsize_char(memsize),
2337 dc->op1, dc->postinc ? "+]" : "]",
2338 dc->op2);
2340 cris_alu_m_alloc_temps(t);
2341 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2342 cris_cc_mask(dc, CC_MASK_NZVC);
2343 cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2344 do_postinc(dc, memsize);
2345 cris_alu_m_free_temps(t);
2346 return insn_len;
2349 static unsigned int dec_cmps_m(DisasContext *dc)
2351 TCGv t[2];
2352 int memsize = memsize_z(dc);
2353 int insn_len;
2354 LOG_DIS("cmps.%c [$r%u%s, $r%u\n",
2355 memsize_char(memsize),
2356 dc->op1, dc->postinc ? "+]" : "]",
2357 dc->op2);
2359 cris_alu_m_alloc_temps(t);
2360 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2361 cris_cc_mask(dc, CC_MASK_NZVC);
2362 cris_alu(dc, CC_OP_CMP,
2363 cpu_R[dc->op2], cpu_R[dc->op2], t[1],
2364 memsize_zz(dc));
2365 do_postinc(dc, memsize);
2366 cris_alu_m_free_temps(t);
2367 return insn_len;
2370 static unsigned int dec_cmp_m(DisasContext *dc)
2372 TCGv t[2];
2373 int memsize = memsize_zz(dc);
2374 int insn_len;
2375 LOG_DIS("cmp.%c [$r%u%s, $r%u\n",
2376 memsize_char(memsize),
2377 dc->op1, dc->postinc ? "+]" : "]",
2378 dc->op2);
2380 cris_alu_m_alloc_temps(t);
2381 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2382 cris_cc_mask(dc, CC_MASK_NZVC);
2383 cris_alu(dc, CC_OP_CMP,
2384 cpu_R[dc->op2], cpu_R[dc->op2], t[1],
2385 memsize_zz(dc));
2386 do_postinc(dc, memsize);
2387 cris_alu_m_free_temps(t);
2388 return insn_len;
2391 static unsigned int dec_test_m(DisasContext *dc)
2393 TCGv t[2];
2394 int memsize = memsize_zz(dc);
2395 int insn_len;
2396 LOG_DIS("test.%d [$r%u%s] op2=%x\n",
2397 memsize_char(memsize),
2398 dc->op1, dc->postinc ? "+]" : "]",
2399 dc->op2);
2401 cris_evaluate_flags(dc);
2403 cris_alu_m_alloc_temps(t);
2404 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2405 cris_cc_mask(dc, CC_MASK_NZ);
2406 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
2408 cris_alu(dc, CC_OP_CMP,
2409 cpu_R[dc->op2], t[1], tcg_const_tl(0), memsize_zz(dc));
2410 do_postinc(dc, memsize);
2411 cris_alu_m_free_temps(t);
2412 return insn_len;
2415 static unsigned int dec_and_m(DisasContext *dc)
2417 TCGv t[2];
2418 int memsize = memsize_zz(dc);
2419 int insn_len;
2420 LOG_DIS("and.%d [$r%u%s, $r%u\n",
2421 memsize_char(memsize),
2422 dc->op1, dc->postinc ? "+]" : "]",
2423 dc->op2);
2425 cris_alu_m_alloc_temps(t);
2426 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2427 cris_cc_mask(dc, CC_MASK_NZ);
2428 cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2429 do_postinc(dc, memsize);
2430 cris_alu_m_free_temps(t);
2431 return insn_len;
2434 static unsigned int dec_add_m(DisasContext *dc)
2436 TCGv t[2];
2437 int memsize = memsize_zz(dc);
2438 int insn_len;
2439 LOG_DIS("add.%d [$r%u%s, $r%u\n",
2440 memsize_char(memsize),
2441 dc->op1, dc->postinc ? "+]" : "]",
2442 dc->op2);
2444 cris_alu_m_alloc_temps(t);
2445 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2446 cris_cc_mask(dc, CC_MASK_NZVC);
2447 cris_alu(dc, CC_OP_ADD,
2448 cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2449 do_postinc(dc, memsize);
2450 cris_alu_m_free_temps(t);
2451 return insn_len;
2454 static unsigned int dec_addo_m(DisasContext *dc)
2456 TCGv t[2];
2457 int memsize = memsize_zz(dc);
2458 int insn_len;
2459 LOG_DIS("add.%d [$r%u%s, $r%u\n",
2460 memsize_char(memsize),
2461 dc->op1, dc->postinc ? "+]" : "]",
2462 dc->op2);
2464 cris_alu_m_alloc_temps(t);
2465 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2466 cris_cc_mask(dc, 0);
2467 cris_alu(dc, CC_OP_ADD, cpu_R[R_ACR], t[0], t[1], 4);
2468 do_postinc(dc, memsize);
2469 cris_alu_m_free_temps(t);
2470 return insn_len;
2473 static unsigned int dec_bound_m(DisasContext *dc)
2475 TCGv l[2];
2476 int memsize = memsize_zz(dc);
2477 int insn_len;
2478 LOG_DIS("bound.%d [$r%u%s, $r%u\n",
2479 memsize_char(memsize),
2480 dc->op1, dc->postinc ? "+]" : "]",
2481 dc->op2);
2483 l[0] = tcg_temp_local_new();
2484 l[1] = tcg_temp_local_new();
2485 insn_len = dec_prep_alu_m(dc, 0, memsize, l[0], l[1]);
2486 cris_cc_mask(dc, CC_MASK_NZ);
2487 cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4);
2488 do_postinc(dc, memsize);
2489 tcg_temp_free(l[0]);
2490 tcg_temp_free(l[1]);
2491 return insn_len;
2494 static unsigned int dec_addc_mr(DisasContext *dc)
2496 TCGv t[2];
2497 int insn_len = 2;
2498 LOG_DIS("addc [$r%u%s, $r%u\n",
2499 dc->op1, dc->postinc ? "+]" : "]",
2500 dc->op2);
2502 cris_evaluate_flags(dc);
2504 /* The X flag is known to be set for this insn. */
2505 dc->flagx_known = 1;
2506 dc->flags_x = X_FLAG;
2508 cris_alu_m_alloc_temps(t);
2509 insn_len = dec_prep_alu_m(dc, 0, 4, t[0], t[1]);
2510 cris_cc_mask(dc, CC_MASK_NZVC);
2511 cris_alu(dc, CC_OP_ADDC, cpu_R[dc->op2], t[0], t[1], 4);
2512 do_postinc(dc, 4);
2513 cris_alu_m_free_temps(t);
2514 return insn_len;
2517 static unsigned int dec_sub_m(DisasContext *dc)
2519 TCGv t[2];
2520 int memsize = memsize_zz(dc);
2521 int insn_len;
2522 LOG_DIS("sub.%c [$r%u%s, $r%u ir=%x zz=%x\n",
2523 memsize_char(memsize),
2524 dc->op1, dc->postinc ? "+]" : "]",
2525 dc->op2, dc->ir, dc->zzsize);
2527 cris_alu_m_alloc_temps(t);
2528 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2529 cris_cc_mask(dc, CC_MASK_NZVC);
2530 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], memsize);
2531 do_postinc(dc, memsize);
2532 cris_alu_m_free_temps(t);
2533 return insn_len;
2536 static unsigned int dec_or_m(DisasContext *dc)
2538 TCGv t[2];
2539 int memsize = memsize_zz(dc);
2540 int insn_len;
2541 LOG_DIS("or.%d [$r%u%s, $r%u pc=%x\n",
2542 memsize_char(memsize),
2543 dc->op1, dc->postinc ? "+]" : "]",
2544 dc->op2, dc->pc);
2546 cris_alu_m_alloc_temps(t);
2547 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2548 cris_cc_mask(dc, CC_MASK_NZ);
2549 cris_alu(dc, CC_OP_OR,
2550 cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2551 do_postinc(dc, memsize);
2552 cris_alu_m_free_temps(t);
2553 return insn_len;
2556 static unsigned int dec_move_mp(DisasContext *dc)
2558 TCGv t[2];
2559 int memsize = memsize_zz(dc);
2560 int insn_len = 2;
2562 LOG_DIS("move.%c [$r%u%s, $p%u\n",
2563 memsize_char(memsize),
2564 dc->op1,
2565 dc->postinc ? "+]" : "]",
2566 dc->op2);
2568 cris_alu_m_alloc_temps(t);
2569 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2570 cris_cc_mask(dc, 0);
2571 if (dc->op2 == PR_CCS) {
2572 cris_evaluate_flags(dc);
2573 if (dc->tb_flags & U_FLAG) {
2574 /* User space is not allowed to touch all flags. */
2575 tcg_gen_andi_tl(t[1], t[1], 0x39f);
2576 tcg_gen_andi_tl(t[0], cpu_PR[PR_CCS], ~0x39f);
2577 tcg_gen_or_tl(t[1], t[0], t[1]);
2581 t_gen_mov_preg_TN(dc, dc->op2, t[1]);
2583 do_postinc(dc, memsize);
2584 cris_alu_m_free_temps(t);
2585 return insn_len;
2588 static unsigned int dec_move_pm(DisasContext *dc)
2590 TCGv t0;
2591 int memsize;
2593 memsize = preg_sizes[dc->op2];
2595 LOG_DIS("move.%c $p%u, [$r%u%s\n",
2596 memsize_char(memsize),
2597 dc->op2, dc->op1, dc->postinc ? "+]" : "]");
2599 /* Prepare the store: the address comes from $r[op1], the value from $p[op2]. */
2600 if (dc->op2 == PR_CCS)
2601 cris_evaluate_flags(dc);
2602 t0 = tcg_temp_new();
2603 t_gen_mov_TN_preg(t0, dc->op2);
2604 cris_flush_cc_state(dc);
2605 gen_store(dc, cpu_R[dc->op1], t0, memsize);
2606 tcg_temp_free(t0);
2608 cris_cc_mask(dc, 0);
2609 if (dc->postinc)
2610 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
2611 return 2;
2614 static unsigned int dec_movem_mr(DisasContext *dc)
2616 TCGv_i64 tmp[16];
2617 TCGv tmp32;
2618 TCGv addr;
2619 int i;
2620 int nr = dc->op2 + 1;
2622 LOG_DIS("movem [$r%u%s, $r%u\n", dc->op1,
2623 dc->postinc ? "+]" : "]", dc->op2);
2625 addr = tcg_temp_new();
2626 /* There are probably better ways of doing this. */
2627 cris_flush_cc_state(dc);
2628 for (i = 0; i < (nr >> 1); i++) {
2629 tmp[i] = tcg_temp_new_i64();
2630 tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
2631 gen_load64(dc, tmp[i], addr);
2633 if (nr & 1) {
2634 tmp32 = tcg_temp_new_i32();
2635 tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
2636 gen_load(dc, tmp32, addr, 4, 0);
2637 } else
2638 TCGV_UNUSED(tmp32);
2639 tcg_temp_free(addr);
2641 for (i = 0; i < (nr >> 1); i++) {
2642 tcg_gen_trunc_i64_i32(cpu_R[i * 2], tmp[i]);
2643 tcg_gen_shri_i64(tmp[i], tmp[i], 32);
2644 tcg_gen_trunc_i64_i32(cpu_R[i * 2 + 1], tmp[i]);
2645 tcg_temp_free_i64(tmp[i]);
2647 if (nr & 1) {
2648 tcg_gen_mov_tl(cpu_R[dc->op2], tmp32);
2649 tcg_temp_free(tmp32);
2652 /* writeback the updated pointer value. */
2653 if (dc->postinc)
2654 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], nr * 4);
2656 /* gen_load might want to evaluate the previous insn's flags. */
2657 cris_cc_mask(dc, 0);
2658 return 2;
2661 static unsigned int dec_movem_rm(DisasContext *dc)
2663 TCGv tmp;
2664 TCGv addr;
2665 int i;
2667 LOG_DIS("movem $r%u, [$r%u%s\n", dc->op2, dc->op1,
2668 dc->postinc ? "+]" : "]");
2670 cris_flush_cc_state(dc);
2672 tmp = tcg_temp_new();
2673 addr = tcg_temp_new();
2674 tcg_gen_movi_tl(tmp, 4);
2675 tcg_gen_mov_tl(addr, cpu_R[dc->op1]);
2676 for (i = 0; i <= dc->op2; i++) {
2677 /* Perform the store, then displace addr. */
2679 gen_store(dc, addr, cpu_R[i], 4);
2680 tcg_gen_add_tl(addr, addr, tmp);
2682 if (dc->postinc)
2683 tcg_gen_mov_tl(cpu_R[dc->op1], addr);
2684 cris_cc_mask(dc, 0);
2685 tcg_temp_free(tmp);
2686 tcg_temp_free(addr);
2687 return 2;
2690 static unsigned int dec_move_rm(DisasContext *dc)
2692 int memsize;
2694 memsize = memsize_zz(dc);
2696 LOG_DIS("move.%d $r%u, [$r%u]\n",
2697 memsize, dc->op2, dc->op1);
2699 /* prepare store. */
2700 cris_flush_cc_state(dc);
2701 gen_store(dc, cpu_R[dc->op1], cpu_R[dc->op2], memsize);
2703 if (dc->postinc)
2704 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
2705 cris_cc_mask(dc, 0);
2706 return 2;
2709 static unsigned int dec_lapcq(DisasContext *dc)
2711 LOG_DIS("lapcq %x, $r%u\n",
2712 dc->pc + dc->op1*2, dc->op2);
2713 cris_cc_mask(dc, 0);
2714 tcg_gen_movi_tl(cpu_R[dc->op2], dc->pc + dc->op1 * 2);
2715 return 2;
2718 static unsigned int dec_lapc_im(DisasContext *dc)
2720 unsigned int rd;
2721 int32_t imm;
2722 int32_t pc;
2724 rd = dc->op2;
2726 cris_cc_mask(dc, 0);
2727 imm = ldl_code(dc->pc + 2);
2728 LOG_DIS("lapc 0x%x, $r%u\n", imm + dc->pc, dc->op2);
2730 pc = dc->pc;
2731 pc += imm;
2732 t_gen_mov_reg_TN(rd, tcg_const_tl(pc));
2733 return 6;
2736 /* Jump to special reg. */
2737 static unsigned int dec_jump_p(DisasContext *dc)
2739 LOG_DIS("jump $p%u\n", dc->op2);
2741 if (dc->op2 == PR_CCS)
2742 cris_evaluate_flags(dc);
2743 t_gen_mov_TN_preg(env_btarget, dc->op2);
2744 /* rete will often have the low bit set to indicate a delay slot. */
2745 tcg_gen_andi_tl(env_btarget, env_btarget, ~1);
2746 cris_cc_mask(dc, 0);
2747 cris_prepare_jmp(dc, JMP_INDIRECT);
2748 return 2;
2751 /* Jump and save. */
2752 static unsigned int dec_jas_r(DisasContext *dc)
2754 LOG_DIS("jas $r%u, $p%u\n", dc->op1, dc->op2);
2755 cris_cc_mask(dc, 0);
2756 /* Store the return address in Pd. */
2757 tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
2758 if (dc->op2 > 15)
2759 abort();
2760 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4));
2762 cris_prepare_jmp(dc, JMP_INDIRECT);
2763 return 2;
2766 static unsigned int dec_jas_im(DisasContext *dc)
2768 uint32_t imm;
2770 imm = ldl_code(dc->pc + 2);
2772 LOG_DIS("jas 0x%x\n", imm);
2773 cris_cc_mask(dc, 0);
2774 /* Store the return address in Pd. */
2775 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
2777 dc->jmp_pc = imm;
2778 cris_prepare_jmp(dc, JMP_DIRECT);
2779 return 6;
2782 static unsigned int dec_jasc_im(DisasContext *dc)
2784 uint32_t imm;
2786 imm = ldl_code(dc->pc + 2);
2788 LOG_DIS("jasc 0x%x\n", imm);
2789 cris_cc_mask(dc, 0);
2790 /* Store the return address in Pd. */
2791 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8 + 4));
2793 dc->jmp_pc = imm;
2794 cris_prepare_jmp(dc, JMP_DIRECT);
2795 return 6;
2798 static unsigned int dec_jasc_r(DisasContext *dc)
2800 LOG_DIS("jasc_r $r%u, $p%u\n", dc->op1, dc->op2);
2801 cris_cc_mask(dc, 0);
2802 /* Store the return address in Pd. */
2803 tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
2804 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4 + 4));
2805 cris_prepare_jmp(dc, JMP_INDIRECT);
2806 return 2;
2809 static unsigned int dec_bcc_im(DisasContext *dc)
2811 int32_t offset;
2812 uint32_t cond = dc->op2;
2814 offset = ldsw_code(dc->pc + 2);
2816 LOG_DIS("b%s %d pc=%x dst=%x\n",
2817 cc_name(cond), offset,
2818 dc->pc, dc->pc + offset);
2820 cris_cc_mask(dc, 0);
2821 /* op2 holds the condition-code. */
2822 cris_prepare_cc_branch (dc, offset, cond);
2823 return 4;
2826 static unsigned int dec_bas_im(DisasContext *dc)
2828 int32_t simm;
2831 simm = ldl_code(dc->pc + 2);
2833 LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2);
2834 cris_cc_mask(dc, 0);
2835 /* Store the return address in Pd. */
2836 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
2838 dc->jmp_pc = dc->pc + simm;
2839 cris_prepare_jmp(dc, JMP_DIRECT);
2840 return 6;
2843 static unsigned int dec_basc_im(DisasContext *dc)
2845 int32_t simm;
2846 simm = ldl_code(dc->pc + 2);
2848 LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2);
2849 cris_cc_mask(dc, 0);
2850 /* Store the return address in Pd. */
2851 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 12));
2853 dc->jmp_pc = dc->pc + simm;
2854 cris_prepare_jmp(dc, JMP_DIRECT);
2855 return 6;
2858 static unsigned int dec_rfe_etc(DisasContext *dc)
2860 cris_cc_mask(dc, 0);
2862 if (dc->op2 == 15) {
2863 t_gen_mov_env_TN(halted, tcg_const_tl(1));
2864 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2865 t_gen_raise_exception(EXCP_HLT);
2866 return 2;
2869 switch (dc->op2 & 7) {
2870 case 2:
2871 /* rfe. */
2872 LOG_DIS("rfe\n");
2873 cris_evaluate_flags(dc);
2874 gen_helper_rfe();
2875 dc->is_jmp = DISAS_UPDATE;
2876 break;
2877 case 5:
2878 /* rfn. */
2879 LOG_DIS("rfn\n");
2880 cris_evaluate_flags(dc);
2881 gen_helper_rfn();
2882 dc->is_jmp = DISAS_UPDATE;
2883 break;
2884 case 6:
2885 LOG_DIS("break %d\n", dc->op1);
2886 cris_evaluate_flags (dc);
2887 /* break. */
2888 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2890 /* Breaks start at 16 in the exception vector. */
2891 t_gen_mov_env_TN(trap_vector,
2892 tcg_const_tl(dc->op1 + 16));
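/* For instance (illustrative only): a "break 3" insn would end up raising
   exception vector 16 + 3 = 19 here. */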
2893 t_gen_raise_exception(EXCP_BREAK);
2894 dc->is_jmp = DISAS_UPDATE;
2895 break;
2896 default:
2897 printf ("op2=%x\n", dc->op2);
2898 BUG();
2899 break;
2902 return 2;
2905 static unsigned int dec_ftag_fidx_d_m(DisasContext *dc)
2907 return 2;
2910 static unsigned int dec_ftag_fidx_i_m(DisasContext *dc)
2912 return 2;
2915 static unsigned int dec_null(DisasContext *dc)
2917 printf ("unknown insn pc=%x opc=%x op1=%x op2=%x\n",
2918 dc->pc, dc->opcode, dc->op1, dc->op2);
2919 fflush(NULL);
2920 BUG();
2921 return 2;
2924 static struct decoder_info {
2925 struct {
2926 uint32_t bits;
2927 uint32_t mask;
2929 unsigned int (*dec)(DisasContext *dc);
2930 } decinfo[] = {
2931 /* Order matters here. */
2932 {DEC_MOVEQ, dec_moveq},
2933 {DEC_BTSTQ, dec_btstq},
2934 {DEC_CMPQ, dec_cmpq},
2935 {DEC_ADDOQ, dec_addoq},
2936 {DEC_ADDQ, dec_addq},
2937 {DEC_SUBQ, dec_subq},
2938 {DEC_ANDQ, dec_andq},
2939 {DEC_ORQ, dec_orq},
2940 {DEC_ASRQ, dec_asrq},
2941 {DEC_LSLQ, dec_lslq},
2942 {DEC_LSRQ, dec_lsrq},
2943 {DEC_BCCQ, dec_bccq},
2945 {DEC_BCC_IM, dec_bcc_im},
2946 {DEC_JAS_IM, dec_jas_im},
2947 {DEC_JAS_R, dec_jas_r},
2948 {DEC_JASC_IM, dec_jasc_im},
2949 {DEC_JASC_R, dec_jasc_r},
2950 {DEC_BAS_IM, dec_bas_im},
2951 {DEC_BASC_IM, dec_basc_im},
2952 {DEC_JUMP_P, dec_jump_p},
2953 {DEC_LAPC_IM, dec_lapc_im},
2954 {DEC_LAPCQ, dec_lapcq},
2956 {DEC_RFE_ETC, dec_rfe_etc},
2957 {DEC_ADDC_MR, dec_addc_mr},
2959 {DEC_MOVE_MP, dec_move_mp},
2960 {DEC_MOVE_PM, dec_move_pm},
2961 {DEC_MOVEM_MR, dec_movem_mr},
2962 {DEC_MOVEM_RM, dec_movem_rm},
2963 {DEC_MOVE_PR, dec_move_pr},
2964 {DEC_SCC_R, dec_scc_r},
2965 {DEC_SETF, dec_setclrf},
2966 {DEC_CLEARF, dec_setclrf},
2968 {DEC_MOVE_SR, dec_move_sr},
2969 {DEC_MOVE_RP, dec_move_rp},
2970 {DEC_SWAP_R, dec_swap_r},
2971 {DEC_ABS_R, dec_abs_r},
2972 {DEC_LZ_R, dec_lz_r},
2973 {DEC_MOVE_RS, dec_move_rs},
2974 {DEC_BTST_R, dec_btst_r},
2975 {DEC_ADDC_R, dec_addc_r},
2977 {DEC_DSTEP_R, dec_dstep_r},
2978 {DEC_XOR_R, dec_xor_r},
2979 {DEC_MCP_R, dec_mcp_r},
2980 {DEC_CMP_R, dec_cmp_r},
2982 {DEC_ADDI_R, dec_addi_r},
2983 {DEC_ADDI_ACR, dec_addi_acr},
2985 {DEC_ADD_R, dec_add_r},
2986 {DEC_SUB_R, dec_sub_r},
2988 {DEC_ADDU_R, dec_addu_r},
2989 {DEC_ADDS_R, dec_adds_r},
2990 {DEC_SUBU_R, dec_subu_r},
2991 {DEC_SUBS_R, dec_subs_r},
2992 {DEC_LSL_R, dec_lsl_r},
2994 {DEC_AND_R, dec_and_r},
2995 {DEC_OR_R, dec_or_r},
2996 {DEC_BOUND_R, dec_bound_r},
2997 {DEC_ASR_R, dec_asr_r},
2998 {DEC_LSR_R, dec_lsr_r},
3000 {DEC_MOVU_R, dec_movu_r},
3001 {DEC_MOVS_R, dec_movs_r},
3002 {DEC_NEG_R, dec_neg_r},
3003 {DEC_MOVE_R, dec_move_r},
3005 {DEC_FTAG_FIDX_I_M, dec_ftag_fidx_i_m},
3006 {DEC_FTAG_FIDX_D_M, dec_ftag_fidx_d_m},
3008 {DEC_MULS_R, dec_muls_r},
3009 {DEC_MULU_R, dec_mulu_r},
3011 {DEC_ADDU_M, dec_addu_m},
3012 {DEC_ADDS_M, dec_adds_m},
3013 {DEC_SUBU_M, dec_subu_m},
3014 {DEC_SUBS_M, dec_subs_m},
3016 {DEC_CMPU_M, dec_cmpu_m},
3017 {DEC_CMPS_M, dec_cmps_m},
3018 {DEC_MOVU_M, dec_movu_m},
3019 {DEC_MOVS_M, dec_movs_m},
3021 {DEC_CMP_M, dec_cmp_m},
3022 {DEC_ADDO_M, dec_addo_m},
3023 {DEC_BOUND_M, dec_bound_m},
3024 {DEC_ADD_M, dec_add_m},
3025 {DEC_SUB_M, dec_sub_m},
3026 {DEC_AND_M, dec_and_m},
3027 {DEC_OR_M, dec_or_m},
3028 {DEC_MOVE_RM, dec_move_rm},
3029 {DEC_TEST_M, dec_test_m},
3030 {DEC_MOVE_MR, dec_move_mr},
3032 {{0, 0}, dec_null}
3035 static inline unsigned int
3036 cris_decoder(DisasContext *dc)
3038 unsigned int insn_len = 2;
3039 int i;
3041 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
3042 tcg_gen_debug_insn_start(dc->pc);
3044 /* Load a halfword into the instruction register. */
3045 dc->ir = lduw_code(dc->pc);
3047 /* Now decode it. */
3048 dc->opcode = EXTRACT_FIELD(dc->ir, 4, 11);
3049 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 3);
3050 dc->op2 = EXTRACT_FIELD(dc->ir, 12, 15);
3051 dc->zsize = EXTRACT_FIELD(dc->ir, 4, 4);
3052 dc->zzsize = EXTRACT_FIELD(dc->ir, 4, 5);
3053 dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10);
3055 /* Look the insn up in the decoder table. */
3056 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
3057 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits)
3059 insn_len = decinfo[i].dec(dc);
3060 break;
3064 #if !defined(CONFIG_USER_ONLY)
3065 /* Single-stepping? */
3066 if (dc->tb_flags & S_FLAG) {
3067 int l1;
3069 l1 = gen_new_label();
3070 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_PR[PR_SPC], dc->pc, l1);
3071 /* We treat SPC as a break with an odd trap vector. */
3072 cris_evaluate_flags (dc);
3073 t_gen_mov_env_TN(trap_vector, tcg_const_tl(3));
3074 tcg_gen_movi_tl(env_pc, dc->pc + insn_len);
3075 tcg_gen_movi_tl(cpu_PR[PR_SPC], dc->pc + insn_len);
3076 t_gen_raise_exception(EXCP_BREAK);
3077 gen_set_label(l1);
3079 #endif
3080 return insn_len;
3083 static void check_breakpoint(CPUState *env, DisasContext *dc)
3085 CPUBreakpoint *bp;
3087 if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
3088 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
3089 if (bp->pc == dc->pc) {
3090 cris_evaluate_flags (dc);
3091 tcg_gen_movi_tl(env_pc, dc->pc);
3092 t_gen_raise_exception(EXCP_DEBUG);
3093 dc->is_jmp = DISAS_UPDATE;
3101 * Delay slots on QEMU/CRIS.
3103 * If an exception hits on a delayslot, the core will let ERP (the Exception
3104 * Return Pointer) point to the branch insn (the previous insn) and set the lsb
3105 * to give SW a hint that the exception actually hit on the dslot.
3107 * CRIS expects all PC addresses to be 16-bit aligned. The lsb is ignored by
3108 * the core and any jmp to an odd address will mask off that lsb. It is
3109 * simply there to let SW know there was an exception on a dslot.
3111 * When the software returns from an exception, the branch will re-execute.
3112 * On QEMU care needs to be taken when a branch+delayslot sequence is broken
3113 * and the branch and delayslot don't share pages.
3115 * The TB containing the branch insn will set up env->btarget and evaluate
3116 * env->btaken. When the translation loop exits we will note that the branch
3117 * sequence is broken and let env->dslot be the size of the branch insn (those
3118 * vary in length).
3120 * The TB containing the delayslot will have the PC of its real insn (i.e. no lsb
3121 * set). It will also expect to have env->dslot set up with the size of the
3122 * delay slot so that env->pc - env->dslot points to the branch insn. This TB
3123 * will execute the dslot and take the branch, either to btarget or just one
3124 * insn ahead.
3126 * When exceptions occur, we check for env->dslot in do_interrupt to detect
3127 * broken branch sequences and set up $erp accordingly (i.e. let it point to the
3128 * branch and set lsb). Then env->dslot gets cleared so that the exception
3129 * handler can enter. When returning from exceptions (jump $erp) the lsb gets
3130 * masked off and we will re-execute the branch insn.
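*
* A minimal sketch of that do_interrupt-time fixup (illustrative only, not
* part of this file; PR_ERP and the exact field names are assumptions based
* on the names used elsewhere in this target):
*
*     if (env->dslot) {
*         // Exception hit the delay slot: point ERP back at the branch
*         // insn and set the lsb as the hint to software.
*         env->pregs[PR_ERP] = (env->pc - env->dslot) | 1;
*         env->dslot = 0;
*     } else {
*         env->pregs[PR_ERP] = env->pc;
*     }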
3134 /* generate intermediate code for basic block 'tb'. */
3135 static void
3136 gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
3137 int search_pc)
3139 uint16_t *gen_opc_end;
3140 uint32_t pc_start;
3141 unsigned int insn_len;
3142 int j, lj;
3143 struct DisasContext ctx;
3144 struct DisasContext *dc = &ctx;
3145 uint32_t next_page_start;
3146 target_ulong npc;
3147 int num_insns;
3148 int max_insns;
3150 qemu_log_try_set_file(stderr);
3152 /* Odd PC indicates that the branch is re-executing due to an exception in
3153 * the delayslot, like in real hw.
3155 pc_start = tb->pc & ~1;
3156 dc->env = env;
3157 dc->tb = tb;
3159 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3161 dc->is_jmp = DISAS_NEXT;
3162 dc->ppc = pc_start;
3163 dc->pc = pc_start;
3164 dc->singlestep_enabled = env->singlestep_enabled;
3165 dc->flags_uptodate = 1;
3166 dc->flagx_known = 1;
3167 dc->flags_x = tb->flags & X_FLAG;
3168 dc->cc_x_uptodate = 0;
3169 dc->cc_mask = 0;
3170 dc->update_cc = 0;
3172 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
3173 dc->cc_size_uptodate = -1;
3175 /* Decode TB flags. */
3176 dc->tb_flags = tb->flags & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
3177 dc->delayed_branch = !!(tb->flags & 7);
3178 if (dc->delayed_branch)
3179 dc->jmp = JMP_INDIRECT;
3180 else
3181 dc->jmp = JMP_NOJMP;
3183 dc->cpustate_changed = 0;
3185 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3186 qemu_log(
3187 "srch=%d pc=%x %x flg=%llx bt=%x ds=%u ccs=%x\n"
3188 "pid=%x usp=%x\n"
3189 "%x.%x.%x.%x\n"
3190 "%x.%x.%x.%x\n"
3191 "%x.%x.%x.%x\n"
3192 "%x.%x.%x.%x\n",
3193 search_pc, dc->pc, dc->ppc,
3194 (unsigned long long)tb->flags,
3195 env->btarget, (unsigned)tb->flags & 7,
3196 env->pregs[PR_CCS],
3197 env->pregs[PR_PID], env->pregs[PR_USP],
3198 env->regs[0], env->regs[1], env->regs[2], env->regs[3],
3199 env->regs[4], env->regs[5], env->regs[6], env->regs[7],
3200 env->regs[8], env->regs[9],
3201 env->regs[10], env->regs[11],
3202 env->regs[12], env->regs[13],
3203 env->regs[14], env->regs[15]);
3204 qemu_log("--------------\n");
3205 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3208 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
3209 lj = -1;
3210 num_insns = 0;
3211 max_insns = tb->cflags & CF_COUNT_MASK;
3212 if (max_insns == 0)
3213 max_insns = CF_COUNT_MASK;
3215 gen_icount_start();
3218 check_breakpoint(env, dc);
3220 if (search_pc) {
3221 j = gen_opc_ptr - gen_opc_buf;
3222 if (lj < j) {
3223 lj++;
3224 while (lj < j)
3225 gen_opc_instr_start[lj++] = 0;
3227 if (dc->delayed_branch == 1)
3228 gen_opc_pc[lj] = dc->ppc | 1;
3229 else
3230 gen_opc_pc[lj] = dc->pc;
3231 gen_opc_instr_start[lj] = 1;
3232 gen_opc_icount[lj] = num_insns;
3235 /* Pretty disas. */
3236 LOG_DIS("%8.8x:\t", dc->pc);
3238 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3239 gen_io_start();
3240 dc->clear_x = 1;
3242 insn_len = cris_decoder(dc);
3243 dc->ppc = dc->pc;
3244 dc->pc += insn_len;
3245 if (dc->clear_x)
3246 cris_clear_x_flag(dc);
3248 num_insns++;
3249 /* Check for delayed branches here. If we do it before
3250 actually generating any host code, the simulator will just
3251 loop doing nothing on this program location. */
3252 if (dc->delayed_branch) {
3253 dc->delayed_branch--;
3254 if (dc->delayed_branch == 0)
3256 if (tb->flags & 7)
3257 t_gen_mov_env_TN(dslot,
3258 tcg_const_tl(0));
3259 if (dc->jmp == JMP_DIRECT) {
3260 dc->is_jmp = DISAS_NEXT;
3261 } else {
3262 t_gen_cc_jmp(env_btarget,
3263 tcg_const_tl(dc->pc));
3264 dc->is_jmp = DISAS_JUMP;
3266 break;
3270 /* If we are re-executing a branch due to exceptions on
3271 delay slots, don't break. */
3272 if (!(tb->pc & 1) && env->singlestep_enabled)
3273 break;
3274 } while (!dc->is_jmp && !dc->cpustate_changed
3275 && gen_opc_ptr < gen_opc_end
3276 && (dc->pc < next_page_start)
3277 && num_insns < max_insns);
3279 npc = dc->pc;
3280 if (dc->jmp == JMP_DIRECT && !dc->delayed_branch)
3281 npc = dc->jmp_pc;
3283 if (tb->cflags & CF_LAST_IO)
3284 gen_io_end();
3285 /* Force an update if the per-tb cpu state has changed. */
3286 if (dc->is_jmp == DISAS_NEXT
3287 && (dc->cpustate_changed || !dc->flagx_known
3288 || (dc->flags_x != (tb->flags & X_FLAG)))) {
3289 dc->is_jmp = DISAS_UPDATE;
3290 tcg_gen_movi_tl(env_pc, npc);
3292 /* Broken branch+delayslot sequence. */
3293 if (dc->delayed_branch == 1) {
3294 /* Set env->dslot to the size of the branch insn. */
3295 t_gen_mov_env_TN(dslot, tcg_const_tl(dc->pc - dc->ppc));
3296 cris_store_direct_jmp(dc);
3299 cris_evaluate_flags (dc);
3301 if (unlikely(env->singlestep_enabled)) {
3302 if (dc->is_jmp == DISAS_NEXT)
3303 tcg_gen_movi_tl(env_pc, npc);
3304 t_gen_raise_exception(EXCP_DEBUG);
3305 } else {
3306 switch(dc->is_jmp) {
3307 case DISAS_NEXT:
3308 gen_goto_tb(dc, 1, npc);
3309 break;
3310 default:
3311 case DISAS_JUMP:
3312 case DISAS_UPDATE:
3313 /* indicate that the hash table must be used
3314 to find the next TB */
3315 tcg_gen_exit_tb(0);
3316 break;
3317 case DISAS_SWI:
3318 case DISAS_TB_JUMP:
3319 /* nothing more to generate */
3320 break;
3323 gen_icount_end(tb, num_insns);
3324 *gen_opc_ptr = INDEX_op_end;
3325 if (search_pc) {
3326 j = gen_opc_ptr - gen_opc_buf;
3327 lj++;
3328 while (lj <= j)
3329 gen_opc_instr_start[lj++] = 0;
3330 } else {
3331 tb->size = dc->pc - pc_start;
3332 tb->icount = num_insns;
3335 #ifdef DEBUG_DISAS
3336 #if !DISAS_CRIS
3337 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3338 log_target_disas(pc_start, dc->pc - pc_start, 0);
3339 qemu_log("\nisize=%d osize=%zd\n",
3340 dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
3342 #endif
3343 #endif
3346 void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
3348 gen_intermediate_code_internal(env, tb, 0);
3351 void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
3353 gen_intermediate_code_internal(env, tb, 1);
3356 void cpu_dump_state (CPUState *env, FILE *f,
3357 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
3358 int flags)
3360 int i;
3361 uint32_t srs;
3363 if (!env || !f)
3364 return;
3366 cpu_fprintf(f, "PC=%x CCS=%x btaken=%d btarget=%x\n"
3367 "cc_op=%d cc_src=%d cc_dest=%d cc_result=%x cc_mask=%x\n",
3368 env->pc, env->pregs[PR_CCS], env->btaken, env->btarget,
3369 env->cc_op,
3370 env->cc_src, env->cc_dest, env->cc_result, env->cc_mask);
3373 for (i = 0; i < 16; i++) {
3374 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
3375 if ((i + 1) % 4 == 0)
3376 cpu_fprintf(f, "\n");
3378 cpu_fprintf(f, "\nspecial regs:\n");
3379 for (i = 0; i < 16; i++) {
3380 cpu_fprintf(f, "p%2.2d=%8.8x ", i, env->pregs[i]);
3381 if ((i + 1) % 4 == 0)
3382 cpu_fprintf(f, "\n");
3384 srs = env->pregs[PR_SRS];
3385 cpu_fprintf(f, "\nsupport function regs bank %x:\n", srs);
3386 if (srs < 256) {
3387 for (i = 0; i < 16; i++) {
3388 cpu_fprintf(f, "s%2.2d=%8.8x ",
3389 i, env->sregs[srs][i]);
3390 if ((i + 1) % 4 == 0)
3391 cpu_fprintf(f, "\n");
3394 cpu_fprintf(f, "\n\n");
3398 CPUCRISState *cpu_cris_init (const char *cpu_model)
3400 CPUCRISState *env;
3401 static int tcg_initialized = 0;
3402 int i;
3404 env = qemu_mallocz(sizeof(CPUCRISState));
3406 cpu_exec_init(env);
3407 cpu_reset(env);
3409 if (tcg_initialized)
3410 return env;
3412 tcg_initialized = 1;
3414 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
3415 cc_x = tcg_global_mem_new(TCG_AREG0,
3416 offsetof(CPUState, cc_x), "cc_x");
3417 cc_src = tcg_global_mem_new(TCG_AREG0,
3418 offsetof(CPUState, cc_src), "cc_src");
3419 cc_dest = tcg_global_mem_new(TCG_AREG0,
3420 offsetof(CPUState, cc_dest),
3421 "cc_dest");
3422 cc_result = tcg_global_mem_new(TCG_AREG0,
3423 offsetof(CPUState, cc_result),
3424 "cc_result");
3425 cc_op = tcg_global_mem_new(TCG_AREG0,
3426 offsetof(CPUState, cc_op), "cc_op");
3427 cc_size = tcg_global_mem_new(TCG_AREG0,
3428 offsetof(CPUState, cc_size),
3429 "cc_size");
3430 cc_mask = tcg_global_mem_new(TCG_AREG0,
3431 offsetof(CPUState, cc_mask),
3432 "cc_mask");
3434 env_pc = tcg_global_mem_new(TCG_AREG0,
3435 offsetof(CPUState, pc),
3436 "pc");
3437 env_btarget = tcg_global_mem_new(TCG_AREG0,
3438 offsetof(CPUState, btarget),
3439 "btarget");
3440 env_btaken = tcg_global_mem_new(TCG_AREG0,
3441 offsetof(CPUState, btaken),
3442 "btaken");
3443 for (i = 0; i < 16; i++) {
3444 cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
3445 offsetof(CPUState, regs[i]),
3446 regnames[i]);
3448 for (i = 0; i < 16; i++) {
3449 cpu_PR[i] = tcg_global_mem_new(TCG_AREG0,
3450 offsetof(CPUState, pregs[i]),
3451 pregnames[i]);
3454 #define GEN_HELPER 2
3455 #include "helper.h"
3457 return env;
3460 void cpu_reset (CPUCRISState *env)
3462 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
3463 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
3464 log_cpu_state(env, 0);
3467 memset(env, 0, offsetof(CPUCRISState, breakpoints));
3468 tlb_flush(env, 1);
3470 env->pregs[PR_VR] = 32;
3471 #if defined(CONFIG_USER_ONLY)
3472 /* start in user mode with interrupts enabled. */
3473 env->pregs[PR_CCS] |= U_FLAG | I_FLAG;
3474 #else
3475 cris_mmu_init(env);
3476 env->pregs[PR_CCS] = 0;
3477 #endif
3480 void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
3481 unsigned long searched_pc, int pc_pos, void *puc)
3483 env->pc = gen_opc_pc[pc_pos];