1 /*
2 * Alpha emulation cpu translation for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include <stdint.h>
21 #include <stdlib.h>
22 #include <stdio.h>
24 #include "cpu.h"
25 #include "disas.h"
26 #include "host-utils.h"
27 #include "tcg-op.h"
28 #include "qemu-common.h"
30 #include "helper.h"
31 #define GEN_HELPER 1
32 #include "helper.h"
34 #undef ALPHA_DEBUG_DISAS
35 #define CONFIG_SOFTFLOAT_INLINE
37 #ifdef ALPHA_DEBUG_DISAS
38 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
39 #else
40 # define LOG_DISAS(...) do { } while (0)
41 #endif
43 typedef struct DisasContext DisasContext;
44 struct DisasContext {
45 struct TranslationBlock *tb;
46 CPUAlphaState *env;
47 uint64_t pc;
48 int mem_idx;
50 /* Current rounding mode for this TB. */
51 int tb_rm;
52 /* Current flush-to-zero setting for this TB. */
53 int tb_ftz;
56 /* Return values from translate_one, indicating the state of the TB.
57 Note that zero indicates that we are not exiting the TB. */
59 typedef enum {
60 NO_EXIT,
62 /* We have emitted one or more goto_tb. No fixup required. */
63 EXIT_GOTO_TB,
65 /* We are not using a goto_tb (for whatever reason), but have updated
66 the PC (for whatever reason), so there's no need to do it again on
67 exiting the TB. */
68 EXIT_PC_UPDATED,
70 /* We are exiting the TB, but have neither emitted a goto_tb, nor
71 updated the PC for the next instruction to be executed. */
72 EXIT_PC_STALE,
74 /* We are ending the TB with a noreturn function call, e.g. longjmp.
75 No following code will be executed. */
76 EXIT_NORETURN,
77 } ExitStatus;
79 /* global register indexes */
80 static TCGv_ptr cpu_env;
81 static TCGv cpu_ir[31];
82 static TCGv cpu_fir[31];
83 static TCGv cpu_pc;
84 static TCGv cpu_lock_addr;
85 static TCGv cpu_lock_st_addr;
86 static TCGv cpu_lock_value;
87 static TCGv cpu_unique;
88 #ifndef CONFIG_USER_ONLY
89 static TCGv cpu_sysval;
90 static TCGv cpu_usp;
91 #endif
93 /* register names */
94 static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
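/* The size above holds the NUL-terminated names generated below: "ir0".."ir9"
   take 4 bytes each and "ir10".."ir30" take 5 (10*4+21*5), while the FP names
   "fir0".."fir9" and "fir10".."fir30" take 5 and 6 bytes (10*5+21*6). */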
96 #include "gen-icount.h"
98 static void alpha_translate_init(void)
100 int i;
101 char *p;
102 static int done_init = 0;
104 if (done_init)
105 return;
107 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
109 p = cpu_reg_names;
110 for (i = 0; i < 31; i++) {
111 sprintf(p, "ir%d", i);
112 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
113 offsetof(CPUState, ir[i]), p);
114 p += (i < 10) ? 4 : 5;
116 sprintf(p, "fir%d", i);
117 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
118 offsetof(CPUState, fir[i]), p);
119 p += (i < 10) ? 5 : 6;
122 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
123 offsetof(CPUState, pc), "pc");
125 cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
126 offsetof(CPUState, lock_addr),
127 "lock_addr");
128 cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
129 offsetof(CPUState, lock_st_addr),
130 "lock_st_addr");
131 cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
132 offsetof(CPUState, lock_value),
133 "lock_value");
135 cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
136 offsetof(CPUState, unique), "unique");
137 #ifndef CONFIG_USER_ONLY
138 cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
139 offsetof(CPUState, sysval), "sysval");
140 cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
141 offsetof(CPUState, usp), "usp");
142 #endif
144 /* register helpers */
145 #define GEN_HELPER 2
146 #include "helper.h"
148 done_init = 1;
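/* Only 31 integer and 31 FP registers are backed by TCG globals: the Alpha
   architecture defines $31 and $f31 as always reading zero and discarding
   writes, which is why the code below special-cases ra, rb or rc equal to 31
   rather than indexing cpu_ir[]/cpu_fir[]. */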
151 static void gen_excp_1(int exception, int error_code)
153 TCGv_i32 tmp1, tmp2;
155 tmp1 = tcg_const_i32(exception);
156 tmp2 = tcg_const_i32(error_code);
157 gen_helper_excp(tmp1, tmp2);
158 tcg_temp_free_i32(tmp2);
159 tcg_temp_free_i32(tmp1);
162 static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
164 tcg_gen_movi_i64(cpu_pc, ctx->pc);
165 gen_excp_1(exception, error_code);
166 return EXIT_NORETURN;
169 static inline ExitStatus gen_invalid(DisasContext *ctx)
171 return gen_excp(ctx, EXCP_OPCDEC, 0);
174 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
176 TCGv tmp = tcg_temp_new();
177 TCGv_i32 tmp32 = tcg_temp_new_i32();
178 tcg_gen_qemu_ld32u(tmp, t1, flags);
179 tcg_gen_trunc_i64_i32(tmp32, tmp);
180 gen_helper_memory_to_f(t0, tmp32);
181 tcg_temp_free_i32(tmp32);
182 tcg_temp_free(tmp);
185 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
187 TCGv tmp = tcg_temp_new();
188 tcg_gen_qemu_ld64(tmp, t1, flags);
189 gen_helper_memory_to_g(t0, tmp);
190 tcg_temp_free(tmp);
193 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
195 TCGv tmp = tcg_temp_new();
196 TCGv_i32 tmp32 = tcg_temp_new_i32();
197 tcg_gen_qemu_ld32u(tmp, t1, flags);
198 tcg_gen_trunc_i64_i32(tmp32, tmp);
199 gen_helper_memory_to_s(t0, tmp32);
200 tcg_temp_free_i32(tmp32);
201 tcg_temp_free(tmp);
204 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
206 tcg_gen_qemu_ld32s(t0, t1, flags);
207 tcg_gen_mov_i64(cpu_lock_addr, t1);
208 tcg_gen_mov_i64(cpu_lock_value, t0);
211 static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
213 tcg_gen_qemu_ld64(t0, t1, flags);
214 tcg_gen_mov_i64(cpu_lock_addr, t1);
215 tcg_gen_mov_i64(cpu_lock_value, t0);
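/* The two load-locked variants above record the locked address and the loaded
   value, so that a later store-conditional (gen_store_conditional below) can
   emulate LDx_L/STx_C by comparing against them. */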
218 static inline void gen_load_mem(DisasContext *ctx,
219 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
220 int flags),
221 int ra, int rb, int32_t disp16, int fp,
222 int clear)
224 TCGv addr, va;
226 /* LDQ_U with ra $31 is UNOP. Various other loads are forms of
227 prefetches, which we can treat as nops. No worries about
228 missed exceptions here. */
229 if (unlikely(ra == 31)) {
230 return;
233 addr = tcg_temp_new();
234 if (rb != 31) {
235 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
236 if (clear) {
237 tcg_gen_andi_i64(addr, addr, ~0x7);
239 } else {
240 if (clear) {
241 disp16 &= ~0x7;
243 tcg_gen_movi_i64(addr, disp16);
246 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
247 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
249 tcg_temp_free(addr);
252 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
254 TCGv_i32 tmp32 = tcg_temp_new_i32();
255 TCGv tmp = tcg_temp_new();
256 gen_helper_f_to_memory(tmp32, t0);
257 tcg_gen_extu_i32_i64(tmp, tmp32);
258 tcg_gen_qemu_st32(tmp, t1, flags);
259 tcg_temp_free(tmp);
260 tcg_temp_free_i32(tmp32);
263 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
265 TCGv tmp = tcg_temp_new();
266 gen_helper_g_to_memory(tmp, t0);
267 tcg_gen_qemu_st64(tmp, t1, flags);
268 tcg_temp_free(tmp);
271 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
273 TCGv_i32 tmp32 = tcg_temp_new_i32();
274 TCGv tmp = tcg_temp_new();
275 gen_helper_s_to_memory(tmp32, t0);
276 tcg_gen_extu_i32_i64(tmp, tmp32);
277 tcg_gen_qemu_st32(tmp, t1, flags);
278 tcg_temp_free(tmp);
279 tcg_temp_free_i32(tmp32);
282 static inline void gen_store_mem(DisasContext *ctx,
283 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
284 int flags),
285 int ra, int rb, int32_t disp16, int fp,
286 int clear)
288 TCGv addr, va;
290 addr = tcg_temp_new();
291 if (rb != 31) {
292 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
293 if (clear) {
294 tcg_gen_andi_i64(addr, addr, ~0x7);
296 } else {
297 if (clear) {
298 disp16 &= ~0x7;
300 tcg_gen_movi_i64(addr, disp16);
303 if (ra == 31) {
304 va = tcg_const_i64(0);
305 } else {
306 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
308 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
310 tcg_temp_free(addr);
311 if (ra == 31) {
312 tcg_temp_free(va);
316 static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
317 int32_t disp16, int quad)
319 TCGv addr;
321 if (ra == 31) {
322 /* ??? Don't bother storing anything. The user can't tell
323 the difference, since the zero register always reads zero. */
324 return NO_EXIT;
327 #if defined(CONFIG_USER_ONLY)
328 addr = cpu_lock_st_addr;
329 #else
330 addr = tcg_temp_local_new();
331 #endif
333 if (rb != 31) {
334 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
335 } else {
336 tcg_gen_movi_i64(addr, disp16);
339 #if defined(CONFIG_USER_ONLY)
340 /* ??? This is handled via a complicated version of compare-and-swap
341 in the cpu_loop. Hopefully one day we'll have a real CAS opcode
342 in TCG so that this isn't necessary. */
343 return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
344 #else
345 /* ??? In system mode we are never multi-threaded, so CAS can be
346 implemented via a non-atomic load-compare-store sequence. */
348 int lab_fail, lab_done;
349 TCGv val;
351 lab_fail = gen_new_label();
352 lab_done = gen_new_label();
353 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
355 val = tcg_temp_new();
356 if (quad) {
357 tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
358 } else {
359 tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
361 tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
363 if (quad) {
364 tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
365 } else {
366 tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
368 tcg_gen_movi_i64(cpu_ir[ra], 1);
369 tcg_gen_br(lab_done);
371 gen_set_label(lab_fail);
372 tcg_gen_movi_i64(cpu_ir[ra], 0);
374 gen_set_label(lab_done);
375 tcg_gen_movi_i64(cpu_lock_addr, -1);
377 tcg_temp_free(addr);
378 return NO_EXIT;
380 #endif
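/* Note that in the system-mode path above cpu_lock_addr is reset to -1 on both
   the success and failure paths, clearing the reservation established by the
   preceding load-locked instruction. */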
383 static int use_goto_tb(DisasContext *ctx, uint64_t dest)
385 /* Check for the dest on the same page as the start of the TB. We
386 also want to suppress goto_tb in the case of single-stepping and IO. */
387 return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
388 && !ctx->env->singlestep_enabled
389 && !(ctx->tb->cflags & CF_LAST_IO));
392 static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
394 uint64_t dest = ctx->pc + (disp << 2);
396 if (ra != 31) {
397 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
400 /* Notice branch-to-next; used to initialize RA with the PC. */
401 if (disp == 0) {
402 return NO_EXIT;
403 } else if (use_goto_tb(ctx, dest)) {
404 tcg_gen_goto_tb(0);
405 tcg_gen_movi_i64(cpu_pc, dest);
406 tcg_gen_exit_tb((tcg_target_long)ctx->tb);
407 return EXIT_GOTO_TB;
408 } else {
409 tcg_gen_movi_i64(cpu_pc, dest);
410 return EXIT_PC_UPDATED;
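/* The branch displacement is counted in instruction words, hence disp << 2;
   ctx->pc already points past the branch, so it is both the base of the
   target address and the return address written to RA for BR/BSR. */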
414 static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
415 TCGv cmp, int32_t disp)
417 uint64_t dest = ctx->pc + (disp << 2);
418 int lab_true = gen_new_label();
420 if (use_goto_tb(ctx, dest)) {
421 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
423 tcg_gen_goto_tb(0);
424 tcg_gen_movi_i64(cpu_pc, ctx->pc);
425 tcg_gen_exit_tb((tcg_target_long)ctx->tb);
427 gen_set_label(lab_true);
428 tcg_gen_goto_tb(1);
429 tcg_gen_movi_i64(cpu_pc, dest);
430 tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);
432 return EXIT_GOTO_TB;
433 } else {
434 int lab_over = gen_new_label();
436 /* ??? Consider using either
437 movi pc, next
438 addi tmp, pc, disp
439 movcond pc, cond, 0, tmp, pc
or
441 setcond tmp, cond, 0
442 movi pc, next
443 neg tmp, tmp
444 andi tmp, tmp, disp
445 add pc, pc, tmp
446 The current diamond subgraph surely isn't efficient. */
448 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
449 tcg_gen_movi_i64(cpu_pc, ctx->pc);
450 tcg_gen_br(lab_over);
451 gen_set_label(lab_true);
452 tcg_gen_movi_i64(cpu_pc, dest);
453 gen_set_label(lab_over);
455 return EXIT_PC_UPDATED;
459 static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
460 int32_t disp, int mask)
462 TCGv cmp_tmp;
464 if (unlikely(ra == 31)) {
465 cmp_tmp = tcg_const_i64(0);
466 } else {
467 cmp_tmp = tcg_temp_new();
468 if (mask) {
469 tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
470 } else {
471 tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
475 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
478 /* Fold -0.0 for comparison with COND. */
480 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
482 uint64_t mzero = 1ull << 63;
484 switch (cond) {
485 case TCG_COND_LE:
486 case TCG_COND_GT:
487 /* For <= or >, the -0.0 value directly compares the way we want. */
488 tcg_gen_mov_i64(dest, src);
489 break;
491 case TCG_COND_EQ:
492 case TCG_COND_NE:
493 /* For == or !=, we can simply mask off the sign bit and compare. */
494 tcg_gen_andi_i64(dest, src, mzero - 1);
495 break;
497 case TCG_COND_GE:
498 case TCG_COND_LT:
499 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
500 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
501 tcg_gen_neg_i64(dest, dest);
502 tcg_gen_and_i64(dest, dest, src);
503 break;
505 default:
506 abort();
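/* For the >= and < cases above: the setcond produces 1 iff the source is not
   -0.0, the negation turns that into an all-ones mask, and the final AND
   passes every other value through unchanged while mapping -0.0 to +0.0. */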
510 static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
511 int32_t disp)
513 TCGv cmp_tmp;
515 if (unlikely(ra == 31)) {
516 /* Very uncommon case, but easier to optimize it to an integer
517 comparison than continuing with the floating point comparison. */
518 return gen_bcond(ctx, cond, ra, disp, 0);
521 cmp_tmp = tcg_temp_new();
522 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
523 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
526 static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
527 int islit, uint8_t lit, int mask)
529 TCGCond inv_cond = tcg_invert_cond(cond);
530 int l1;
532 if (unlikely(rc == 31))
533 return;
535 l1 = gen_new_label();
537 if (ra != 31) {
538 if (mask) {
539 TCGv tmp = tcg_temp_new();
540 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
541 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
542 tcg_temp_free(tmp);
543 } else
544 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
545 } else {
546 /* Very uncommon case - Do not bother to optimize. */
547 TCGv tmp = tcg_const_i64(0);
548 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
549 tcg_temp_free(tmp);
552 if (islit)
553 tcg_gen_movi_i64(cpu_ir[rc], lit);
554 else
555 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
556 gen_set_label(l1);
559 static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
561 TCGv cmp_tmp;
562 int l1;
564 if (unlikely(rc == 31)) {
565 return;
568 cmp_tmp = tcg_temp_new();
569 if (unlikely(ra == 31)) {
570 tcg_gen_movi_i64(cmp_tmp, 0);
571 } else {
572 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
575 l1 = gen_new_label();
576 tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
577 tcg_temp_free(cmp_tmp);
579 if (rb != 31)
580 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
581 else
582 tcg_gen_movi_i64(cpu_fir[rc], 0);
583 gen_set_label(l1);
586 #define QUAL_RM_N 0x080 /* Round mode nearest even */
587 #define QUAL_RM_C 0x000 /* Round mode chopped */
588 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
589 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
590 #define QUAL_RM_MASK 0x0c0
592 #define QUAL_U 0x100 /* Underflow enable (fp output) */
593 #define QUAL_V 0x100 /* Overflow enable (int output) */
594 #define QUAL_S 0x400 /* Software completion enable */
595 #define QUAL_I 0x200 /* Inexact detection enable */
597 static void gen_qual_roundmode(DisasContext *ctx, int fn11)
599 TCGv_i32 tmp;
601 fn11 &= QUAL_RM_MASK;
602 if (fn11 == ctx->tb_rm) {
603 return;
605 ctx->tb_rm = fn11;
607 tmp = tcg_temp_new_i32();
608 switch (fn11) {
609 case QUAL_RM_N:
610 tcg_gen_movi_i32(tmp, float_round_nearest_even);
611 break;
612 case QUAL_RM_C:
613 tcg_gen_movi_i32(tmp, float_round_to_zero);
614 break;
615 case QUAL_RM_M:
616 tcg_gen_movi_i32(tmp, float_round_down);
617 break;
618 case QUAL_RM_D:
619 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
620 break;
623 #if defined(CONFIG_SOFTFLOAT_INLINE)
624 /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
625 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
626 sets the one field. */
627 tcg_gen_st8_i32(tmp, cpu_env,
628 offsetof(CPUState, fp_status.float_rounding_mode));
629 #else
630 gen_helper_setroundmode(tmp);
631 #endif
633 tcg_temp_free_i32(tmp);
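/* The rounding mode written to fp_status is tracked per translation block in
   ctx->tb_rm (likewise the flush-to-zero setting in ctx->tb_ftz below), so a
   run of FP instructions with identical qualifier bits updates the softfloat
   state only once. */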
636 static void gen_qual_flushzero(DisasContext *ctx, int fn11)
638 TCGv_i32 tmp;
640 fn11 &= QUAL_U;
641 if (fn11 == ctx->tb_ftz) {
642 return;
644 ctx->tb_ftz = fn11;
646 tmp = tcg_temp_new_i32();
647 if (fn11) {
648 /* Underflow is enabled, use the FPCR setting. */
649 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
650 } else {
651 /* Underflow is disabled, force flush-to-zero. */
652 tcg_gen_movi_i32(tmp, 1);
655 #if defined(CONFIG_SOFTFLOAT_INLINE)
656 tcg_gen_st8_i32(tmp, cpu_env,
657 offsetof(CPUState, fp_status.flush_to_zero));
658 #else
659 gen_helper_setflushzero(tmp);
660 #endif
662 tcg_temp_free_i32(tmp);
665 static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
667 TCGv val = tcg_temp_new();
668 if (reg == 31) {
669 tcg_gen_movi_i64(val, 0);
670 } else if (fn11 & QUAL_S) {
671 gen_helper_ieee_input_s(val, cpu_fir[reg]);
672 } else if (is_cmp) {
673 gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
674 } else {
675 gen_helper_ieee_input(val, cpu_fir[reg]);
677 return val;
680 static void gen_fp_exc_clear(void)
682 #if defined(CONFIG_SOFTFLOAT_INLINE)
683 TCGv_i32 zero = tcg_const_i32(0);
684 tcg_gen_st8_i32(zero, cpu_env,
685 offsetof(CPUState, fp_status.float_exception_flags));
686 tcg_temp_free_i32(zero);
687 #else
688 gen_helper_fp_exc_clear();
689 #endif
692 static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
694 /* ??? We ought to be able to do something with imprecise exceptions.
695 E.g. notice we're still in the trap shadow of something within the
696 TB and do not generate the code to signal the exception; end the TB
697 when an exception is forced to arrive, either by consumption of a
698 register value or TRAPB or EXCB. */
699 TCGv_i32 exc = tcg_temp_new_i32();
700 TCGv_i32 reg;
702 #if defined(CONFIG_SOFTFLOAT_INLINE)
703 tcg_gen_ld8u_i32(exc, cpu_env,
704 offsetof(CPUState, fp_status.float_exception_flags));
705 #else
706 gen_helper_fp_exc_get(exc);
707 #endif
709 if (ignore) {
710 tcg_gen_andi_i32(exc, exc, ~ignore);
713 /* ??? Pass in the regno of the destination so that the helper can
714 set EXC_MASK, which contains a bitmask of destination registers
715 that have caused arithmetic traps. A simple userspace emulation
716 does not require this. We do need it for a guest kernel's entArith,
717 or if we were to do something clever with imprecise exceptions. */
718 reg = tcg_const_i32(rc + 32);
720 if (fn11 & QUAL_S) {
721 gen_helper_fp_exc_raise_s(exc, reg);
722 } else {
723 gen_helper_fp_exc_raise(exc, reg);
726 tcg_temp_free_i32(reg);
727 tcg_temp_free_i32(exc);
730 static inline void gen_fp_exc_raise(int rc, int fn11)
732 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
735 static void gen_fcvtlq(int rb, int rc)
737 if (unlikely(rc == 31)) {
738 return;
740 if (unlikely(rb == 31)) {
741 tcg_gen_movi_i64(cpu_fir[rc], 0);
742 } else {
743 TCGv tmp = tcg_temp_new();
745 /* The arithmetic right shift here, plus the sign-extended mask below
746 yields a sign-extended result without an explicit ext32s_i64. */
747 tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
748 tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
749 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
750 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
751 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
753 tcg_temp_free(tmp);
757 static void gen_fcvtql(int rb, int rc)
759 if (unlikely(rc == 31)) {
760 return;
762 if (unlikely(rb == 31)) {
763 tcg_gen_movi_i64(cpu_fir[rc], 0);
764 } else {
765 TCGv tmp = tcg_temp_new();
767 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
768 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
769 tcg_gen_shli_i64(tmp, tmp, 32);
770 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
771 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
773 tcg_temp_free(tmp);
777 static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
779 if (rb != 31) {
780 int lab = gen_new_label();
781 TCGv tmp = tcg_temp_new();
783 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
784 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
785 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
787 gen_set_label(lab);
789 gen_fcvtql(rb, rc);
792 #define FARITH2(name) \
793 static inline void glue(gen_f, name)(int rb, int rc) \
795 if (unlikely(rc == 31)) { \
796 return; \
798 if (rb != 31) { \
799 gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
800 } else { \
801 TCGv tmp = tcg_const_i64(0); \
802 gen_helper_ ## name (cpu_fir[rc], tmp); \
803 tcg_temp_free(tmp); \
807 /* ??? VAX instruction qualifiers ignored. */
808 FARITH2(sqrtf)
809 FARITH2(sqrtg)
810 FARITH2(cvtgf)
811 FARITH2(cvtgq)
812 FARITH2(cvtqf)
813 FARITH2(cvtqg)
815 static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
816 int rb, int rc, int fn11)
818 TCGv vb;
820 /* ??? This is wrong: the instruction is not a nop, it still may
821 raise exceptions. */
822 if (unlikely(rc == 31)) {
823 return;
826 gen_qual_roundmode(ctx, fn11);
827 gen_qual_flushzero(ctx, fn11);
828 gen_fp_exc_clear();
830 vb = gen_ieee_input(rb, fn11, 0);
831 helper(cpu_fir[rc], vb);
832 tcg_temp_free(vb);
834 gen_fp_exc_raise(rc, fn11);
837 #define IEEE_ARITH2(name) \
838 static inline void glue(gen_f, name)(DisasContext *ctx, \
839 int rb, int rc, int fn11) \
841 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
843 IEEE_ARITH2(sqrts)
844 IEEE_ARITH2(sqrtt)
845 IEEE_ARITH2(cvtst)
846 IEEE_ARITH2(cvtts)
848 static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
850 TCGv vb;
851 int ignore = 0;
853 /* ??? This is wrong: the instruction is not a nop, it still may
854 raise exceptions. */
855 if (unlikely(rc == 31)) {
856 return;
859 /* No need to set flushzero, since we have an integer output. */
860 gen_fp_exc_clear();
861 vb = gen_ieee_input(rb, fn11, 0);
863 /* Almost all integer conversions use cropped rounding, and most
864 also do not have integer overflow enabled. Special case that. */
865 switch (fn11) {
866 case QUAL_RM_C:
867 gen_helper_cvttq_c(cpu_fir[rc], vb);
868 break;
869 case QUAL_V | QUAL_RM_C:
870 case QUAL_S | QUAL_V | QUAL_RM_C:
871 ignore = float_flag_inexact;
872 /* FALLTHRU */
873 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
874 gen_helper_cvttq_svic(cpu_fir[rc], vb);
875 break;
876 default:
877 gen_qual_roundmode(ctx, fn11);
878 gen_helper_cvttq(cpu_fir[rc], vb);
879 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
880 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
881 break;
883 tcg_temp_free(vb);
885 gen_fp_exc_raise_ignore(rc, fn11, ignore);
888 static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
889 int rb, int rc, int fn11)
891 TCGv vb;
893 /* ??? This is wrong: the instruction is not a nop, it still may
894 raise exceptions. */
895 if (unlikely(rc == 31)) {
896 return;
899 gen_qual_roundmode(ctx, fn11);
901 if (rb == 31) {
902 vb = tcg_const_i64(0);
903 } else {
904 vb = cpu_fir[rb];
907 /* The only exception that can be raised by integer conversion
908 is inexact. Thus we only need to worry about exceptions when
909 inexact handling is requested. */
910 if (fn11 & QUAL_I) {
911 gen_fp_exc_clear();
912 helper(cpu_fir[rc], vb);
913 gen_fp_exc_raise(rc, fn11);
914 } else {
915 helper(cpu_fir[rc], vb);
918 if (rb == 31) {
919 tcg_temp_free(vb);
923 #define IEEE_INTCVT(name) \
924 static inline void glue(gen_f, name)(DisasContext *ctx, \
925 int rb, int rc, int fn11) \
927 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
929 IEEE_INTCVT(cvtqs)
930 IEEE_INTCVT(cvtqt)
932 static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
934 TCGv va, vb, vmask;
935 int za = 0, zb = 0;
937 if (unlikely(rc == 31)) {
938 return;
941 vmask = tcg_const_i64(mask);
943 TCGV_UNUSED_I64(va);
944 if (ra == 31) {
945 if (inv_a) {
946 va = vmask;
947 } else {
948 za = 1;
950 } else {
951 va = tcg_temp_new_i64();
952 tcg_gen_mov_i64(va, cpu_fir[ra]);
953 if (inv_a) {
954 tcg_gen_andc_i64(va, vmask, va);
955 } else {
956 tcg_gen_and_i64(va, va, vmask);
960 TCGV_UNUSED_I64(vb);
961 if (rb == 31) {
962 zb = 1;
963 } else {
964 vb = tcg_temp_new_i64();
965 tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
968 switch (za << 1 | zb) {
969 case 0 | 0:
970 tcg_gen_or_i64(cpu_fir[rc], va, vb);
971 break;
972 case 0 | 1:
973 tcg_gen_mov_i64(cpu_fir[rc], va);
974 break;
975 case 2 | 0:
976 tcg_gen_mov_i64(cpu_fir[rc], vb);
977 break;
978 case 2 | 1:
979 tcg_gen_movi_i64(cpu_fir[rc], 0);
980 break;
983 tcg_temp_free(vmask);
984 if (ra != 31) {
985 tcg_temp_free(va);
987 if (rb != 31) {
988 tcg_temp_free(vb);
992 static inline void gen_fcpys(int ra, int rb, int rc)
994 gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
997 static inline void gen_fcpysn(int ra, int rb, int rc)
999 gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
1002 static inline void gen_fcpyse(int ra, int rb, int rc)
1004 gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
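/* CPYS and CPYSN operate on the sign bit alone (mask 0x8000000000000000),
   while CPYSE also copies the 11-bit exponent field of a T-format (IEEE
   double) value, hence the 0xFFF0000000000000 mask. */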
1007 #define FARITH3(name) \
1008 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
1010 TCGv va, vb; \
1012 if (unlikely(rc == 31)) { \
1013 return; \
1015 if (ra == 31) { \
1016 va = tcg_const_i64(0); \
1017 } else { \
1018 va = cpu_fir[ra]; \
1020 if (rb == 31) { \
1021 vb = tcg_const_i64(0); \
1022 } else { \
1023 vb = cpu_fir[rb]; \
1026 gen_helper_ ## name (cpu_fir[rc], va, vb); \
1028 if (ra == 31) { \
1029 tcg_temp_free(va); \
1031 if (rb == 31) { \
1032 tcg_temp_free(vb); \
1036 /* ??? VAX instruction qualifiers ignored. */
1037 FARITH3(addf)
1038 FARITH3(subf)
1039 FARITH3(mulf)
1040 FARITH3(divf)
1041 FARITH3(addg)
1042 FARITH3(subg)
1043 FARITH3(mulg)
1044 FARITH3(divg)
1045 FARITH3(cmpgeq)
1046 FARITH3(cmpglt)
1047 FARITH3(cmpgle)
1049 static void gen_ieee_arith3(DisasContext *ctx,
1050 void (*helper)(TCGv, TCGv, TCGv),
1051 int ra, int rb, int rc, int fn11)
1053 TCGv va, vb;
1055 /* ??? This is wrong: the instruction is not a nop, it still may
1056 raise exceptions. */
1057 if (unlikely(rc == 31)) {
1058 return;
1061 gen_qual_roundmode(ctx, fn11);
1062 gen_qual_flushzero(ctx, fn11);
1063 gen_fp_exc_clear();
1065 va = gen_ieee_input(ra, fn11, 0);
1066 vb = gen_ieee_input(rb, fn11, 0);
1067 helper(cpu_fir[rc], va, vb);
1068 tcg_temp_free(va);
1069 tcg_temp_free(vb);
1071 gen_fp_exc_raise(rc, fn11);
1074 #define IEEE_ARITH3(name) \
1075 static inline void glue(gen_f, name)(DisasContext *ctx, \
1076 int ra, int rb, int rc, int fn11) \
1078 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1080 IEEE_ARITH3(adds)
1081 IEEE_ARITH3(subs)
1082 IEEE_ARITH3(muls)
1083 IEEE_ARITH3(divs)
1084 IEEE_ARITH3(addt)
1085 IEEE_ARITH3(subt)
1086 IEEE_ARITH3(mult)
1087 IEEE_ARITH3(divt)
1089 static void gen_ieee_compare(DisasContext *ctx,
1090 void (*helper)(TCGv, TCGv, TCGv),
1091 int ra, int rb, int rc, int fn11)
1093 TCGv va, vb;
1095 /* ??? This is wrong: the instruction is not a nop, it still may
1096 raise exceptions. */
1097 if (unlikely(rc == 31)) {
1098 return;
1101 gen_fp_exc_clear();
1103 va = gen_ieee_input(ra, fn11, 1);
1104 vb = gen_ieee_input(rb, fn11, 1);
1105 helper(cpu_fir[rc], va, vb);
1106 tcg_temp_free(va);
1107 tcg_temp_free(vb);
1109 gen_fp_exc_raise(rc, fn11);
1112 #define IEEE_CMP3(name) \
1113 static inline void glue(gen_f, name)(DisasContext *ctx, \
1114 int ra, int rb, int rc, int fn11) \
1116 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1118 IEEE_CMP3(cmptun)
1119 IEEE_CMP3(cmpteq)
1120 IEEE_CMP3(cmptlt)
1121 IEEE_CMP3(cmptle)
1123 static inline uint64_t zapnot_mask(uint8_t lit)
1125 uint64_t mask = 0;
1126 int i;
1128 for (i = 0; i < 8; ++i) {
1129 if ((lit >> i) & 1)
1130 mask |= 0xffull << (i * 8);
1132 return mask;
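/* Each set bit of the 8-bit literal selects the corresponding byte of the
   result, e.g. zapnot_mask(0x0f) == 0x00000000ffffffffull. ZAP uses the
   complemented literal; see gen_zap/gen_zapnoti below. */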
1135 /* Implement zapnot with an immediate operand, which expands to some
1136 form of immediate AND. This is a basic building block in the
1137 definition of many of the other byte manipulation instructions. */
1138 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
1140 switch (lit) {
1141 case 0x00:
1142 tcg_gen_movi_i64(dest, 0);
1143 break;
1144 case 0x01:
1145 tcg_gen_ext8u_i64(dest, src);
1146 break;
1147 case 0x03:
1148 tcg_gen_ext16u_i64(dest, src);
1149 break;
1150 case 0x0f:
1151 tcg_gen_ext32u_i64(dest, src);
1152 break;
1153 case 0xff:
1154 tcg_gen_mov_i64(dest, src);
1155 break;
1156 default:
1157 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
1158 break;
1162 static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1164 if (unlikely(rc == 31))
1165 return;
1166 else if (unlikely(ra == 31))
1167 tcg_gen_movi_i64(cpu_ir[rc], 0);
1168 else if (islit)
1169 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
1170 else
1171 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1174 static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1176 if (unlikely(rc == 31))
1177 return;
1178 else if (unlikely(ra == 31))
1179 tcg_gen_movi_i64(cpu_ir[rc], 0);
1180 else if (islit)
1181 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
1182 else
1183 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1187 /* EXTWH, EXTLH, EXTQH */
1188 static void gen_ext_h(int ra, int rb, int rc, int islit,
1189 uint8_t lit, uint8_t byte_mask)
1191 if (unlikely(rc == 31))
1192 return;
1193 else if (unlikely(ra == 31))
1194 tcg_gen_movi_i64(cpu_ir[rc], 0);
1195 else {
1196 if (islit) {
1197 lit = (64 - (lit & 7) * 8) & 0x3f;
1198 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1199 } else {
1200 TCGv tmp1 = tcg_temp_new();
1201 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1202 tcg_gen_shli_i64(tmp1, tmp1, 3);
1203 tcg_gen_neg_i64(tmp1, tmp1);
1204 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
1205 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
1206 tcg_temp_free(tmp1);
1208 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1212 /* EXTBL, EXTWL, EXTLL, EXTQL */
1213 static void gen_ext_l(int ra, int rb, int rc, int islit,
1214 uint8_t lit, uint8_t byte_mask)
1216 if (unlikely(rc == 31))
1217 return;
1218 else if (unlikely(ra == 31))
1219 tcg_gen_movi_i64(cpu_ir[rc], 0);
1220 else {
1221 if (islit) {
1222 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
1223 } else {
1224 TCGv tmp = tcg_temp_new();
1225 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1226 tcg_gen_shli_i64(tmp, tmp, 3);
1227 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
1228 tcg_temp_free(tmp);
1230 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1234 /* INSWH, INSLH, INSQH */
1235 static void gen_ins_h(int ra, int rb, int rc, int islit,
1236 uint8_t lit, uint8_t byte_mask)
1238 if (unlikely(rc == 31))
1239 return;
1240 else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
1241 tcg_gen_movi_i64(cpu_ir[rc], 0);
1242 else {
1243 TCGv tmp = tcg_temp_new();
1245 /* The instruction description has us left-shift the byte mask
1246 and extract bits <15:8> and apply that zap at the end. This
1247 is equivalent to simply performing the zap first and shifting
1248 afterward. */
1249 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1251 if (islit) {
1252 /* Note that we have handled the lit==0 case above. */
1253 tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
1254 } else {
1255 TCGv shift = tcg_temp_new();
1257 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1258 Do this portably by splitting the shift into two parts:
1259 shift_count-1 and 1. Arrange for the -1 by using
1260 ones-complement instead of twos-complement in the negation:
1261 ~((B & 7) * 8) & 63. */
1263 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1264 tcg_gen_shli_i64(shift, shift, 3);
1265 tcg_gen_not_i64(shift, shift);
1266 tcg_gen_andi_i64(shift, shift, 0x3f);
1268 tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
1269 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
1270 tcg_temp_free(shift);
1272 tcg_temp_free(tmp);
1276 /* INSBL, INSWL, INSLL, INSQL */
1277 static void gen_ins_l(int ra, int rb, int rc, int islit,
1278 uint8_t lit, uint8_t byte_mask)
1280 if (unlikely(rc == 31))
1281 return;
1282 else if (unlikely(ra == 31))
1283 tcg_gen_movi_i64(cpu_ir[rc], 0);
1284 else {
1285 TCGv tmp = tcg_temp_new();
1287 /* The instruction description has us left-shift the byte mask
1288 the same number of byte slots as the data and apply the zap
1289 at the end. This is equivalent to simply performing the zap
1290 first and shifting afterward. */
1291 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1293 if (islit) {
1294 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1295 } else {
1296 TCGv shift = tcg_temp_new();
1297 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1298 tcg_gen_shli_i64(shift, shift, 3);
1299 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1300 tcg_temp_free(shift);
1302 tcg_temp_free(tmp);
1306 /* MSKWH, MSKLH, MSKQH */
1307 static void gen_msk_h(int ra, int rb, int rc, int islit,
1308 uint8_t lit, uint8_t byte_mask)
1310 if (unlikely(rc == 31))
1311 return;
1312 else if (unlikely(ra == 31))
1313 tcg_gen_movi_i64(cpu_ir[rc], 0);
1314 else if (islit) {
1315 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
1316 } else {
1317 TCGv shift = tcg_temp_new();
1318 TCGv mask = tcg_temp_new();
1320 /* The instruction description is as above, where the byte_mask
1321 is shifted left, and then we extract bits <15:8>. This can be
1322 emulated with a right-shift on the expanded byte mask. This
1323 requires extra care because for an input <2:0> == 0 we need a
1324 shift of 64 bits in order to generate a zero. This is done by
1325 splitting the shift into two parts, the variable shift - 1
1326 followed by a constant 1 shift. The code we expand below is
1327 equivalent to ~((B & 7) * 8) & 63. */
1329 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1330 tcg_gen_shli_i64(shift, shift, 3);
1331 tcg_gen_not_i64(shift, shift);
1332 tcg_gen_andi_i64(shift, shift, 0x3f);
1333 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1334 tcg_gen_shr_i64(mask, mask, shift);
1335 tcg_gen_shri_i64(mask, mask, 1);
1337 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1339 tcg_temp_free(mask);
1340 tcg_temp_free(shift);
1344 /* MSKBL, MSKWL, MSKLL, MSKQL */
1345 static void gen_msk_l(int ra, int rb, int rc, int islit,
1346 uint8_t lit, uint8_t byte_mask)
1348 if (unlikely(rc == 31))
1349 return;
1350 else if (unlikely(ra == 31))
1351 tcg_gen_movi_i64(cpu_ir[rc], 0);
1352 else if (islit) {
1353 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1354 } else {
1355 TCGv shift = tcg_temp_new();
1356 TCGv mask = tcg_temp_new();
1358 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1359 tcg_gen_shli_i64(shift, shift, 3);
1360 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1361 tcg_gen_shl_i64(mask, mask, shift);
1363 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1365 tcg_temp_free(mask);
1366 tcg_temp_free(shift);
1370 /* Code to call arith3 helpers */
1371 #define ARITH3(name) \
1372 static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1373 uint8_t lit) \
1375 if (unlikely(rc == 31)) \
1376 return; \
1378 if (ra != 31) { \
1379 if (islit) { \
1380 TCGv tmp = tcg_const_i64(lit); \
1381 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1382 tcg_temp_free(tmp); \
1383 } else \
1384 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1385 } else { \
1386 TCGv tmp1 = tcg_const_i64(0); \
1387 if (islit) { \
1388 TCGv tmp2 = tcg_const_i64(lit); \
1389 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1390 tcg_temp_free(tmp2); \
1391 } else \
1392 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1393 tcg_temp_free(tmp1); \
1396 ARITH3(cmpbge)
1397 ARITH3(addlv)
1398 ARITH3(sublv)
1399 ARITH3(addqv)
1400 ARITH3(subqv)
1401 ARITH3(umulh)
1402 ARITH3(mullv)
1403 ARITH3(mulqv)
1404 ARITH3(minub8)
1405 ARITH3(minsb8)
1406 ARITH3(minuw4)
1407 ARITH3(minsw4)
1408 ARITH3(maxub8)
1409 ARITH3(maxsb8)
1410 ARITH3(maxuw4)
1411 ARITH3(maxsw4)
1412 ARITH3(perr)
1414 #define MVIOP2(name) \
1415 static inline void glue(gen_, name)(int rb, int rc) \
1417 if (unlikely(rc == 31)) \
1418 return; \
1419 if (unlikely(rb == 31)) \
1420 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1421 else \
1422 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1424 MVIOP2(pklb)
1425 MVIOP2(pkwb)
1426 MVIOP2(unpkbl)
1427 MVIOP2(unpkbw)
1429 static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1430 int islit, uint8_t lit)
1432 TCGv va, vb;
1434 if (unlikely(rc == 31)) {
1435 return;
1438 if (ra == 31) {
1439 va = tcg_const_i64(0);
1440 } else {
1441 va = cpu_ir[ra];
1443 if (islit) {
1444 vb = tcg_const_i64(lit);
1445 } else {
1446 vb = cpu_ir[rb];
1449 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
1451 if (ra == 31) {
1452 tcg_temp_free(va);
1454 if (islit) {
1455 tcg_temp_free(vb);
1459 static void gen_rx(int ra, int set)
1461 TCGv_i32 tmp;
1463 if (ra != 31) {
1464 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
1467 tmp = tcg_const_i32(set);
1468 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
1469 tcg_temp_free_i32(tmp);
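/* Used for the RC and RS instructions: the previous value of the sticky
   intr_flag is returned in Ra (unless Ra is $31), after which the flag is
   overwritten with 'set' (0 for RC, 1 for RS). */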
1472 static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
1474 /* We're emulating OSF/1 PALcode. Many of these are trivial accesses
1475 to internal cpu registers. */
1477 /* Unprivileged PAL call */
1478 if (palcode >= 0x80 && palcode < 0xC0) {
1479 switch (palcode) {
1480 case 0x86:
1481 /* IMB */
1482 /* No-op inside QEMU. */
1483 break;
1484 case 0x9E:
1485 /* RDUNIQUE */
1486 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
1487 break;
1488 case 0x9F:
1489 /* WRUNIQUE */
1490 tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
1491 break;
1492 default:
1493 return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
1495 return NO_EXIT;
1498 #ifndef CONFIG_USER_ONLY
1499 /* Privileged PAL code */
1500 if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
1501 switch (palcode) {
1502 case 0x01:
1503 /* CFLUSH */
1504 /* No-op inside QEMU. */
1505 break;
1506 case 0x02:
1507 /* DRAINA */
1508 /* No-op inside QEMU. */
1509 break;
1510 case 0x2D:
1511 /* WRVPTPTR */
1512 tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUState, vptptr));
1513 break;
1514 case 0x31:
1515 /* WRVAL */
1516 tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
1517 break;
1518 case 0x32:
1519 /* RDVAL */
1520 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
1521 break;
1523 case 0x35: {
1524 /* SWPIPL */
1525 TCGv tmp;
1527 /* Note that we already know we're in kernel mode, so we know
1528 that PS only contains the 3 IPL bits. */
1529 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
1531 /* But make sure to store only the 3 IPL bits from the user. */
1532 tmp = tcg_temp_new();
1533 tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
1534 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUState, ps));
1535 tcg_temp_free(tmp);
1536 break;
1539 case 0x36:
1540 /* RDPS */
1541 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
1542 break;
1543 case 0x38:
1544 /* WRUSP */
1545 tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
1546 break;
1547 case 0x3A:
1548 /* RDUSP */
1549 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
1550 break;
1551 case 0x3C:
1552 /* WHAMI */
1553 tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
1554 offsetof(CPUState, cpu_index));
1555 break;
1557 default:
1558 return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
1560 return NO_EXIT;
1562 #endif
1564 return gen_invalid(ctx);
1567 #ifndef CONFIG_USER_ONLY
1569 #define PR_BYTE 0x100000
1570 #define PR_LONG 0x200000
1572 static int cpu_pr_data(int pr)
1574 switch (pr) {
1575 case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
1576 case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
1577 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1578 case 3: return offsetof(CPUAlphaState, trap_arg0);
1579 case 4: return offsetof(CPUAlphaState, trap_arg1);
1580 case 5: return offsetof(CPUAlphaState, trap_arg2);
1581 case 6: return offsetof(CPUAlphaState, exc_addr);
1582 case 7: return offsetof(CPUAlphaState, palbr);
1583 case 8: return offsetof(CPUAlphaState, ptbr);
1584 case 9: return offsetof(CPUAlphaState, vptptr);
1585 case 10: return offsetof(CPUAlphaState, unique);
1586 case 11: return offsetof(CPUAlphaState, sysval);
1587 case 12: return offsetof(CPUAlphaState, usp);
1589 case 32 ... 39:
1590 return offsetof(CPUAlphaState, shadow[pr - 32]);
1591 case 40 ... 63:
1592 return offsetof(CPUAlphaState, scratch[pr - 40]);
1594 return 0;
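/* cpu_pr_data maps an emulated processor-register number to its offset within
   CPUAlphaState, with PR_BYTE/PR_LONG marking 8-bit and 32-bit fields. A
   return value of 0 means the register is unimplemented; gen_mfpr and
   gen_mtpr below treat such registers as read-zero, write-ignore. */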
1597 static void gen_mfpr(int ra, int regno)
1599 int data = cpu_pr_data(regno);
1601 /* In our emulated PALcode, these processor registers have no
1602 side effects from reading. */
1603 if (ra == 31) {
1604 return;
1607 /* The basic registers are data only, and unknown registers
1608 are read-zero, write-ignore. */
1609 if (data == 0) {
1610 tcg_gen_movi_i64(cpu_ir[ra], 0);
1611 } else if (data & PR_BYTE) {
1612 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
1613 } else if (data & PR_LONG) {
1614 tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
1615 } else {
1616 tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
1620 static void gen_mtpr(int rb, int regno)
1622 TCGv tmp;
1624 if (rb == 31) {
1625 tmp = tcg_const_i64(0);
1626 } else {
1627 tmp = cpu_ir[rb];
1630 /* These two register numbers perform a TLB cache flush. Thankfully we
1631 can only do this inside PALmode, which means that the current basic
1632 block cannot be affected by the change in mappings. */
1633 if (regno == 255) {
1634 /* TBIA */
1635 gen_helper_tbia();
1636 } else if (regno == 254) {
1637 /* TBIS */
1638 gen_helper_tbis(tmp);
1639 } else {
1640 /* The basic registers are data only, and unknown registers
1641 are read-zero, write-ignore. */
1642 int data = cpu_pr_data(regno);
1643 if (data != 0) {
1644 if (data & PR_BYTE) {
1645 tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
1646 } else if (data & PR_LONG) {
1647 tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
1648 } else {
1649 tcg_gen_st_i64(tmp, cpu_env, data);
1654 if (rb == 31) {
1655 tcg_temp_free(tmp);
1658 #endif /* !USER_ONLY */
1660 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
1662 uint32_t palcode;
1663 int32_t disp21, disp16;
1664 #ifndef CONFIG_USER_ONLY
1665 int32_t disp12;
1666 #endif
1667 uint16_t fn11;
1668 uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
1669 uint8_t lit;
1670 ExitStatus ret;
1672 /* Decode all instruction fields */
1673 opc = insn >> 26;
1674 ra = (insn >> 21) & 0x1F;
1675 rb = (insn >> 16) & 0x1F;
1676 rc = insn & 0x1F;
1677 real_islit = islit = (insn >> 12) & 1;
1678 if (rb == 31 && !islit) {
1679 islit = 1;
1680 lit = 0;
1681 } else
1682 lit = (insn >> 13) & 0xFF;
1683 palcode = insn & 0x03FFFFFF;
1684 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1685 disp16 = (int16_t)(insn & 0x0000FFFF);
1686 #ifndef CONFIG_USER_ONLY
1687 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
1688 #endif
1689 fn11 = (insn >> 5) & 0x000007FF;
1690 fpfn = fn11 & 0x3F;
1691 fn7 = (insn >> 5) & 0x0000007F;
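/* Field layout recovered above: opc = insn<31:26>, ra = insn<25:21>,
   rb = insn<20:16>, rc = insn<4:0>, literal flag = insn<12> with
   lit = insn<20:13>, fn7 = insn<11:5>, fn11 = insn<15:5>, and sign-extended
   displacements disp16 = insn<15:0>, disp21 = insn<20:0> (disp12 = insn<11:0>,
   used only by the non-user-mode paths). */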
1692 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
1693 opc, ra, rb, rc, disp16);
1695 ret = NO_EXIT;
1696 switch (opc) {
1697 case 0x00:
1698 /* CALL_PAL */
1699 ret = gen_call_pal(ctx, palcode);
1700 break;
1701 case 0x01:
1702 /* OPC01 */
1703 goto invalid_opc;
1704 case 0x02:
1705 /* OPC02 */
1706 goto invalid_opc;
1707 case 0x03:
1708 /* OPC03 */
1709 goto invalid_opc;
1710 case 0x04:
1711 /* OPC04 */
1712 goto invalid_opc;
1713 case 0x05:
1714 /* OPC05 */
1715 goto invalid_opc;
1716 case 0x06:
1717 /* OPC06 */
1718 goto invalid_opc;
1719 case 0x07:
1720 /* OPC07 */
1721 goto invalid_opc;
1722 case 0x08:
1723 /* LDA */
1724 if (likely(ra != 31)) {
1725 if (rb != 31)
1726 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1727 else
1728 tcg_gen_movi_i64(cpu_ir[ra], disp16);
1730 break;
1731 case 0x09:
1732 /* LDAH */
1733 if (likely(ra != 31)) {
1734 if (rb != 31)
1735 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1736 else
1737 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
1739 break;
1740 case 0x0A:
1741 /* LDBU */
1742 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1743 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1744 break;
1746 goto invalid_opc;
1747 case 0x0B:
1748 /* LDQ_U */
1749 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1750 break;
1751 case 0x0C:
1752 /* LDWU */
1753 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1754 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1755 break;
1757 goto invalid_opc;
1758 case 0x0D:
1759 /* STW */
1760 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1761 break;
1762 case 0x0E:
1763 /* STB */
1764 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1765 break;
1766 case 0x0F:
1767 /* STQ_U */
1768 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1769 break;
1770 case 0x10:
1771 switch (fn7) {
1772 case 0x00:
1773 /* ADDL */
1774 if (likely(rc != 31)) {
1775 if (ra != 31) {
1776 if (islit) {
1777 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1778 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1779 } else {
1780 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1781 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1783 } else {
1784 if (islit)
1785 tcg_gen_movi_i64(cpu_ir[rc], lit);
1786 else
1787 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1790 break;
1791 case 0x02:
1792 /* S4ADDL */
1793 if (likely(rc != 31)) {
1794 if (ra != 31) {
1795 TCGv tmp = tcg_temp_new();
1796 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1797 if (islit)
1798 tcg_gen_addi_i64(tmp, tmp, lit);
1799 else
1800 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1801 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1802 tcg_temp_free(tmp);
1803 } else {
1804 if (islit)
1805 tcg_gen_movi_i64(cpu_ir[rc], lit);
1806 else
1807 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1810 break;
1811 case 0x09:
1812 /* SUBL */
1813 if (likely(rc != 31)) {
1814 if (ra != 31) {
1815 if (islit)
1816 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1817 else
1818 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1819 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1820 } else {
1821 if (islit)
1822 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1823 else {
1824 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1825 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1828 break;
1829 case 0x0B:
1830 /* S4SUBL */
1831 if (likely(rc != 31)) {
1832 if (ra != 31) {
1833 TCGv tmp = tcg_temp_new();
1834 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1835 if (islit)
1836 tcg_gen_subi_i64(tmp, tmp, lit);
1837 else
1838 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1839 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1840 tcg_temp_free(tmp);
1841 } else {
1842 if (islit)
1843 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1844 else {
1845 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1846 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1850 break;
1851 case 0x0F:
1852 /* CMPBGE */
1853 gen_cmpbge(ra, rb, rc, islit, lit);
1854 break;
1855 case 0x12:
1856 /* S8ADDL */
1857 if (likely(rc != 31)) {
1858 if (ra != 31) {
1859 TCGv tmp = tcg_temp_new();
1860 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1861 if (islit)
1862 tcg_gen_addi_i64(tmp, tmp, lit);
1863 else
1864 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1865 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1866 tcg_temp_free(tmp);
1867 } else {
1868 if (islit)
1869 tcg_gen_movi_i64(cpu_ir[rc], lit);
1870 else
1871 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1874 break;
1875 case 0x1B:
1876 /* S8SUBL */
1877 if (likely(rc != 31)) {
1878 if (ra != 31) {
1879 TCGv tmp = tcg_temp_new();
1880 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1881 if (islit)
1882 tcg_gen_subi_i64(tmp, tmp, lit);
1883 else
1884 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1885 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1886 tcg_temp_free(tmp);
1887 } else {
1888 if (islit)
1889 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1890 else
1891 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1892 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1896 break;
1897 case 0x1D:
1898 /* CMPULT */
1899 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
1900 break;
1901 case 0x20:
1902 /* ADDQ */
1903 if (likely(rc != 31)) {
1904 if (ra != 31) {
1905 if (islit)
1906 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1907 else
1908 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1909 } else {
1910 if (islit)
1911 tcg_gen_movi_i64(cpu_ir[rc], lit);
1912 else
1913 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1916 break;
1917 case 0x22:
1918 /* S4ADDQ */
1919 if (likely(rc != 31)) {
1920 if (ra != 31) {
1921 TCGv tmp = tcg_temp_new();
1922 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1923 if (islit)
1924 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1925 else
1926 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1927 tcg_temp_free(tmp);
1928 } else {
1929 if (islit)
1930 tcg_gen_movi_i64(cpu_ir[rc], lit);
1931 else
1932 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1935 break;
1936 case 0x29:
1937 /* SUBQ */
1938 if (likely(rc != 31)) {
1939 if (ra != 31) {
1940 if (islit)
1941 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1942 else
1943 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1944 } else {
1945 if (islit)
1946 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1947 else
1948 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1951 break;
1952 case 0x2B:
1953 /* S4SUBQ */
1954 if (likely(rc != 31)) {
1955 if (ra != 31) {
1956 TCGv tmp = tcg_temp_new();
1957 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1958 if (islit)
1959 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1960 else
1961 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1962 tcg_temp_free(tmp);
1963 } else {
1964 if (islit)
1965 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1966 else
1967 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1970 break;
1971 case 0x2D:
1972 /* CMPEQ */
1973 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
1974 break;
1975 case 0x32:
1976 /* S8ADDQ */
1977 if (likely(rc != 31)) {
1978 if (ra != 31) {
1979 TCGv tmp = tcg_temp_new();
1980 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1981 if (islit)
1982 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1983 else
1984 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1985 tcg_temp_free(tmp);
1986 } else {
1987 if (islit)
1988 tcg_gen_movi_i64(cpu_ir[rc], lit);
1989 else
1990 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1993 break;
1994 case 0x3B:
1995 /* S8SUBQ */
1996 if (likely(rc != 31)) {
1997 if (ra != 31) {
1998 TCGv tmp = tcg_temp_new();
1999 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2000 if (islit)
2001 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2002 else
2003 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2004 tcg_temp_free(tmp);
2005 } else {
2006 if (islit)
2007 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2008 else
2009 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2012 break;
2013 case 0x3D:
2014 /* CMPULE */
2015 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
2016 break;
2017 case 0x40:
2018 /* ADDL/V */
2019 gen_addlv(ra, rb, rc, islit, lit);
2020 break;
2021 case 0x49:
2022 /* SUBL/V */
2023 gen_sublv(ra, rb, rc, islit, lit);
2024 break;
2025 case 0x4D:
2026 /* CMPLT */
2027 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
2028 break;
2029 case 0x60:
2030 /* ADDQ/V */
2031 gen_addqv(ra, rb, rc, islit, lit);
2032 break;
2033 case 0x69:
2034 /* SUBQ/V */
2035 gen_subqv(ra, rb, rc, islit, lit);
2036 break;
2037 case 0x6D:
2038 /* CMPLE */
2039 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
2040 break;
2041 default:
2042 goto invalid_opc;
2044 break;
2045 case 0x11:
2046 switch (fn7) {
2047 case 0x00:
2048 /* AND */
2049 if (likely(rc != 31)) {
2050 if (ra == 31)
2051 tcg_gen_movi_i64(cpu_ir[rc], 0);
2052 else if (islit)
2053 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2054 else
2055 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2057 break;
2058 case 0x08:
2059 /* BIC */
2060 if (likely(rc != 31)) {
2061 if (ra != 31) {
2062 if (islit)
2063 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2064 else
2065 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2066 } else
2067 tcg_gen_movi_i64(cpu_ir[rc], 0);
2069 break;
2070 case 0x14:
2071 /* CMOVLBS */
2072 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
2073 break;
2074 case 0x16:
2075 /* CMOVLBC */
2076 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
2077 break;
2078 case 0x20:
2079 /* BIS */
2080 if (likely(rc != 31)) {
2081 if (ra != 31) {
2082 if (islit)
2083 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2084 else
2085 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2086 } else {
2087 if (islit)
2088 tcg_gen_movi_i64(cpu_ir[rc], lit);
2089 else
2090 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2093 break;
2094 case 0x24:
2095 /* CMOVEQ */
2096 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
2097 break;
2098 case 0x26:
2099 /* CMOVNE */
2100 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
2101 break;
2102 case 0x28:
2103 /* ORNOT */
2104 if (likely(rc != 31)) {
2105 if (ra != 31) {
2106 if (islit)
2107 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2108 else
2109 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2110 } else {
2111 if (islit)
2112 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2113 else
2114 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2117 break;
2118 case 0x40:
2119 /* XOR */
2120 if (likely(rc != 31)) {
2121 if (ra != 31) {
2122 if (islit)
2123 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2124 else
2125 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2126 } else {
2127 if (islit)
2128 tcg_gen_movi_i64(cpu_ir[rc], lit);
2129 else
2130 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2133 break;
2134 case 0x44:
2135 /* CMOVLT */
2136 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
2137 break;
2138 case 0x46:
2139 /* CMOVGE */
2140 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
2141 break;
2142 case 0x48:
2143 /* EQV */
2144 if (likely(rc != 31)) {
2145 if (ra != 31) {
2146 if (islit)
2147 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2148 else
2149 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2150 } else {
2151 if (islit)
2152 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2153 else
2154 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2157 break;
2158 case 0x61:
2159 /* AMASK */
2160 if (likely(rc != 31)) {
2161 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2163 if (islit) {
2164 tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2165 } else {
2166 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
2169 break;
2170 case 0x64:
2171 /* CMOVLE */
2172 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
2173 break;
2174 case 0x66:
2175 /* CMOVGT */
2176 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
2177 break;
2178 case 0x6C:
2179 /* IMPLVER */
2180 if (rc != 31)
2181 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
2182 break;
2183 default:
2184 goto invalid_opc;
2186 break;
2187 case 0x12:
2188 switch (fn7) {
2189 case 0x02:
2190 /* MSKBL */
2191 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
2192 break;
2193 case 0x06:
2194 /* EXTBL */
2195 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
2196 break;
2197 case 0x0B:
2198 /* INSBL */
2199 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
2200 break;
2201 case 0x12:
2202 /* MSKWL */
2203 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
2204 break;
2205 case 0x16:
2206 /* EXTWL */
2207 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
2208 break;
2209 case 0x1B:
2210 /* INSWL */
2211 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
2212 break;
2213 case 0x22:
2214 /* MSKLL */
2215 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
2216 break;
2217 case 0x26:
2218 /* EXTLL */
2219 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
2220 break;
2221 case 0x2B:
2222 /* INSLL */
2223 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
2224 break;
2225 case 0x30:
2226 /* ZAP */
2227 gen_zap(ra, rb, rc, islit, lit);
2228 break;
2229 case 0x31:
2230 /* ZAPNOT */
2231 gen_zapnot(ra, rb, rc, islit, lit);
2232 break;
2233 case 0x32:
2234 /* MSKQL */
2235 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
2236 break;
2237 case 0x34:
2238 /* SRL */
2239 if (likely(rc != 31)) {
2240 if (ra != 31) {
2241 if (islit)
2242 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2243 else {
2244 TCGv shift = tcg_temp_new();
2245 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2246 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2247 tcg_temp_free(shift);
2249 } else
2250 tcg_gen_movi_i64(cpu_ir[rc], 0);
2252 break;
2253 case 0x36:
2254 /* EXTQL */
2255 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
2256 break;
2257 case 0x39:
2258 /* SLL */
2259 if (likely(rc != 31)) {
2260 if (ra != 31) {
2261 if (islit)
2262 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2263 else {
2264 TCGv shift = tcg_temp_new();
2265 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2266 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2267 tcg_temp_free(shift);
2269 } else
2270 tcg_gen_movi_i64(cpu_ir[rc], 0);
2272 break;
2273 case 0x3B:
2274 /* INSQL */
2275 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
2276 break;
2277 case 0x3C:
2278 /* SRA */
2279 if (likely(rc != 31)) {
2280 if (ra != 31) {
2281 if (islit)
2282 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2283 else {
2284 TCGv shift = tcg_temp_new();
2285 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2286 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2287 tcg_temp_free(shift);
2289 } else
2290 tcg_gen_movi_i64(cpu_ir[rc], 0);
2292 break;
2293 case 0x52:
2294 /* MSKWH */
2295 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
2296 break;
2297 case 0x57:
2298 /* INSWH */
2299 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
2300 break;
2301 case 0x5A:
2302 /* EXTWH */
2303 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
2304 break;
2305 case 0x62:
2306 /* MSKLH */
2307 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
2308 break;
2309 case 0x67:
2310 /* INSLH */
2311 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
2312 break;
2313 case 0x6A:
2314 /* EXTLH */
2315 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
2316 break;
2317 case 0x72:
2318 /* MSKQH */
2319 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
2320 break;
2321 case 0x77:
2322 /* INSQH */
2323 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
2324 break;
2325 case 0x7A:
2326 /* EXTQH */
2327 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
2328 break;
2329 default:
2330 goto invalid_opc;
2331 }
2332 break;
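/* Opcode 0x13: integer multiplies. MULL sign-extends its 32-bit result,
   UMULH returns the high half of the unsigned 128-bit product, and the /V
   forms go through helpers so that integer overflow can be detected. */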
2333 case 0x13:
2334 switch (fn7) {
2335 case 0x00:
2336 /* MULL */
2337 if (likely(rc != 31)) {
2338 if (ra == 31)
2339 tcg_gen_movi_i64(cpu_ir[rc], 0);
2340 else {
2341 if (islit)
2342 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2343 else
2344 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2345 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2346 }
2347 }
2348 break;
2349 case 0x20:
2350 /* MULQ */
2351 if (likely(rc != 31)) {
2352 if (ra == 31)
2353 tcg_gen_movi_i64(cpu_ir[rc], 0);
2354 else if (islit)
2355 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2356 else
2357 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2358 }
2359 break;
2360 case 0x30:
2361 /* UMULH */
2362 gen_umulh(ra, rb, rc, islit, lit);
2363 break;
2364 case 0x40:
2365 /* MULL/V */
2366 gen_mullv(ra, rb, rc, islit, lit);
2367 break;
2368 case 0x60:
2369 /* MULQ/V */
2370 gen_mulqv(ra, rb, rc, islit, lit);
2371 break;
2372 default:
2373 goto invalid_opc;
2374 }
2375 break;
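/* Opcode 0x14: ITOF* moves from integer to FP registers plus the square
   roots. All of these arrived with the FIX extension, so every case is
   gated on TB_FLAGS_AMASK_FIX. */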
2376 case 0x14:
2377 switch (fpfn) { /* fn11 & 0x3F */
2378 case 0x04:
2379 /* ITOFS */
2380 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2381 goto invalid_opc;
2382 }
2383 if (likely(rc != 31)) {
2384 if (ra != 31) {
2385 TCGv_i32 tmp = tcg_temp_new_i32();
2386 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2387 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2388 tcg_temp_free_i32(tmp);
2389 } else
2390 tcg_gen_movi_i64(cpu_fir[rc], 0);
2391 }
2392 break;
2393 case 0x0A:
2394 /* SQRTF */
2395 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2396 gen_fsqrtf(rb, rc);
2397 break;
2398 }
2399 goto invalid_opc;
2400 case 0x0B:
2401 /* SQRTS */
2402 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2403 gen_fsqrts(ctx, rb, rc, fn11);
2404 break;
2405 }
2406 goto invalid_opc;
2407 case 0x14:
2408 /* ITOFF */
2409 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2410 goto invalid_opc;
2411 }
2412 if (likely(rc != 31)) {
2413 if (ra != 31) {
2414 TCGv_i32 tmp = tcg_temp_new_i32();
2415 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2416 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2417 tcg_temp_free_i32(tmp);
2418 } else
2419 tcg_gen_movi_i64(cpu_fir[rc], 0);
2420 }
2421 break;
2422 case 0x24:
2423 /* ITOFT */
2424 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2425 goto invalid_opc;
2426 }
2427 if (likely(rc != 31)) {
2428 if (ra != 31)
2429 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2430 else
2431 tcg_gen_movi_i64(cpu_fir[rc], 0);
2432 }
2433 break;
2434 case 0x2A:
2435 /* SQRTG */
2436 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2437 gen_fsqrtg(rb, rc);
2438 break;
2439 }
2440 goto invalid_opc;
2441 case 0x2B:
2442 /* SQRTT */
2443 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2444 gen_fsqrtt(ctx, rb, rc, fn11);
2445 break;
2446 }
2447 goto invalid_opc;
2448 default:
2449 goto invalid_opc;
2450 }
2451 break;
2452 case 0x15:
2453 /* VAX floating point */
2454 /* XXX: rounding mode and trap are ignored (!) */
2455 switch (fpfn) { /* fn11 & 0x3F */
2456 case 0x00:
2457 /* ADDF */
2458 gen_faddf(ra, rb, rc);
2459 break;
2460 case 0x01:
2461 /* SUBF */
2462 gen_fsubf(ra, rb, rc);
2463 break;
2464 case 0x02:
2465 /* MULF */
2466 gen_fmulf(ra, rb, rc);
2467 break;
2468 case 0x03:
2469 /* DIVF */
2470 gen_fdivf(ra, rb, rc);
2471 break;
2472 case 0x1E:
2473 /* CVTDG */
2474 #if 0 // TODO
2475 gen_fcvtdg(rb, rc);
2476 #else
2477 goto invalid_opc;
2478 #endif
2479 break;
2480 case 0x20:
2481 /* ADDG */
2482 gen_faddg(ra, rb, rc);
2483 break;
2484 case 0x21:
2485 /* SUBG */
2486 gen_fsubg(ra, rb, rc);
2487 break;
2488 case 0x22:
2489 /* MULG */
2490 gen_fmulg(ra, rb, rc);
2491 break;
2492 case 0x23:
2493 /* DIVG */
2494 gen_fdivg(ra, rb, rc);
2495 break;
2496 case 0x25:
2497 /* CMPGEQ */
2498 gen_fcmpgeq(ra, rb, rc);
2499 break;
2500 case 0x26:
2501 /* CMPGLT */
2502 gen_fcmpglt(ra, rb, rc);
2503 break;
2504 case 0x27:
2505 /* CMPGLE */
2506 gen_fcmpgle(ra, rb, rc);
2507 break;
2508 case 0x2C:
2509 /* CVTGF */
2510 gen_fcvtgf(rb, rc);
2511 break;
2512 case 0x2D:
2513 /* CVTGD */
2514 #if 0 // TODO
2515 gen_fcvtgd(rb, rc);
2516 #else
2517 goto invalid_opc;
2518 #endif
2519 break;
2520 case 0x2F:
2521 /* CVTGQ */
2522 gen_fcvtgq(rb, rc);
2523 break;
2524 case 0x3C:
2525 /* CVTQF */
2526 gen_fcvtqf(rb, rc);
2527 break;
2528 case 0x3E:
2529 /* CVTQG */
2530 gen_fcvtqg(rb, rc);
2531 break;
2532 default:
2533 goto invalid_opc;
2534 }
2535 break;
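/* For the IEEE operations, fn11 carries the rounding and trapping
   qualifiers of the instruction; it is passed straight through to the
   gen_f* expanders, which track the resulting rounding/flush-to-zero
   choice per TB (see tb_rm/tb_ftz in DisasContext). */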
2536 case 0x16:
2537 /* IEEE floating-point */
2538 switch (fpfn) { /* fn11 & 0x3F */
2539 case 0x00:
2540 /* ADDS */
2541 gen_fadds(ctx, ra, rb, rc, fn11);
2542 break;
2543 case 0x01:
2544 /* SUBS */
2545 gen_fsubs(ctx, ra, rb, rc, fn11);
2546 break;
2547 case 0x02:
2548 /* MULS */
2549 gen_fmuls(ctx, ra, rb, rc, fn11);
2550 break;
2551 case 0x03:
2552 /* DIVS */
2553 gen_fdivs(ctx, ra, rb, rc, fn11);
2554 break;
2555 case 0x20:
2556 /* ADDT */
2557 gen_faddt(ctx, ra, rb, rc, fn11);
2558 break;
2559 case 0x21:
2560 /* SUBT */
2561 gen_fsubt(ctx, ra, rb, rc, fn11);
2562 break;
2563 case 0x22:
2564 /* MULT */
2565 gen_fmult(ctx, ra, rb, rc, fn11);
2566 break;
2567 case 0x23:
2568 /* DIVT */
2569 gen_fdivt(ctx, ra, rb, rc, fn11);
2570 break;
2571 case 0x24:
2572 /* CMPTUN */
2573 gen_fcmptun(ctx, ra, rb, rc, fn11);
2574 break;
2575 case 0x25:
2576 /* CMPTEQ */
2577 gen_fcmpteq(ctx, ra, rb, rc, fn11);
2578 break;
2579 case 0x26:
2580 /* CMPTLT */
2581 gen_fcmptlt(ctx, ra, rb, rc, fn11);
2582 break;
2583 case 0x27:
2584 /* CMPTLE */
2585 gen_fcmptle(ctx, ra, rb, rc, fn11);
2586 break;
2587 case 0x2C:
2588 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2589 /* CVTST */
2590 gen_fcvtst(ctx, rb, rc, fn11);
2591 } else {
2592 /* CVTTS */
2593 gen_fcvtts(ctx, rb, rc, fn11);
2594 }
2595 break;
2596 case 0x2F:
2597 /* CVTTQ */
2598 gen_fcvttq(ctx, rb, rc, fn11);
2599 break;
2600 case 0x3C:
2601 /* CVTQS */
2602 gen_fcvtqs(ctx, rb, rc, fn11);
2603 break;
2604 case 0x3E:
2605 /* CVTQT */
2606 gen_fcvtqt(ctx, rb, rc, fn11);
2607 break;
2608 default:
2609 goto invalid_opc;
2610 }
2611 break;
2612 case 0x17:
2613 switch (fn11) {
2614 case 0x010:
2615 /* CVTLQ */
2616 gen_fcvtlq(rb, rc);
2617 break;
2618 case 0x020:
2619 if (likely(rc != 31)) {
2620 if (ra == rb) {
2621 /* FMOV */
2622 if (ra == 31)
2623 tcg_gen_movi_i64(cpu_fir[rc], 0);
2624 else
2625 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2626 } else {
2627 /* CPYS */
2628 gen_fcpys(ra, rb, rc);
2629 }
2630 }
2631 break;
2632 case 0x021:
2633 /* CPYSN */
2634 gen_fcpysn(ra, rb, rc);
2635 break;
2636 case 0x022:
2637 /* CPYSE */
2638 gen_fcpyse(ra, rb, rc);
2639 break;
2640 case 0x024:
2641 /* MT_FPCR */
2642 if (likely(ra != 31))
2643 gen_helper_store_fpcr(cpu_fir[ra]);
2644 else {
2645 TCGv tmp = tcg_const_i64(0);
2646 gen_helper_store_fpcr(tmp);
2647 tcg_temp_free(tmp);
2648 }
2649 break;
2650 case 0x025:
2651 /* MF_FPCR */
2652 if (likely(ra != 31))
2653 gen_helper_load_fpcr(cpu_fir[ra]);
2654 break;
2655 case 0x02A:
2656 /* FCMOVEQ */
2657 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
2658 break;
2659 case 0x02B:
2660 /* FCMOVNE */
2661 gen_fcmov(TCG_COND_NE, ra, rb, rc);
2662 break;
2663 case 0x02C:
2664 /* FCMOVLT */
2665 gen_fcmov(TCG_COND_LT, ra, rb, rc);
2666 break;
2667 case 0x02D:
2668 /* FCMOVGE */
2669 gen_fcmov(TCG_COND_GE, ra, rb, rc);
2670 break;
2671 case 0x02E:
2672 /* FCMOVLE */
2673 gen_fcmov(TCG_COND_LE, ra, rb, rc);
2674 break;
2675 case 0x02F:
2676 /* FCMOVGT */
2677 gen_fcmov(TCG_COND_GT, ra, rb, rc);
2678 break;
2679 case 0x030:
2680 /* CVTQL */
2681 gen_fcvtql(rb, rc);
2682 break;
2683 case 0x130:
2684 /* CVTQL/V */
2685 case 0x530:
2686 /* CVTQL/SV */
2687 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2688 /v doesn't do. The only thing I can think of is that /sv is a
2689 valid instruction merely for completeness in the ISA. */
2690 gen_fcvtql_v(ctx, rb, rc);
2691 break;
2692 default:
2693 goto invalid_opc;
2694 }
2695 break;
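/* Opcode 0x18: miscellaneous memory-format instructions, dispatched on the
   16-bit displacement field. The barrier and prefetch hints (TRAPB, EXCB,
   MB, WMB, FETCH, FETCH_M, ECB, WH64) generate no code; only RPCC, RC and
   RS emit anything. */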
2696 case 0x18:
2697 switch ((uint16_t)disp16) {
2698 case 0x0000:
2699 /* TRAPB */
2700 /* No-op. */
2701 break;
2702 case 0x0400:
2703 /* EXCB */
2704 /* No-op. */
2705 break;
2706 case 0x4000:
2707 /* MB */
2708 /* No-op */
2709 break;
2710 case 0x4400:
2711 /* WMB */
2712 /* No-op */
2713 break;
2714 case 0x8000:
2715 /* FETCH */
2716 /* No-op */
2717 break;
2718 case 0xA000:
2719 /* FETCH_M */
2720 /* No-op */
2721 break;
2722 case 0xC000:
2723 /* RPCC */
2724 if (ra != 31)
2725 gen_helper_load_pcc(cpu_ir[ra]);
2726 break;
2727 case 0xE000:
2728 /* RC */
2729 gen_rx(ra, 0);
2730 break;
2731 case 0xE800:
2732 /* ECB */
2733 break;
2734 case 0xF000:
2735 /* RS */
2736 gen_rx(ra, 1);
2737 break;
2738 case 0xF800:
2739 /* WH64 */
2740 /* No-op */
2741 break;
2742 default:
2743 goto invalid_opc;
2744 }
2745 break;
2746 case 0x19:
2747 /* HW_MFPR (PALcode) */
2748 #ifndef CONFIG_USER_ONLY
2749 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2750 gen_mfpr(ra, insn & 0xffff);
2751 break;
2752 }
2753 #endif
2754 goto invalid_opc;
2755 case 0x1A:
2756 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2757 prediction stack action, which of course we don't implement. */
2758 if (rb != 31) {
2759 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2760 } else {
2761 tcg_gen_movi_i64(cpu_pc, 0);
2762 }
2763 if (ra != 31) {
2764 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2765 }
2766 ret = EXIT_PC_UPDATED;
2767 break;
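/* HW_LD below (and HW_ST at opcode 0x1F) are PALcode-only accesses; the
   function field in bits 15:12 of the instruction selects the physical,
   locked, virtual and alternate-mode variants, matching the switch on
   (insn >> 12) & 0xF. */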
2768 case 0x1B:
2769 /* HW_LD (PALcode) */
2770 #ifndef CONFIG_USER_ONLY
2771 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2772 TCGv addr;
2774 if (ra == 31) {
2775 break;
2776 }
2778 addr = tcg_temp_new();
2779 if (rb != 31)
2780 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2781 else
2782 tcg_gen_movi_i64(addr, disp12);
2783 switch ((insn >> 12) & 0xF) {
2784 case 0x0:
2785 /* Longword physical access (hw_ldl/p) */
2786 gen_helper_ldl_phys(cpu_ir[ra], addr);
2787 break;
2788 case 0x1:
2789 /* Quadword physical access (hw_ldq/p) */
2790 gen_helper_ldq_phys(cpu_ir[ra], addr);
2791 break;
2792 case 0x2:
2793 /* Longword physical access with lock (hw_ldl_l/p) */
2794 gen_helper_ldl_l_phys(cpu_ir[ra], addr);
2795 break;
2796 case 0x3:
2797 /* Quadword physical access with lock (hw_ldq_l/p) */
2798 gen_helper_ldq_l_phys(cpu_ir[ra], addr);
2799 break;
2800 case 0x4:
2801 /* Longword virtual PTE fetch (hw_ldl/v) */
2802 goto invalid_opc;
2803 case 0x5:
2804 /* Quadword virtual PTE fetch (hw_ldq/v) */
2805 goto invalid_opc;
2806 break;
2807 case 0x6:
2808 /* Invalid */
2809 goto invalid_opc;
2810 case 0x7:
2811 /* Invalid */
2812 goto invalid_opc;
2813 case 0x8:
2814 /* Longword virtual access (hw_ldl) */
2815 goto invalid_opc;
2816 case 0x9:
2817 /* Quadword virtual access (hw_ldq) */
2818 goto invalid_opc;
2819 case 0xA:
2820 /* Longword virtual access with protection check (hw_ldl/w) */
2821 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2822 break;
2823 case 0xB:
2824 /* Quadword virtual access with protection check (hw_ldq/w) */
2825 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2826 break;
2827 case 0xC:
2828 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2829 goto invalid_opc;
2830 case 0xD:
2831 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2832 goto invalid_opc;
2833 case 0xE:
2834 /* Longword virtual access with alternate access mode and
2835 protection checks (hw_ldl/wa) */
2836 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
2837 break;
2838 case 0xF:
2839 /* Quadword virtual access with alternate access mode and
2840 protection checks (hw_ldq/wa) */
2841 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
2842 break;
2843 }
2844 tcg_temp_free(addr);
2845 break;
2846 }
2847 #endif
2848 goto invalid_opc;
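/* Opcode 0x1C collects the later ISA extensions: SEXTB/SEXTW (BWX),
   CTPOP/CTLZ/CTTZ (CIX), PERR, PK/UNPK and the MIN/MAX family (MVI), and
   FTOIT/FTOIS (FIX). Each case checks the matching TB_FLAGS_AMASK_* bit
   and falls back to invalid_opc when the CPU model lacks the extension. */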
2849 case 0x1C:
2850 switch (fn7) {
2851 case 0x00:
2852 /* SEXTB */
2853 if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
2854 goto invalid_opc;
2855 }
2856 if (likely(rc != 31)) {
2857 if (islit)
2858 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
2859 else
2860 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
2861 }
2862 break;
2863 case 0x01:
2864 /* SEXTW */
2865 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
2866 if (likely(rc != 31)) {
2867 if (islit) {
2868 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2869 } else {
2870 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2871 }
2872 }
2873 break;
2874 }
2875 goto invalid_opc;
2876 case 0x30:
2877 /* CTPOP */
2878 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2879 if (likely(rc != 31)) {
2880 if (islit) {
2881 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2882 } else {
2883 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2884 }
2885 }
2886 break;
2887 }
2888 goto invalid_opc;
2889 case 0x31:
2890 /* PERR */
2891 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2892 gen_perr(ra, rb, rc, islit, lit);
2893 break;
2894 }
2895 goto invalid_opc;
2896 case 0x32:
2897 /* CTLZ */
2898 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2899 if (likely(rc != 31)) {
2900 if (islit) {
2901 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2902 } else {
2903 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2904 }
2905 }
2906 break;
2907 }
2908 goto invalid_opc;
2909 case 0x33:
2910 /* CTTZ */
2911 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2912 if (likely(rc != 31)) {
2913 if (islit) {
2914 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2915 } else {
2916 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2917 }
2918 }
2919 break;
2920 }
2921 goto invalid_opc;
2922 case 0x34:
2923 /* UNPKBW */
2924 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2925 if (real_islit || ra != 31) {
2926 goto invalid_opc;
2927 }
2928 gen_unpkbw(rb, rc);
2929 break;
2930 }
2931 goto invalid_opc;
2932 case 0x35:
2933 /* UNPKBL */
2934 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2935 if (real_islit || ra != 31) {
2936 goto invalid_opc;
2937 }
2938 gen_unpkbl(rb, rc);
2939 break;
2940 }
2941 goto invalid_opc;
2942 case 0x36:
2943 /* PKWB */
2944 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2945 if (real_islit || ra != 31) {
2946 goto invalid_opc;
2947 }
2948 gen_pkwb(rb, rc);
2949 break;
2950 }
2951 goto invalid_opc;
2952 case 0x37:
2953 /* PKLB */
2954 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2955 if (real_islit || ra != 31) {
2956 goto invalid_opc;
2957 }
2958 gen_pklb(rb, rc);
2959 break;
2960 }
2961 goto invalid_opc;
2962 case 0x38:
2963 /* MINSB8 */
2964 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2965 gen_minsb8(ra, rb, rc, islit, lit);
2966 break;
2967 }
2968 goto invalid_opc;
2969 case 0x39:
2970 /* MINSW4 */
2971 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2972 gen_minsw4(ra, rb, rc, islit, lit);
2973 break;
2974 }
2975 goto invalid_opc;
2976 case 0x3A:
2977 /* MINUB8 */
2978 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2979 gen_minub8(ra, rb, rc, islit, lit);
2980 break;
2981 }
2982 goto invalid_opc;
2983 case 0x3B:
2984 /* MINUW4 */
2985 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2986 gen_minuw4(ra, rb, rc, islit, lit);
2987 break;
2988 }
2989 goto invalid_opc;
2990 case 0x3C:
2991 /* MAXUB8 */
2992 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2993 gen_maxub8(ra, rb, rc, islit, lit);
2994 break;
2995 }
2996 goto invalid_opc;
2997 case 0x3D:
2998 /* MAXUW4 */
2999 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3000 gen_maxuw4(ra, rb, rc, islit, lit);
3001 break;
3002 }
3003 goto invalid_opc;
3004 case 0x3E:
3005 /* MAXSB8 */
3006 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3007 gen_maxsb8(ra, rb, rc, islit, lit);
3008 break;
3009 }
3010 goto invalid_opc;
3011 case 0x3F:
3012 /* MAXSW4 */
3013 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3014 gen_maxsw4(ra, rb, rc, islit, lit);
3015 break;
3016 }
3017 goto invalid_opc;
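/* FTOIT/FTOIS move FP register bits to an integer register without
   arithmetic conversion; FTOIS additionally narrows through the S-format
   memory-layout helper and sign-extends the 32-bit result. */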
3018 case 0x70:
3019 /* FTOIT */
3020 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3021 goto invalid_opc;
3022 }
3023 if (likely(rc != 31)) {
3024 if (ra != 31)
3025 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3026 else
3027 tcg_gen_movi_i64(cpu_ir[rc], 0);
3028 }
3029 break;
3030 case 0x78:
3031 /* FTOIS */
3032 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3033 goto invalid_opc;
3034 }
3035 if (rc != 31) {
3036 TCGv_i32 tmp1 = tcg_temp_new_i32();
3037 if (ra != 31)
3038 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
3039 else {
3040 TCGv tmp2 = tcg_const_i64(0);
3041 gen_helper_s_to_memory(tmp1, tmp2);
3042 tcg_temp_free(tmp2);
3043 }
3044 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
3045 tcg_temp_free_i32(tmp1);
3046 }
3047 break;
3048 default:
3049 goto invalid_opc;
3050 }
3051 break;
3052 case 0x1D:
3053 /* HW_MTPR (PALcode) */
3054 #ifndef CONFIG_USER_ONLY
3055 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3056 gen_mtpr(rb, insn & 0xffff);
3057 break;
3058 }
3059 #endif
3060 goto invalid_opc;
3061 case 0x1E:
3062 /* HW_RET (PALcode) */
3063 #ifndef CONFIG_USER_ONLY
3064 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3065 if (rb == 31) {
3066 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3067 address from EXC_ADDR. This turns out to be useful for our
3068 emulation PALcode, so continue to accept it. */
3069 TCGv tmp = tcg_temp_new();
3070 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUState, exc_addr));
3071 gen_helper_hw_ret(tmp);
3072 tcg_temp_free(tmp);
3073 } else {
3074 gen_helper_hw_ret(cpu_ir[rb]);
3075 }
3076 ret = EXIT_PC_UPDATED;
3077 break;
3078 }
3079 #endif
3080 goto invalid_opc;
3081 case 0x1F:
3082 /* HW_ST (PALcode) */
3083 #ifndef CONFIG_USER_ONLY
3084 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3085 TCGv addr, val;
3086 addr = tcg_temp_new();
3087 if (rb != 31)
3088 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3089 else
3090 tcg_gen_movi_i64(addr, disp12);
3091 if (ra != 31)
3092 val = cpu_ir[ra];
3093 else {
3094 val = tcg_temp_new();
3095 tcg_gen_movi_i64(val, 0);
3096 }
3097 switch ((insn >> 12) & 0xF) {
3098 case 0x0:
3099 /* Longword physical access */
3100 gen_helper_stl_phys(addr, val);
3101 break;
3102 case 0x1:
3103 /* Quadword physical access */
3104 gen_helper_stq_phys(addr, val);
3105 break;
3106 case 0x2:
3107 /* Longword physical access with lock */
3108 gen_helper_stl_c_phys(val, addr, val);
3109 break;
3110 case 0x3:
3111 /* Quadword physical access with lock */
3112 gen_helper_stq_c_phys(val, addr, val);
3113 break;
3114 case 0x4:
3115 /* Longword virtual access */
3116 goto invalid_opc;
3117 case 0x5:
3118 /* Quadword virtual access */
3119 goto invalid_opc;
3120 case 0x6:
3121 /* Invalid */
3122 goto invalid_opc;
3123 case 0x7:
3124 /* Invalid */
3125 goto invalid_opc;
3126 case 0x8:
3127 /* Invalid */
3128 goto invalid_opc;
3129 case 0x9:
3130 /* Invalid */
3131 goto invalid_opc;
3132 case 0xA:
3133 /* Invalid */
3134 goto invalid_opc;
3135 case 0xB:
3136 /* Invalid */
3137 goto invalid_opc;
3138 case 0xC:
3139 /* Longword virtual access with alternate access mode */
3140 goto invalid_opc;
3141 case 0xD:
3142 /* Quadword virtual access with alternate access mode */
3143 goto invalid_opc;
3144 case 0xE:
3145 /* Invalid */
3146 goto invalid_opc;
3147 case 0xF:
3148 /* Invalid */
3149 goto invalid_opc;
3150 }
3151 if (ra == 31)
3152 tcg_temp_free(val);
3153 tcg_temp_free(addr);
3154 break;
3155 }
3156 #endif
3157 goto invalid_opc;
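/* Opcodes 0x20-0x2F: the memory-format loads and stores. 0x20-0x27 are the
   floating-point forms (note the fp=1 argument to gen_load_mem and
   gen_store_mem), 0x28-0x2D the integer forms including the locked loads,
   and 0x2E/0x2F the store-conditionals, which get their own helper. */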
3158 case 0x20:
3159 /* LDF */
3160 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
3161 break;
3162 case 0x21:
3163 /* LDG */
3164 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
3165 break;
3166 case 0x22:
3167 /* LDS */
3168 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
3169 break;
3170 case 0x23:
3171 /* LDT */
3172 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
3173 break;
3174 case 0x24:
3175 /* STF */
3176 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
3177 break;
3178 case 0x25:
3179 /* STG */
3180 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
3181 break;
3182 case 0x26:
3183 /* STS */
3184 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
3185 break;
3186 case 0x27:
3187 /* STT */
3188 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
3189 break;
3190 case 0x28:
3191 /* LDL */
3192 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
3193 break;
3194 case 0x29:
3195 /* LDQ */
3196 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
3197 break;
3198 case 0x2A:
3199 /* LDL_L */
3200 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
3201 break;
3202 case 0x2B:
3203 /* LDQ_L */
3204 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
3205 break;
3206 case 0x2C:
3207 /* STL */
3208 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
3209 break;
3210 case 0x2D:
3211 /* STQ */
3212 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
3213 break;
3214 case 0x2E:
3215 /* STL_C */
3216 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
3217 break;
3218 case 0x2F:
3219 /* STQ_C */
3220 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
3221 break;
3222 case 0x30:
3223 /* BR */
3224 ret = gen_bdirect(ctx, ra, disp21);
3225 break;
3226 case 0x31: /* FBEQ */
3227 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
3228 break;
3229 case 0x32: /* FBLT */
3230 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
3231 break;
3232 case 0x33: /* FBLE */
3233 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
3234 break;
3235 case 0x34:
3236 /* BSR */
3237 ret = gen_bdirect(ctx, ra, disp21);
3238 break;
3239 case 0x35: /* FBNE */
3240 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
3241 break;
3242 case 0x36: /* FBGE */
3243 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
3244 break;
3245 case 0x37: /* FBGT */
3246 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
3247 break;
3248 case 0x38:
3249 /* BLBC */
3250 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
3251 break;
3252 case 0x39:
3253 /* BEQ */
3254 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
3255 break;
3256 case 0x3A:
3257 /* BLT */
3258 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
3259 break;
3260 case 0x3B:
3261 /* BLE */
3262 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
3263 break;
3264 case 0x3C:
3265 /* BLBS */
3266 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
3267 break;
3268 case 0x3D:
3269 /* BNE */
3270 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
3271 break;
3272 case 0x3E:
3273 /* BGE */
3274 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
3275 break;
3276 case 0x3F:
3277 /* BGT */
3278 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
3279 break;
3280 invalid_opc:
3281 ret = gen_invalid(ctx);
3282 break;
3283 }
3285 return ret;
3286 }
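/* Translation loop: translate_one() is invoked for one instruction at a
   time until it returns something other than NO_EXIT, or until the TB hits
   a page boundary, exhausts its instruction budget, or single-stepping is
   in effect. With search_pc set, the loop also records the guest pc and
   icount for every opcode so restore_state_to_opc() can map a retranslation
   index back to a guest pc. */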
3288 static inline void gen_intermediate_code_internal(CPUState *env,
3289 TranslationBlock *tb,
3290 int search_pc)
3291 {
3292 DisasContext ctx, *ctxp = &ctx;
3293 target_ulong pc_start;
3294 uint32_t insn;
3295 uint16_t *gen_opc_end;
3296 CPUBreakpoint *bp;
3297 int j, lj = -1;
3298 ExitStatus ret;
3299 int num_insns;
3300 int max_insns;
3302 pc_start = tb->pc;
3303 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3305 ctx.tb = tb;
3306 ctx.env = env;
3307 ctx.pc = pc_start;
3308 ctx.mem_idx = cpu_mmu_index(env);
3310 /* ??? Every TB begins with unset rounding mode, to be initialized on
3311 the first fp insn of the TB. Alternatively we could define a proper
3312 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3313 to reset the FP_STATUS to that default at the end of any TB that
3314 changes the default. We could even (gasp) dynamically figure out
3315 what default would be most efficient given the running program. */
3316 ctx.tb_rm = -1;
3317 /* Similarly for flush-to-zero. */
3318 ctx.tb_ftz = -1;
3320 num_insns = 0;
3321 max_insns = tb->cflags & CF_COUNT_MASK;
3322 if (max_insns == 0)
3323 max_insns = CF_COUNT_MASK;
3325 gen_icount_start();
3326 do {
3327 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3328 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3329 if (bp->pc == ctx.pc) {
3330 gen_excp(&ctx, EXCP_DEBUG, 0);
3331 break;
3332 }
3333 }
3334 }
3335 if (search_pc) {
3336 j = gen_opc_ptr - gen_opc_buf;
3337 if (lj < j) {
3338 lj++;
3339 while (lj < j)
3340 gen_opc_instr_start[lj++] = 0;
3341 }
3342 gen_opc_pc[lj] = ctx.pc;
3343 gen_opc_instr_start[lj] = 1;
3344 gen_opc_icount[lj] = num_insns;
3345 }
3346 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3347 gen_io_start();
3348 insn = ldl_code(ctx.pc);
3349 num_insns++;
3351 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3352 tcg_gen_debug_insn_start(ctx.pc);
3353 }
3355 ctx.pc += 4;
3356 ret = translate_one(ctxp, insn);
3358 /* If we reach a page boundary, are single stepping,
3359 or exhaust instruction count, stop generation. */
3360 if (ret == NO_EXIT
3361 && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
3362 || gen_opc_ptr >= gen_opc_end
3363 || num_insns >= max_insns
3364 || singlestep
3365 || env->singlestep_enabled)) {
3366 ret = EXIT_PC_STALE;
3367 }
3368 } while (ret == NO_EXIT);
3370 if (tb->cflags & CF_LAST_IO) {
3371 gen_io_end();
3372 }
3374 switch (ret) {
3375 case EXIT_GOTO_TB:
3376 case EXIT_NORETURN:
3377 break;
3378 case EXIT_PC_STALE:
3379 tcg_gen_movi_i64(cpu_pc, ctx.pc);
3380 /* FALLTHRU */
3381 case EXIT_PC_UPDATED:
3382 if (env->singlestep_enabled) {
3383 gen_excp_1(EXCP_DEBUG, 0);
3384 } else {
3385 tcg_gen_exit_tb(0);
3386 }
3387 break;
3388 default:
3389 abort();
3390 }
3392 gen_icount_end(tb, num_insns);
3393 *gen_opc_ptr = INDEX_op_end;
3394 if (search_pc) {
3395 j = gen_opc_ptr - gen_opc_buf;
3396 lj++;
3397 while (lj <= j)
3398 gen_opc_instr_start[lj++] = 0;
3399 } else {
3400 tb->size = ctx.pc - pc_start;
3401 tb->icount = num_insns;
3402 }
3404 #ifdef DEBUG_DISAS
3405 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3406 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3407 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3408 qemu_log("\n");
3409 }
3410 #endif
3411 }
3413 void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
3414 {
3415 gen_intermediate_code_internal(env, tb, 0);
3416 }
3418 void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
3419 {
3420 gen_intermediate_code_internal(env, tb, 1);
3421 }
3423 struct cpu_def_t {
3424 const char *name;
3425 int implver, amask;
3426 };
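/* Table mapping "-cpu" model names to IMPLVER/AMASK pairs; both the EV
   names and the 21x64 part numbers are accepted. Unrecognized names keep
   the ev67-like defaults set up in cpu_alpha_init(). */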
3428 static const struct cpu_def_t cpu_defs[] = {
3429 { "ev4", IMPLVER_2106x, 0 },
3430 { "ev5", IMPLVER_21164, 0 },
3431 { "ev56", IMPLVER_21164, AMASK_BWX },
3432 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3433 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3434 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3435 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3436 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3437 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3438 { "21064", IMPLVER_2106x, 0 },
3439 { "21164", IMPLVER_21164, 0 },
3440 { "21164a", IMPLVER_21164, AMASK_BWX },
3441 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3442 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3443 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3444 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3445 };
3447 CPUAlphaState * cpu_alpha_init (const char *cpu_model)
3448 {
3449 CPUAlphaState *env;
3450 int implver, amask, i, max;
3452 env = qemu_mallocz(sizeof(CPUAlphaState));
3453 cpu_exec_init(env);
3454 alpha_translate_init();
3455 tlb_flush(env, 1);
3457 /* Default to ev67; there is no reason not to emulate all insn extensions by default. */
3458 implver = IMPLVER_21264;
3459 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3460 | AMASK_TRAP | AMASK_PREFETCH);
3462 max = ARRAY_SIZE(cpu_defs);
3463 for (i = 0; i < max; i++) {
3464 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3465 implver = cpu_defs[i].implver;
3466 amask = cpu_defs[i].amask;
3467 break;
3468 }
3469 }
3470 env->implver = implver;
3471 env->amask = amask;
3473 #if defined (CONFIG_USER_ONLY)
3474 env->ps = PS_USER_MODE;
3475 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3476 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
3477 #endif
3478 env->lock_addr = -1;
3479 env->fen = 1;
3481 qemu_init_vcpu(env);
3482 return env;
3483 }
3485 void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
3486 {
3487 env->pc = gen_opc_pc[pc_pos];