target-alpha/translate.c
/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
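
/* Note: LOG_DISAS() only produces output (via qemu_log_mask with the
   CPU_LOG_TB_IN_ASM mask) when ALPHA_DEBUG_DISAS is defined above;
   otherwise it expands to an empty statement.  */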

typedef struct DisasContext DisasContext;
struct DisasContext {
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    CPUAlphaState *env;
    uint32_t amask;
};

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
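/* The buffer above is sized exactly: "ir0".."ir9" are 10 names of 4
   bytes each (including the NUL), "ir10".."ir30" are 21 names of 5
   bytes, and likewise "fir0".."fir30" take 10*5 + 21*6 bytes.  */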
67 #include "gen-icount.h"
69 static void alpha_translate_init(void)
71 int i;
72 char *p;
73 static int done_init = 0;
75 if (done_init)
76 return;
78 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
80 p = cpu_reg_names;
81 for (i = 0; i < 31; i++) {
82 sprintf(p, "ir%d", i);
83 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
84 offsetof(CPUState, ir[i]), p);
85 p += (i < 10) ? 4 : 5;
87 sprintf(p, "fir%d", i);
88 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
89 offsetof(CPUState, fir[i]), p);
90 p += (i < 10) ? 5 : 6;
93 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
94 offsetof(CPUState, pc), "pc");
96 cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
97 offsetof(CPUState, lock), "lock");
99 #ifdef CONFIG_USER_ONLY
100 cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
101 offsetof(CPUState, unique), "uniq");
102 #endif
104 /* register helpers */
105 #define GEN_HELPER 2
106 #include "helper.h"
108 done_init = 1;

static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static inline void gen_invalid(DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}
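
/* Note: load-locked is modelled by remembering the locked (virtual)
   address in cpu_lock; the matching store-conditionals
   (gen_qemu_stl_c/gen_qemu_stq_c below) merely compare addresses and
   then invalidate the lock.  This is a simplification of the
   architected lock_flag/locked-physical-address pair.  */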

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr;
    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}

static void gen_bcond_pcload(DisasContext *ctx, int32_t disp, int lab_true)
{
    int lab_over = gen_new_label();

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(lab_over);
    gen_set_label(lab_true);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(lab_over);
}

static void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                      int32_t disp, int mask)
{
    int lab_true = gen_new_label();

    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
            tcg_temp_free(tmp);
        } else {
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, lab_true);
        }
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        tcg_temp_free(tmp);
    }
    gen_bcond_pcload(ctx, disp, lab_true);
}
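
/* Branch displacements are counted in instruction words, so the taken
   target computed above is ctx->pc + 4 * disp, with ctx->pc already
   advanced past the branch itself.  */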

/* Generate a forward TCG branch to LAB_TRUE if RA cmp 0.0.
   This is complicated by the fact that -0.0 compares the same as +0.0.  */

static void gen_fbcond_internal(TCGCond cond, TCGv src, int lab_true)
{
    int lab_false = -1;
    uint64_t mzero = 1ull << 63;
    TCGv tmp;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        /* ??? Assume that the temporary is reclaimed at the branch.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, src, mzero - 1);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        break;

    case TCG_COND_GE:
        /* For >=, emit two branches to the destination.  */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_true);
        break;

    case TCG_COND_LT:
        /* For <, first filter out -0.0 to what will be the fallthru.  */
        lab_false = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_false);
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        gen_set_label(lab_false);
        break;

    default:
        abort();
    }
}

static void gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, int32_t disp)
{
    int lab_true;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        gen_bcond(ctx, cond, ra, disp, 0);
        return;
    }

    lab_true = gen_new_label();
    gen_fbcond_internal(cond, cpu_fir[ra], lab_true);
    gen_bcond_pcload(ctx, disp, lab_true);
}

static inline void gen_cmov(TCGCond inv_cond, int ra, int rb, int rc,
                            int islit, uint8_t lit, int mask)
{
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}

static void gen_fcmov(TCGCond inv_cond, int ra, int rb, int rc)
{
    TCGv va = cpu_fir[ra];
    int l1;

    if (unlikely(rc == 31))
        return;
    if (unlikely(ra == 31)) {
        /* ??? Assume that the temporary is reclaimed at the branch.  */
        va = tcg_const_i64(0);
    }

    l1 = gen_new_label();
    gen_fbcond_internal(inv_cond, va, l1);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}

#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
{                                                           \
    if (unlikely(rc == 31))                                 \
        return;                                             \
                                                            \
    if (rb != 31)                                           \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    else {                                                  \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}
FARITH2(sqrts)
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(sqrtt)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
FARITH2(cvtst)
FARITH2(cvtts)
FARITH2(cvttq)
FARITH2(cvtqs)
FARITH2(cvtqt)
FARITH2(cvtlq)
FARITH2(cvtql)
FARITH2(cvtqlv)
FARITH2(cvtqlsv)

#define FARITH3(name)                                                    \
static inline void glue(gen_f, name)(int ra, int rb, int rc)             \
{                                                                        \
    if (unlikely(rc == 31))                                              \
        return;                                                          \
                                                                         \
    if (ra != 31) {                                                      \
        if (rb != 31)                                                    \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], cpu_fir[rb]); \
        else {                                                           \
            TCGv tmp = tcg_const_i64(0);                                 \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], tmp);         \
            tcg_temp_free(tmp);                                          \
        }                                                                \
    } else {                                                             \
        TCGv tmp = tcg_const_i64(0);                                     \
        if (rb != 31)                                                    \
            gen_helper_ ## name (cpu_fir[rc], tmp, cpu_fir[rb]);         \
        else                                                             \
            gen_helper_ ## name (cpu_fir[rc], tmp, tmp);                 \
        tcg_temp_free(tmp);                                              \
    }                                                                    \
}
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
FARITH3(adds)
FARITH3(subs)
FARITH3(muls)
FARITH3(divs)
FARITH3(addt)
FARITH3(subt)
FARITH3(mult)
FARITH3(divt)
FARITH3(cmptun)
FARITH3(cmpteq)
FARITH3(cmptlt)
FARITH3(cmptle)
FARITH3(cpys)
FARITH3(cpysn)
FARITH3(cpyse)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
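
/* For example, lit == 0x05 selects bytes 0 and 2, producing the mask
   0x0000000000ff00ff.  */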

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
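
/* For example, EXTBL with a literal byte offset of 3 reduces to
   (ra >> 24) & 0xff: a right shift by (3 & 7) * 8 = 24 bits followed
   by gen_zapnoti with byte_mask 0x01 to keep the low byte.  */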

/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)

static inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, int islit,
                           uint8_t lit)
{
    int l1, l2;
    TCGv tmp;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();
    l2 = gen_new_label();

    if (ra != 31) {
        tmp = tcg_temp_new();
        tcg_gen_mov_i64(tmp, cpu_ir[ra]);
    } else
        tmp = tcg_const_i64(0);
    if (islit)
        tcg_gen_brcondi_i64(cond, tmp, lit, l1);
    else
        tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);

    tcg_gen_movi_i64(cpu_ir[rc], 0);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_ir[rc], 1);
    gen_set_label(l2);
}
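
/* gen_cmp above materializes a comparison result as 0 or 1 with an
   explicit branch: jump to l1 when the condition holds and store 1,
   otherwise fall through and store 0.  */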

static inline int translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11, fn16;
    uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    int ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    sbz = (insn >> 13) & 0x07;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn16 = insn & 0x0000FFFF;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    ret = 0;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
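
    /* Worked example (illustrative encoding): insn 0x40220003 gives
       opc 0x10, ra = 1, rb = 2, islit = 0, fn7 = 0x00, rc = 3,
       i.e. "addl r1,r2,r3".  When bit 12 is set, bits <20:13> instead
       supply the 8-bit zero-extended literal used in place of rb.  */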

    switch (opc) {
    case 0x00:
        /* CALL_PAL */
#ifdef CONFIG_USER_ONLY
        if (palcode == 0x9E) {
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
            break;
        } else if (palcode == 0x9F) {
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
            break;
        }
#endif
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
            ret = 3;
            break;
        }
#ifndef CONFIG_USER_ONLY
        if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx & 1)
                goto invalid_opc;
            gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
            ret = 3;
        }
#endif
        /* Invalid PAL call */
        goto invalid_opc;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                switch (ctx->env->implver) {
                case IMPLVER_2106x:
                    /* EV4, EV45, LCA, LCA45 & EV5 */
                    break;
                case IMPLVER_21164:
                case IMPLVER_21264:
                case IMPLVER_21364:
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
                                     ~(uint64_t)ctx->amask);
                    break;
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* f11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtf(rb, rc);
            break;
        case 0x0B:
            /* SQRTS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrts(rb, rc);
            break;
        case 0x14:
            /* ITOFF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtg(rb, rc);
            break;
        case 0x02B:
            /* SQRTT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtt(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* f11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x1E:
            /* CVTDG */
#if 0 // TODO
            gen_fcvtdg(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2D:
            /* CVTGD */
#if 0 // TODO
            gen_fcvtgd(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        /* XXX: rounding mode and traps are ignored (!) */
        switch (fpfn) { /* f11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ra, rb, rc);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ra, rb, rc);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ra, rb, rc);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ra, rb, rc);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ra, rb, rc);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ra, rb, rc);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ra, rb, rc);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ra, rb, rc);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ra, rb, rc);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ra, rb, rc);
            break;
        case 0x2C:
            /* XXX: incorrect */
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(rb, rc);
            } else {
                /* CVTTS */
                gen_fcvtts(rb, rc);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(rb, rc);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(rb, rc);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
            gen_fcvtqlv(rb, rc);
            break;
        case 0x530:
            /* CVTQL/SV */
            gen_fcvtqlsv(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op. Just exit from the current tb */
            ret = 2;
            break;
        case 0x0400:
            /* EXCB */
            /* No-op. Just exit from the current tb */
            ret = 2;
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            if (ra != 31)
                gen_helper_load_pcc(cpu_ir[ra]);
            break;
        case 0xE000:
            /* RC */
            if (ra != 31)
                gen_helper_rc(cpu_ir[ra]);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            if (ra != 31)
                gen_helper_rs(cpu_ir[ra]);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv tmp = tcg_const_i32(insn & 0xFF);
            gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
            tcg_temp_free(tmp);
        }
        break;
#endif
    case 0x1A:
        if (rb != 31)
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        else
            tcg_gen_movi_i64(cpu_pc, 0);
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        /* These four jumps only differ by the branch prediction hint */
        switch (fn2) {
        case 0x0:
            /* JMP */
            break;
        case 0x1:
            /* JSR */
            break;
        case 0x2:
            /* RET */
            break;
        case 0x3:
            /* JSR_COROUTINE */
            break;
        }
        ret = 1;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_raw(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_raw(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                 * protection checks (hw_ldl/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldl_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                 * protection checks (hw_ldq/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldq_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            }
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                else
                    tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* CTPOP */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                else
                    gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x31:
            /* PERR */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_perr(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* CTLZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                else
                    gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x33:
            /* CTTZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                else
                    gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x34:
            /* UNPKBW */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbw (rb, rc);
            break;
        case 0x35:
            /* UNPKBL */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbl (rb, rc);
            break;
        case 0x36:
            /* PKWB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pkwb (rb, rc);
            break;
        case 0x37:
            /* PKLB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pklb (rb, rc);
            break;
        case 0x38:
            /* MINSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsb8 (ra, rb, rc, islit, lit);
            break;
        case 0x39:
            /* MINSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsw4 (ra, rb, rc, islit, lit);
            break;
        case 0x3A:
            /* MINUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minub8 (ra, rb, rc, islit, lit);
            break;
        case 0x3B:
            /* MINUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minuw4 (ra, rb, rc, islit, lit);
            break;
        case 0x3C:
            /* MAXUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxub8 (ra, rb, rc, islit, lit);
            break;
        case 0x3D:
            /* MAXUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxuw4 (ra, rb, rc, islit, lit);
            break;
        case 0x3E:
            /* MAXSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsb8 (ra, rb, rc, islit, lit);
            break;
        case 0x3F:
            /* MAXSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsw4 (ra, rb, rc, islit, lit);
            break;
        case 0x70:
            /* FTOIT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv tmp1 = tcg_const_i32(insn & 0xFF);
            if (ra != 31)
                gen_helper_mtpr(tmp1, cpu_ir[ra]);
            else {
                TCGv tmp2 = tcg_const_i64(0);
                gen_helper_mtpr(tmp1, tmp2);
                tcg_temp_free(tmp2);
            }
            tcg_temp_free(tmp1);
            ret = 2;
        }
        break;
#endif
    case 0x1E:
        /* HW_REI (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (rb == 31) {
            /* "Old" alpha */
            gen_helper_hw_rei();
        } else {
            TCGv tmp;

            if (ra != 31) {
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
            } else
                tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
            gen_helper_hw_ret(tmp);
            tcg_temp_free(tmp);
        }
        ret = 2;
        break;
#endif
2332 case 0x1F:
2333 /* HW_ST (PALcode) */
2334 #if defined (CONFIG_USER_ONLY)
2335 goto invalid_opc;
2336 #else
2337 if (!ctx->pal_mode)
2338 goto invalid_opc;
2339 else {
2340 TCGv addr, val;
2341 addr = tcg_temp_new();
2342 if (rb != 31)
2343 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2344 else
2345 tcg_gen_movi_i64(addr, disp12);
2346 if (ra != 31)
2347 val = cpu_ir[ra];
2348 else {
2349 val = tcg_temp_new();
2350 tcg_gen_movi_i64(val, 0);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_raw(val, addr);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_raw(val, addr);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_raw(val, val, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_raw(val, val, addr);
                break;
            case 0x4:
                /* Longword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                break;
            case 0x5:
                /* Quadword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                break;
            case 0x6:
            case 0x7:
            case 0x8:
            case 0x9:
            case 0xA:
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        break;
#endif
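    /* Floating-point loads and stores: F and G are the VAX formats,
       S and T the IEEE single and double formats. */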
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
        break;
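    /* Integer loads and stores.  The LDx_L/STx_C pairs implement
       Alpha's load-locked/store-conditional primitives. */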
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x2F:
        /* STQ_C */
        gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
        break;
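    /* Branch formats: the signed 21-bit displacement counts
       instruction words, so it is scaled by 4 and added to the updated
       PC (ctx->pc already points past the branch). */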
    case 0x30:
        /* BR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x31: /* FBEQ */
        gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        ret = 1;
        break;
    case 0x32: /* FBLT */
        gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        ret = 1;
        break;
    case 0x33: /* FBLE */
        gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        ret = 1;
        break;
    case 0x34:
        /* BSR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x35: /* FBNE */
        gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        ret = 1;
        break;
    case 0x36: /* FBGE */
        gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        ret = 1;
        break;
    case 0x37: /* FBGT */
        gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        ret = 1;
        break;
    case 0x38:
        /* BLBC */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        ret = 1;
        break;
    case 0x39:
        /* BEQ */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3A:
        /* BLT */
        gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3B:
        /* BLE */
        gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3C:
        /* BLBS */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        ret = 1;
        break;
    case 0x3D:
        /* BNE */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3E:
        /* BGE */
        gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3F:
        /* BGT */
        gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        ret = 1;
        break;
    invalid_opc:
        gen_invalid(ctx);
        ret = 3;
        break;
    }

    return ret;
}
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
    ctx.env = env;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
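    /* The MMU index comes from the mode field of the processor
       status; bit 0 of the EXC_ADDR IPR flags PAL mode. */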
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
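    /* Translate one instruction at a time until something ends the
       TB: a branch or exception, a page boundary, a full TCG opcode
       buffer, the icount limit, or single-stepping. */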
    for (ret = 0; ret == 0;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        if (ret != 0)
            break;
        /* If we reach a page boundary or are single stepping, stop
         * generation.
         */
        if (env->singlestep_enabled) {
            gen_excp(&ctx, EXCP_DEBUG, 0);
            break;
        }

        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;

        if (gen_opc_ptr >= gen_opc_end)
            break;

        if (num_insns >= max_insns)
            break;

        if (singlestep) {
            break;
        }
    }
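    /* Branches (ret == 1) and exceptions (ret == 3) have already set
       cpu_pc; for any other exit, synchronize it with the translator's
       PC before leaving the TB. */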
    if (ret != 1 && ret != 3) {
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Generate the return instruction */
    tcg_gen_exit_tb(0);
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
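/* Known CPU models, mapping a -cpu name to its IMPLVER and AMASK
   values.  The bare chip numbers (21064, 21164, ...) are aliases for
   the corresponding ev names. */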
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4",     IMPLVER_2106x, 0 },
    { "ev5",     IMPLVER_21164, 0 },
    { "ev56",    IMPLVER_21164, AMASK_BWX },
    { "pca56",   IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",     IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",    IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",    IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064",   IMPLVER_2106x, 0 },
    { "21164",   IMPLVER_21164, 0 },
    { "21164a",  IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
CPUAlphaState *cpu_alpha_init(const char *cpu_model)
{
    CPUAlphaState *env;
    uint64_t hwpcb;
    int implver, amask, i, max;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default. */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp(cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

    env->ps = 0x1F00;
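    /* For user-mode emulation, start in user mode with all FPCR trap
       disable bits set, since no PALcode is available to service the
       traps. */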
#if defined (CONFIG_USER_ONLY)
    env->ps |= 1 << 3;
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
#endif
    pal_init(env);
    /* Initialize IPRs */
    hwpcb = env->ipr[IPR_PCBB];
    env->ipr[IPR_ASN] = 0;
    env->ipr[IPR_ASTEN] = 0;
    env->ipr[IPR_ASTSR] = 0;
    env->ipr[IPR_DATFX] = 0;
    /* XXX: fix this */
    // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
    // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
    // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
    // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
    env->ipr[IPR_FEN] = 0;
    env->ipr[IPR_IPL] = 31;
    env->ipr[IPR_MCES] = 0;
    env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
    // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
    env->ipr[IPR_SISR] = 0;
    env->ipr[IPR_VIRBND] = -1ULL;

    qemu_init_vcpu(env);
    return env;
}
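/* Called when restoring state after a fault inside a TB: pc_pos
   indexes the per-opcode PC table that was recorded while translating
   with search_pc set. */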
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}