target-alpha: Expand msk*h inline.
[qemu/aliguori-queue.git] / target-alpha / translate.c
blob 1dc344821a9f0717a6009b3f5530380e0000879e
/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    CPUAlphaState *env;
    uint32_t amask;
};

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
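
/* Sizing note: "ir0".."ir9" need 4 bytes each including the NUL,
   "ir10".."ir30" need 5, and the "fir" names are one byte longer,
   so the array holds 10*4 + 21*5 + 10*5 + 21*6 = 321 bytes. */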

#include "gen-icount.h"

static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}

static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static inline void gen_invalid(DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}
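
/* Note that a load whose destination is $31/f31 is skipped entirely above:
   no address is formed and no access is issued, so any fault or prefetch
   side effect such an access would have on hardware is not modelled. */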

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}
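
/* The two store-conditional expansions above model LDx_L/STx_C with a
   single lock address: the store succeeds (writing 1 into t0) only while
   cpu_lock still holds the locked address, and the lock is invalidated
   with -1 either way.  Only the address is compared, not intervening
   stores, which is a simplification of the architected behaviour. */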

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr;
    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}

static inline void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp, int mask)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(l2);
}
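
/* The taken target above is ctx->pc + 4*disp, where ctx->pc has already
   been advanced past the branch by the translation loop; e.g. a branch at
   0x1000 with disp = 2 resolves to 0x1004 + 8 = 0x100c. */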

static inline void gen_fbcond(DisasContext *ctx, int opc, int ra, int32_t disp)
{
    int l1, l2;
    TCGv tmp;
    TCGv src;

    l1 = gen_new_label();
    l2 = gen_new_label();
    if (ra != 31) {
        tmp = tcg_temp_new();
        src = cpu_fir[ra];
    } else {
        tmp = tcg_const_i64(0);
        src = tmp;
    }
    switch (opc) {
    case 0x31: /* FBEQ */
        gen_helper_cmpfeq(tmp, src);
        break;
    case 0x32: /* FBLT */
        gen_helper_cmpflt(tmp, src);
        break;
    case 0x33: /* FBLE */
        gen_helper_cmpfle(tmp, src);
        break;
    case 0x35: /* FBNE */
        gen_helper_cmpfne(tmp, src);
        break;
    case 0x36: /* FBGE */
        gen_helper_cmpfge(tmp, src);
        break;
    case 0x37: /* FBGT */
        gen_helper_cmpfgt(tmp, src);
        break;
    default:
        abort();
    }
    tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 0, l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(l2);
}

static inline void gen_cmov(TCGCond inv_cond, int ra, int rb, int rc,
                            int islit, uint8_t lit, int mask)
{
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
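
/* Note the inverted sense: gen_cmov branches around the move when the
   condition fails, so callers pass the negated condition; CMOVEQ, for
   instance, is implemented with TCG_COND_NE. */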

#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
{                                                           \
    if (unlikely(rc == 31))                                 \
        return;                                             \
                                                            \
    if (rb != 31)                                           \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    else {                                                  \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}
FARITH2(sqrts)
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(sqrtt)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
FARITH2(cvtst)
FARITH2(cvtts)
FARITH2(cvttq)
FARITH2(cvtqs)
FARITH2(cvtqt)
FARITH2(cvtlq)
FARITH2(cvtql)
FARITH2(cvtqlv)
FARITH2(cvtqlsv)

#define FARITH3(name)                                                     \
static inline void glue(gen_f, name)(int ra, int rb, int rc)              \
{                                                                         \
    if (unlikely(rc == 31))                                               \
        return;                                                           \
                                                                          \
    if (ra != 31) {                                                       \
        if (rb != 31)                                                     \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], cpu_fir[rb]);  \
        else {                                                            \
            TCGv tmp = tcg_const_i64(0);                                  \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], tmp);          \
            tcg_temp_free(tmp);                                           \
        }                                                                 \
    } else {                                                              \
        TCGv tmp = tcg_const_i64(0);                                      \
        if (rb != 31)                                                     \
            gen_helper_ ## name (cpu_fir[rc], tmp, cpu_fir[rb]);          \
        else                                                              \
            gen_helper_ ## name (cpu_fir[rc], tmp, tmp);                  \
        tcg_temp_free(tmp);                                               \
    }                                                                     \
}
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
FARITH3(adds)
FARITH3(subs)
FARITH3(muls)
FARITH3(divs)
FARITH3(addt)
FARITH3(subt)
FARITH3(mult)
FARITH3(divt)
FARITH3(cmptun)
FARITH3(cmpteq)
FARITH3(cmptlt)
FARITH3(cmptle)
FARITH3(cpys)
FARITH3(cpysn)
FARITH3(cpyse)

#define FCMOV(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)  \
{                                                             \
    int l1;                                                   \
    TCGv tmp;                                                 \
                                                              \
    if (unlikely(rc == 31))                                   \
        return;                                               \
                                                              \
    l1 = gen_new_label();                                     \
    if (ra != 31) {                                           \
        tmp = tcg_temp_new();                                 \
        gen_helper_ ## name (tmp, cpu_fir[ra]);               \
    } else {                                                  \
        tmp = tcg_const_i64(0);                               \
        gen_helper_ ## name (tmp, tmp);                       \
    }                                                         \
    tcg_gen_brcondi_i64(TCG_COND_EQ, tmp, 0, l1);             \
    /* If the test of Fa passed, FCMOVxx copies Fb to Fc.  */ \
    if (rb != 31)                                             \
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);            \
    else                                                      \
        tcg_gen_movi_i64(cpu_fir[rc], 0);                     \
    gen_set_label(l1);                                        \
}
FCMOV(cmpfeq)
FCMOV(cmpfne)
FCMOV(cmpflt)
FCMOV(cmpfge)
FCMOV(cmpfle)
FCMOV(cmpfgt)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
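
/* For example, lit = 0x03 selects the two low bytes and expands to
   0x000000000000ffff, while lit = 0x81 expands to 0xff000000000000ff. */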

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
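
        /* For instance, INSWL with a byte offset of 2 can zap Ra down to
           its low 16 bits first and then shift left by 16; the result is
           identical to shifting first and zapping with the shifted mask. */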
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */
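
        /* Worked example: for B & 7 == 0 the computed shift is
           ~0 & 63 = 63, and mask >> 63 >> 1 clears every bit, the required
           all-zero mask; for B & 7 == 1 the shift is ~8 & 63 = 55, and
           after the extra 1-bit shift only the low 8 bits of the expanded
           mask survive, matching byte_mask<15:8> == 0x01 for MSKQH. */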
        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);

        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(inswh)
ARITH3(inslh)
ARITH3(insqh)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)

static inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, int islit,
                           uint8_t lit)
{
    int l1, l2;
    TCGv tmp;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();
    l2 = gen_new_label();

    if (ra != 31) {
        tmp = tcg_temp_new();
        tcg_gen_mov_i64(tmp, cpu_ir[ra]);
    } else
        tmp = tcg_const_i64(0);
    if (islit)
        tcg_gen_brcondi_i64(cond, tmp, lit, l1);
    else
        tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);

    tcg_gen_movi_i64(cpu_ir[rc], 0);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_ir[rc], 1);
    gen_set_label(l2);
}

static inline int translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11, fn16;
    uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    int ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    sbz = (insn >> 13) & 0x07;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn16 = insn & 0x0000FFFF;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    ret = 0;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
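
    /* The paired shifts above sign-extend the odd-width displacement
       fields: e.g. a 21-bit field of 0x1FFFFF becomes
       ((0x1FFFFF << 11) as int32_t) >> 11 = -1, while 0x0FFFFF stays
       positive. */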
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
#ifdef CONFIG_USER_ONLY
        if (palcode == 0x9E) {
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
            break;
        } else if (palcode == 0x9F) {
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
            break;
        }
#endif
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
            ret = 3;
            break;
        }
#ifndef CONFIG_USER_ONLY
        if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx & 1)
                goto invalid_opc;
            gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
            ret = 3;
        }
#endif
        /* Invalid PAL call */
        goto invalid_opc;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                switch (ctx->env->implver) {
                case IMPLVER_2106x:
                    /* EV4, EV45, LCA, LCA45 & EV5 */
                    break;
                case IMPLVER_21164:
                case IMPLVER_21264:
                case IMPLVER_21364:
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
                                     ~(uint64_t)ctx->amask);
                    break;
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_inswh(ra, rb, rc, islit, lit);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_inslh(ra, rb, rc, islit, lit);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_insqh(ra, rb, rc, islit, lit);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* f11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtf(rb, rc);
            break;
        case 0x0B:
            /* SQRTS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrts(rb, rc);
            break;
        case 0x14:
            /* ITOFF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtg(rb, rc);
            break;
        case 0x02B:
            /* SQRTT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtt(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* f11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x1E:
            /* CVTDG */
#if 0 // TODO
            gen_fcvtdg(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2D:
            /* CVTGD */
#if 0 // TODO
            gen_fcvtgd(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        /* XXX: rounding mode and traps are ignored (!) */
        switch (fpfn) { /* f11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ra, rb, rc);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ra, rb, rc);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ra, rb, rc);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ra, rb, rc);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ra, rb, rc);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ra, rb, rc);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ra, rb, rc);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ra, rb, rc);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ra, rb, rc);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ra, rb, rc);
            break;
        case 0x2C:
            /* XXX: incorrect */
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(rb, rc);
            } else {
                /* CVTTS */
                gen_fcvtts(rb, rc);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(rb, rc);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(rb, rc);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb)
                    /* FMOV */
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                else
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmpfeq(ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmpfne(ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmpflt(ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmpfge(ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmpfle(ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmpfgt(ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
            gen_fcvtqlv(rb, rc);
            break;
        case 0x530:
            /* CVTQL/SV */
            gen_fcvtqlsv(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op. Just exit from the current tb */
            ret = 2;
            break;
        case 0x0400:
            /* EXCB */
            /* No-op. Just exit from the current tb */
            ret = 2;
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            if (ra != 31)
                gen_helper_load_pcc(cpu_ir[ra]);
            break;
        case 0xE000:
            /* RC */
            if (ra != 31)
                gen_helper_rc(cpu_ir[ra]);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            if (ra != 31)
                gen_helper_rs(cpu_ir[ra]);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv tmp = tcg_const_i32(insn & 0xFF);
            gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
            tcg_temp_free(tmp);
        }
        break;
#endif
    case 0x1A:
        if (rb != 31)
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        else
            tcg_gen_movi_i64(cpu_pc, 0);
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        /* Those four jumps only differ by the branch prediction hint */
        switch (fn2) {
        case 0x0:
            /* JMP */
            break;
        case 0x1:
            /* JSR */
            break;
        case 0x2:
            /* RET */
            break;
        case 0x3:
            /* JSR_COROUTINE */
            break;
        }
        ret = 1;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_raw(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_raw(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                 * protection checks (hw_ldl/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldl_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                 * protection checks (hw_ldq/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldq_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            }
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                else
                    tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* CTPOP */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                else
                    gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x31:
            /* PERR */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_perr(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* CTLZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                else
                    gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x33:
            /* CTTZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                else
                    gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x34:
            /* UNPKBW */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbw(rb, rc);
            break;
        case 0x35:
            /* UNPKBL */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbl(rb, rc);
            break;
        case 0x36:
            /* PKWB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pkwb(rb, rc);
            break;
        case 0x37:
            /* PKLB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pklb(rb, rc);
            break;
        case 0x38:
            /* MINSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsb8(ra, rb, rc, islit, lit);
            break;
        case 0x39:
            /* MINSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsw4(ra, rb, rc, islit, lit);
            break;
        case 0x3A:
            /* MINUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minub8(ra, rb, rc, islit, lit);
            break;
        case 0x3B:
            /* MINUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minuw4(ra, rb, rc, islit, lit);
            break;
        case 0x3C:
            /* MAXUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxub8(ra, rb, rc, islit, lit);
            break;
        case 0x3D:
            /* MAXUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxuw4(ra, rb, rc, islit, lit);
            break;
        case 0x3E:
            /* MAXSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsb8(ra, rb, rc, islit, lit);
            break;
        case 0x3F:
            /* MAXSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsw4(ra, rb, rc, islit, lit);
            break;
        case 0x70:
            /* FTOIT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv tmp1 = tcg_const_i32(insn & 0xFF);
            if (ra != 31)
                gen_helper_mtpr(tmp1, cpu_ir[ra]);
            else {
                TCGv tmp2 = tcg_const_i64(0);
                gen_helper_mtpr(tmp1, tmp2);
                tcg_temp_free(tmp2);
            }
            tcg_temp_free(tmp1);
            ret = 2;
        }
        break;
#endif
    case 0x1E:
        /* HW_REI (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (rb == 31) {
            /* "Old" alpha */
            gen_helper_hw_rei();
        } else {
            TCGv tmp;

            if (ra != 31) {
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
            } else
                tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
            gen_helper_hw_ret(tmp);
            tcg_temp_free(tmp);
        }
        ret = 2;
        break;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv addr, val;
            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            if (ra != 31)
                val = cpu_ir[ra];
            else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_raw(val, addr);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_raw(val, addr);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_raw(val, val, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_raw(val, val, addr);
                break;
            case 0x4:
                /* Longword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                break;
            case 0x5:
                /* Quadword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Invalid */
                goto invalid_opc;
            case 0x9:
                /* Invalid */
                goto invalid_opc;
            case 0xA:
                /* Invalid */
                goto invalid_opc;
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                /* A quadword store needs the 64-bit helper here.  */
                gen_helper_stq_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Invalid */
                goto invalid_opc;
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        break;
#endif
2369 #endif
2370 case 0x20:
2371 /* LDF */
2372 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2373 break;
2374 case 0x21:
2375 /* LDG */
2376 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2377 break;
2378 case 0x22:
2379 /* LDS */
2380 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2381 break;
2382 case 0x23:
2383 /* LDT */
2384 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2385 break;
2386 case 0x24:
2387 /* STF */
2388 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
2389 break;
2390 case 0x25:
2391 /* STG */
2392 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
2393 break;
2394 case 0x26:
2395 /* STS */
2396 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
2397 break;
2398 case 0x27:
2399 /* STT */
2400 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
2401 break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x2F:
        /* STQ_C */
        gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
        break;
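    /* Branch-format instructions: disp21 is a signed longword count
       relative to the updated PC, hence the shift by 2. */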
    case 0x30:
        /* BR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x31: /* FBEQ */
    case 0x32: /* FBLT */
    case 0x33: /* FBLE */
        gen_fbcond(ctx, opc, ra, disp21);
        ret = 1;
        break;
    case 0x34:
        /* BSR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x35: /* FBNE */
    case 0x36: /* FBGE */
    case 0x37: /* FBGT */
        gen_fbcond(ctx, opc, ra, disp21);
        ret = 1;
        break;
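    /* Integer conditional branches; for BLBC and BLBS the final
       argument of 1 masks Ra to its low bit before the compare. */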
    case 0x38:
        /* BLBC */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        ret = 1;
        break;
    case 0x39:
        /* BEQ */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3A:
        /* BLT */
        gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3B:
        /* BLE */
        gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3C:
        /* BLBS */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        ret = 1;
        break;
    case 0x3D:
        /* BNE */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3E:
        /* BGE */
        gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3F:
        /* BGT */
        gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        ret = 1;
        break;
    invalid_opc:
        gen_invalid(ctx);
        ret = 3;
        break;
    }

    return ret;
}
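
/* Translate a block of guest code starting at tb->pc.  When search_pc
   is nonzero, also record the guest PC and instruction count for every
   op generated, so a host PC inside the block can later be mapped back
   to a guest PC (see gen_pc_load below). */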
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
    ctx.env = env;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
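    /* Translate one instruction per iteration; translate_one returns
       nonzero once the block must end (branch, exception, ...). */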
    for (ret = 0; ret == 0;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;
        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        if (ret != 0)
            break;
        /* If we reach a page boundary or are single stepping, stop
         * generation.
         */
        if (env->singlestep_enabled) {
            gen_excp(&ctx, EXCP_DEBUG, 0);
            break;
        }

        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;

        if (gen_opc_ptr >= gen_opc_end)
            break;

        if (num_insns >= max_insns)
            break;

        if (singlestep) {
            break;
        }
    }
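    /* Branches (ret == 1) and exceptions (ret == 3) have already set
       cpu_pc; otherwise store the address of the next instruction. */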
    if (ret != 1 && ret != 3) {
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Generate the return instruction */
    tcg_gen_exit_tb(0);
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
#ifdef DEBUG_DISAS
    log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0);
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
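
/* Known CPU models, each mapped to its implementation version and
   architecture extension mask (AMASK). */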
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4",   IMPLVER_2106x, 0 },
    { "ev5",   IMPLVER_21164, 0 },
    { "ev56",  IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064",   IMPLVER_2106x, 0 },
    { "21164",   IMPLVER_21164, 0 },
    { "21164a",  IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    uint64_t hwpcb;
    int implver, amask, i, max;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default. */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

    env->ps = 0x1F00;
#if defined (CONFIG_USER_ONLY)
    env->ps |= 1 << 3;
#endif
    pal_init(env);
    /* Initialize IPR */
    hwpcb = env->ipr[IPR_PCBB];
    env->ipr[IPR_ASN] = 0;
    env->ipr[IPR_ASTEN] = 0;
    env->ipr[IPR_ASTSR] = 0;
    env->ipr[IPR_DATFX] = 0;
    /* XXX: fix this */
    // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
    // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
    // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
    // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
    env->ipr[IPR_FEN] = 0;
    env->ipr[IPR_IPL] = 31;
    env->ipr[IPR_MCES] = 0;
    env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
    // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
    env->ipr[IPR_SISR] = 0;
    env->ipr[IPR_VIRBND] = -1ULL;

    qemu_init_vcpu(env);
    return env;
}
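
/* Invoked when retranslating with search_pc to recover machine state:
   restore the guest PC recorded for the op at index pc_pos. */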
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}