qemu-kvm/fedora.git: target-alpha/translate.c
/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"
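
/* helper.h is pulled in more than once on purpose: the plain include
   above declares the helper prototypes, this GEN_HELPER=1 pass generates
   the gen_helper_* wrappers used by the translator below, and a final
   GEN_HELPER=2 pass in alpha_translate_init() registers the helpers
   with TCG. */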
/* #define DO_SINGLE_STEP */
#define ALPHA_DEBUG_DISAS
/* #define DO_TB_FLUSH */

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    CPUAlphaState *env;
    uint32_t amask;
};
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;

/* register names */
static char cpu_reg_names[10*4 + 21*5 + 10*5 + 21*6];
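
/* Buffer size check: "ir0".."ir9" need 4 bytes each ("ir" + one digit +
   NUL) and "ir10".."ir30" need 5, while "fir0".."fir9" need 5 and
   "fir10".."fir30" need 6 - hence 10*4 + 21*5 + 10*5 + 21*6 bytes. */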

#include "gen-icount.h"

static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
static always_inline void gen_excp (DisasContext *ctx,
                                    int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static always_inline void gen_invalid (DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}
static always_inline void gen_qemu_ldf (TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static always_inline void gen_qemu_ldg (TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static always_inline void gen_qemu_lds (TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
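
/* Load-locked/store-conditional is modelled with a single lock address:
   LDx_L records the effective address in cpu_lock, and STx_C below
   succeeds only while cpu_lock still equals the store address, writing
   1 (success) or 0 (failure) back into t0 and then invalidating the
   lock with -1. */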
static always_inline void gen_qemu_ldl_l (TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}

static always_inline void gen_qemu_ldq_l (TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}
static always_inline void gen_load_mem (DisasContext *ctx,
                                        void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1, int flags),
                                        int ra, int rb, int32_t disp16,
                                        int fp, int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}
static always_inline void gen_qemu_stf (TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static always_inline void gen_qemu_stg (TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static always_inline void gen_qemu_sts (TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static always_inline void gen_qemu_stl_c (TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static always_inline void gen_qemu_stq_c (TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}
static always_inline void gen_store_mem (DisasContext *ctx,
                                         void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1, int flags),
                                         int ra, int rb, int32_t disp16,
                                         int fp, int clear, int local)
{
    TCGv addr;
    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}
static always_inline void gen_bcond (DisasContext *ctx,
                                     TCGCond cond,
                                     int ra, int32_t disp, int mask)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize. */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(l2);
}
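
/* Branch displacements count instructions, not bytes; the "disp << 2"
   above converts the signed instruction count into a byte offset from
   the updated PC. */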
static always_inline void gen_fbcond (DisasContext *ctx, int opc,
                                      int ra, int32_t disp)
{
    int l1, l2;
    TCGv tmp;
    TCGv src;

    l1 = gen_new_label();
    l2 = gen_new_label();
    if (ra != 31) {
        tmp = tcg_temp_new();
        src = cpu_fir[ra];
    } else {
        tmp = tcg_const_i64(0);
        src = tmp;
    }
    switch (opc) {
    case 0x31: /* FBEQ */
        gen_helper_cmpfeq(tmp, src);
        break;
    case 0x32: /* FBLT */
        gen_helper_cmpflt(tmp, src);
        break;
    case 0x33: /* FBLE */
        gen_helper_cmpfle(tmp, src);
        break;
    case 0x35: /* FBNE */
        gen_helper_cmpfne(tmp, src);
        break;
    case 0x36: /* FBGE */
        gen_helper_cmpfge(tmp, src);
        break;
    case 0x37: /* FBGT */
        gen_helper_cmpfgt(tmp, src);
        break;
    default:
        abort();
    }
    tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 0, l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(l2);
}
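
/* Conditional moves are lowered with the *inverse* condition: branch
   past the move when the inverse test on ra holds, otherwise copy rb
   (or the literal) into rc.  With mask set, only the low bit of ra is
   tested (CMOVLBS/CMOVLBC). */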
static always_inline void gen_cmov (TCGCond inv_cond,
                                    int ra, int rb, int rc,
                                    int islit, uint8_t lit, int mask)
{
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize. */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
#define FARITH2(name)                                       \
static always_inline void glue(gen_f, name)(int rb, int rc) \
{                                                           \
    if (unlikely(rc == 31))                                 \
        return;                                             \
                                                            \
    if (rb != 31)                                           \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    else {                                                  \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}
FARITH2(sqrts)
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(sqrtt)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
FARITH2(cvtst)
FARITH2(cvtts)
FARITH2(cvttq)
FARITH2(cvtqs)
FARITH2(cvtqt)
FARITH2(cvtlq)
FARITH2(cvtql)
FARITH2(cvtqlv)
FARITH2(cvtqlsv)
#define FARITH3(name)                                                    \
static always_inline void glue(gen_f, name) (int ra, int rb, int rc)     \
{                                                                        \
    if (unlikely(rc == 31))                                              \
        return;                                                          \
                                                                         \
    if (ra != 31) {                                                      \
        if (rb != 31)                                                    \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], cpu_fir[rb]); \
        else {                                                           \
            TCGv tmp = tcg_const_i64(0);                                 \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], tmp);         \
            tcg_temp_free(tmp);                                          \
        }                                                                \
    } else {                                                             \
        TCGv tmp = tcg_const_i64(0);                                     \
        if (rb != 31)                                                    \
            gen_helper_ ## name (cpu_fir[rc], tmp, cpu_fir[rb]);         \
        else                                                             \
            gen_helper_ ## name (cpu_fir[rc], tmp, tmp);                 \
        tcg_temp_free(tmp);                                              \
    }                                                                    \
}

FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
FARITH3(adds)
FARITH3(subs)
FARITH3(muls)
FARITH3(divs)
FARITH3(addt)
FARITH3(subt)
FARITH3(mult)
FARITH3(divt)
FARITH3(cmptun)
FARITH3(cmpteq)
FARITH3(cmptlt)
FARITH3(cmptle)
FARITH3(cpys)
FARITH3(cpysn)
FARITH3(cpyse)
#define FCMOV(name)                                                  \
static always_inline void glue(gen_f, name) (int ra, int rb, int rc) \
{                                                                    \
    int l1;                                                          \
    TCGv tmp;                                                        \
                                                                     \
    if (unlikely(rc == 31))                                          \
        return;                                                      \
                                                                     \
    l1 = gen_new_label();                                            \
    if (ra != 31) {                                                  \
        tmp = tcg_temp_new();                                        \
        gen_helper_ ## name (tmp, cpu_fir[ra]);                      \
    } else {                                                         \
        tmp = tcg_const_i64(0);                                      \
        gen_helper_ ## name (tmp, tmp);                              \
    }                                                                \
    tcg_gen_brcondi_i64(TCG_COND_EQ, tmp, 0, l1);                    \
    if (rb != 31)                                                    \
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);                   \
    else                                                             \
        tcg_gen_movi_i64(cpu_fir[rc], 0);                            \
    gen_set_label(l1);                                               \
    tcg_temp_free(tmp);                                              \
}
FCMOV(cmpfeq)
FCMOV(cmpfne)
FCMOV(cmpflt)
FCMOV(cmpfge)
FCMOV(cmpfle)
FCMOV(cmpfgt)
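
/* The EXTx{L,H} family extracts a 1, 2, 4 or 8 byte field from a
   quadword, using the low three bits of rb (or of the literal) as a
   byte offset.  For example EXTBL r1, 3, r2 yields
   r2 = (r1 >> 24) & 0xff: shift right by 8 * byte_offset, then
   zero-extend to the field width. */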
/* EXTWH, EXTLH, EXTQH */
static always_inline void gen_ext_h(void (*tcg_gen_ext_i64)(TCGv t0, TCGv t1),
                                    int ra, int rb, int rc,
                                    int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;

    if (ra != 31) {
        if (islit) {
            if (lit != 0)
                tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], 64 - ((lit & 7) * 8));
            else
                tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[ra]);
        } else {
            TCGv tmp1, tmp2;
            tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tmp2 = tcg_const_i64(64);
            tcg_gen_sub_i64(tmp1, tmp2, tmp1);
            tcg_temp_free(tmp2);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        if (tcg_gen_ext_i64)
            tcg_gen_ext_i64(cpu_ir[rc], cpu_ir[rc]);
    } else
        tcg_gen_movi_i64(cpu_ir[rc], 0);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static always_inline void gen_ext_l(void (*tcg_gen_ext_i64)(TCGv t0, TCGv t1),
                                    int ra, int rb, int rc,
                                    int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;

    if (ra != 31) {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        if (tcg_gen_ext_i64)
            tcg_gen_ext_i64(cpu_ir[rc], cpu_ir[rc]);
    } else
        tcg_gen_movi_i64(cpu_ir[rc], 0);
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                 \
static always_inline void glue(gen_, name) (int ra, int rb, int rc,  \
                                            int islit, uint8_t lit)  \
{                                                                    \
    if (unlikely(rc == 31))                                          \
        return;                                                      \
                                                                     \
    if (ra != 31) {                                                  \
        if (islit) {                                                 \
            TCGv tmp = tcg_const_i64(lit);                           \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);        \
            tcg_temp_free(tmp);                                      \
        } else                                                       \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);\
    } else {                                                         \
        TCGv tmp1 = tcg_const_i64(0);                                \
        if (islit) {                                                 \
            TCGv tmp2 = tcg_const_i64(lit);                          \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);            \
            tcg_temp_free(tmp2);                                     \
        } else                                                       \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);      \
        tcg_temp_free(tmp1);                                         \
    }                                                                \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(mskbl)
ARITH3(insbl)
ARITH3(mskwl)
ARITH3(inswl)
ARITH3(mskll)
ARITH3(insll)
ARITH3(zap)
ARITH3(zapnot)
ARITH3(mskql)
ARITH3(insql)
ARITH3(mskwh)
ARITH3(inswh)
ARITH3(msklh)
ARITH3(inslh)
ARITH3(mskqh)
ARITH3(insqh)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
static always_inline void gen_cmp(TCGCond cond,
                                  int ra, int rb, int rc,
                                  int islit, uint8_t lit)
{
    int l1, l2;
    TCGv tmp;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();
    l2 = gen_new_label();

    if (ra != 31) {
        tmp = tcg_temp_new();
        tcg_gen_mov_i64(tmp, cpu_ir[ra]);
    } else
        tmp = tcg_const_i64(0);
    if (islit)
        tcg_gen_brcondi_i64(cond, tmp, lit, l1);
    else
        tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);

    tcg_gen_movi_i64(cpu_ir[rc], 0);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_ir[rc], 1);
    gen_set_label(l2);
}
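
/* translate_one returns 0 to continue translating, or nonzero to end
   the TB: 1 when the instruction already wrote cpu_pc (branches and
   jumps), 2 when the caller still has to store the next PC itself, and
   3 when an exception was generated (gen_excp stored the PC). */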
static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11, fn16;
    uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit;
    uint8_t lit;
    int ret;

    /* Decode all instruction fields */
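    /* All formats share opc<31:26> and ra<25:21>.  Memory format adds
       rb<20:16> and disp<15:0>; branch format carries disp<20:0>;
       operate format has rb<20:16>, a 7-bit function code in <11:5> and
       rc<4:0>, with bits <20:13> holding an 8-bit literal instead of rb
       when bit 12 is set.  FP operate instructions widen the function
       field to <15:5> (fn11). */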
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    sbz = (insn >> 13) & 0x07;
    islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn16 = insn & 0x0000FFFF;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    ret = 0;
    LOG_DISAS("opc %02x ra %d rb %d rc %d disp16 %04x\n",
              opc, ra, rb, rc, disp16);
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
#if !defined (CONFIG_USER_ONLY)
        } else if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx & 1)
                goto invalid_opc;
            else
                gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
#endif
        } else {
            /* Invalid PAL call */
            goto invalid_opc;
        }
        ret = 3;
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                switch (ctx->env->implver) {
                case IMPLVER_2106x:
                    /* EV4, EV45, LCA, LCA45 & EV5 */
                    break;
                case IMPLVER_21164:
                case IMPLVER_21264:
                case IMPLVER_21364:
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
                                     ~(uint64_t)ctx->amask);
                    break;
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_mskbl(ra, rb, rc, islit, lit);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(&tcg_gen_ext8u_i64, ra, rb, rc, islit, lit);
            break;
        case 0x0B:
            /* INSBL */
            gen_insbl(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* MSKWL */
            gen_mskwl(ra, rb, rc, islit, lit);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(&tcg_gen_ext16u_i64, ra, rb, rc, islit, lit);
            break;
        case 0x1B:
            /* INSWL */
            gen_inswl(ra, rb, rc, islit, lit);
            break;
        case 0x22:
            /* MSKLL */
            gen_mskll(ra, rb, rc, islit, lit);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(&tcg_gen_ext32u_i64, ra, rb, rc, islit, lit);
            break;
        case 0x2B:
            /* INSLL */
            gen_insll(ra, rb, rc, islit, lit);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_mskql(ra, rb, rc, islit, lit);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(NULL, ra, rb, rc, islit, lit);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_insql(ra, rb, rc, islit, lit);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_mskwh(ra, rb, rc, islit, lit);
            break;
        case 0x57:
            /* INSWH */
            gen_inswh(ra, rb, rc, islit, lit);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(&tcg_gen_ext16u_i64, ra, rb, rc, islit, lit);
            break;
        case 0x62:
            /* MSKLH */
            gen_msklh(ra, rb, rc, islit, lit);
            break;
        case 0x67:
            /* INSLH */
            gen_inslh(ra, rb, rc, islit, lit);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(&tcg_gen_ext32u_i64, ra, rb, rc, islit, lit);
            break;
        case 0x72:
            /* MSKQH */
            gen_mskqh(ra, rb, rc, islit, lit);
            break;
        case 0x77:
            /* INSQH */
            gen_insqh(ra, rb, rc, islit, lit);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(NULL, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtf(rb, rc);
            break;
        case 0x0B:
            /* SQRTS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrts(rb, rc);
            break;
        case 0x14:
            /* ITOFF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtg(rb, rc);
            break;
        case 0x2B:
            /* SQRTT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtt(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and traps are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x1E:
            /* CVTDG */
#if 0 // TODO
            gen_fcvtdg(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2D:
            /* CVTGD */
#if 0 // TODO
            gen_fcvtgd(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        /* XXX: rounding mode and traps are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ra, rb, rc);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ra, rb, rc);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ra, rb, rc);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ra, rb, rc);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ra, rb, rc);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ra, rb, rc);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ra, rb, rc);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ra, rb, rc);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ra, rb, rc);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ra, rb, rc);
            break;
        case 0x2C:
            /* XXX: incorrect */
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(rb, rc);
            } else {
                /* CVTTS */
                gen_fcvtts(rb, rc);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(rb, rc);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(rb, rc);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb)
                    /* FMOV */
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                else
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmpfeq(ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmpfne(ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmpflt(ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmpfge(ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmpfle(ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmpfgt(ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
            gen_fcvtqlv(rb, rc);
            break;
        case 0x530:
            /* CVTQL/SV */
            gen_fcvtqlsv(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op. Just exit from the current tb */
            ret = 2;
            break;
        case 0x0400:
            /* EXCB */
            /* No-op. Just exit from the current tb */
            ret = 2;
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            if (ra != 31)
                gen_helper_load_pcc(cpu_ir[ra]);
            break;
        case 0xE000:
            /* RC */
            if (ra != 31)
                gen_helper_rc(cpu_ir[ra]);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            if (ra != 31)
                gen_helper_rs(cpu_ir[ra]);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv tmp = tcg_const_i32(insn & 0xFF);
            gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
            tcg_temp_free(tmp);
        }
        break;
#endif
    case 0x1A:
        if (rb != 31)
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        else
            tcg_gen_movi_i64(cpu_pc, 0);
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        /* Those four jumps only differ by the branch prediction hint */
        switch (fn2) {
        case 0x0:
            /* JMP */
            break;
        case 0x1:
            /* JSR */
            break;
        case 0x2:
            /* RET */
            break;
        case 0x3:
            /* JSR_COROUTINE */
            break;
        }
        ret = 1;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_raw(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_raw(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                 * protection checks (hw_ldl/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldl_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                 * protection checks (hw_ldq/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldq_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            }
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                else
                    tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* CTPOP */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                else
                    gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x31:
            /* PERR */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x32:
            /* CTLZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                else
                    gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x33:
            /* CTTZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                else
                    gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x34:
            /* UNPKBW */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x35:
            /* UNPKWL */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x36:
            /* PKWB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x37:
            /* PKLB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x38:
            /* MINSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x39:
            /* MINSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x3A:
            /* MINUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x3B:
            /* MINUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x3C:
            /* MAXUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x3D:
            /* MAXUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x3E:
            /* MAXSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x3F:
            /* MAXSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x70:
            /* FTOIT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv tmp1 = tcg_const_i32(insn & 0xFF);
            if (ra != 31)
                gen_helper_mtpr(tmp1, cpu_ir[ra]);
            else {
                TCGv tmp2 = tcg_const_i64(0);
                gen_helper_mtpr(tmp1, tmp2);
                tcg_temp_free(tmp2);
            }
            tcg_temp_free(tmp1);
            ret = 2;
        }
        break;
#endif
    case 0x1E:
        /* HW_REI (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (rb == 31) {
            /* "Old" alpha */
            gen_helper_hw_rei();
        } else {
            TCGv tmp;

            if (ra != 31) {
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
            } else
                tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
            gen_helper_hw_ret(tmp);
            tcg_temp_free(tmp);
        }
        ret = 2;
        break;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv addr, val;
            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            if (ra != 31)
                val = cpu_ir[ra];
            else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_raw(val, addr);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_raw(val, addr);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_raw(val, val, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_raw(val, val, addr);
                break;
            case 0x4:
                /* Longword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                break;
            case 0x5:
                /* Quadword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Invalid */
                goto invalid_opc;
            case 0x9:
                /* Invalid */
                goto invalid_opc;
            case 0xA:
                /* Invalid */
                goto invalid_opc;
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Invalid */
                goto invalid_opc;
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x2F:
        /* STQ_C */
        gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x30:
        /* BR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x31: /* FBEQ */
    case 0x32: /* FBLT */
    case 0x33: /* FBLE */
        gen_fbcond(ctx, opc, ra, disp21);
        ret = 1;
        break;
    case 0x34:
        /* BSR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x35: /* FBNE */
    case 0x36: /* FBGE */
    case 0x37: /* FBGT */
        gen_fbcond(ctx, opc, ra, disp21);
        ret = 1;
        break;
    case 0x38:
        /* BLBC */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        ret = 1;
        break;
    case 0x39:
        /* BEQ */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3A:
        /* BLT */
        gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3B:
        /* BLE */
        gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3C:
        /* BLBS */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        ret = 1;
        break;
    case 0x3D:
        /* BNE */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3E:
        /* BGE */
        gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3F:
        /* BGT */
        gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        ret = 1;
        break;
    invalid_opc:
        gen_invalid(ctx);
        ret = 3;
        break;
    }

    return ret;
}
static always_inline void gen_intermediate_code_internal (CPUState *env,
                                                          TranslationBlock *tb,
                                                          int search_pc)
{
#if defined ALPHA_DEBUG_DISAS
    static int insn_count;
#endif
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
    ctx.env = env;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
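    /* tb->cflags carries the icount-imposed limit on instructions per
       TB; a zero count means unconstrained, hence the CF_COUNT_MASK
       maximum chosen above. */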

    gen_icount_start();
    for (ret = 0; ret == 0;) {
        if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
            TAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
#if defined ALPHA_DEBUG_DISAS
        insn_count++;
        LOG_DISAS("pc " TARGET_FMT_lx " mem_idx %d\n",
                  ctx.pc, ctx.mem_idx);
#endif
        insn = ldl_code(ctx.pc);
#if defined ALPHA_DEBUG_DISAS
        insn_count++;
        LOG_DISAS("opcode %08x %d\n", insn, insn_count);
#endif
        num_insns++;
        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        if (ret != 0)
            break;
        /* if we reach a page boundary or are single stepping, stop
         * generation
         */
        if (env->singlestep_enabled) {
            gen_excp(&ctx, EXCP_DEBUG, 0);
            break;
        }
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        if (gen_opc_ptr >= gen_opc_end)
            break;
        if (num_insns >= max_insns)
            break;
        if (singlestep) {
            break;
        }
    }
    if (ret != 1 && ret != 3) {
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
    }
#if defined (DO_TB_FLUSH)
    gen_helper_tb_flush();
#endif
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Generate the return instruction */
    tcg_gen_exit_tb(0);
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
#if defined ALPHA_DEBUG_DISAS
    log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0);
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    uint64_t hwpcb;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);
    /* XXX: should not be hardcoded */
    env->implver = IMPLVER_2106x;
    env->ps = 0x1F00;
#if defined (CONFIG_USER_ONLY)
    env->ps |= 1 << 3;
#endif
    pal_init(env);
    /* Initialize IPR */
    hwpcb = env->ipr[IPR_PCBB];
    env->ipr[IPR_ASN] = 0;
    env->ipr[IPR_ASTEN] = 0;
    env->ipr[IPR_ASTSR] = 0;
    env->ipr[IPR_DATFX] = 0;
    /* XXX: fix this */
    //    env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
    //    env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
    //    env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
    //    env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
    env->ipr[IPR_FEN] = 0;
    env->ipr[IPR_IPL] = 31;
    env->ipr[IPR_MCES] = 0;
    env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
    //    env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
    env->ipr[IPR_SISR] = 0;
    env->ipr[IPR_VIRBND] = -1ULL;

    qemu_init_vcpu(env);
    return env;
}
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}