/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

/* #define DO_SINGLE_STEP */
#define ALPHA_DEBUG_DISAS
/* #define DO_TB_FLUSH */

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    uint32_t amask;
};

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
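/* Sizing of cpu_reg_names: "ir0".."ir9" take 4 bytes each ("ir" + digit +
   NUL) and "ir10".."ir30" take 5, giving 10*4 + 21*5; the "fir" names are
   one character longer, giving 10*5 + 21*6.  */
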
#include "gen-icount.h"

static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}

static always_inline void gen_excp (DisasContext *ctx,
                                    int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static always_inline void gen_invalid (DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}

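/* EXCP_OPCDEC is the Alpha "reserved opcode" fault; as with the other
   exceptions raised here, control is transferred to PALcode.  */
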
static always_inline void gen_qemu_ldf (TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static always_inline void gen_qemu_ldg (TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static always_inline void gen_qemu_lds (TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static always_inline void gen_qemu_ldl_l (TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}

static always_inline void gen_qemu_ldq_l (TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}

static always_inline void gen_load_mem (DisasContext *ctx,
                                        void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1, int flags),
                                        int ra, int rb, int32_t disp16,
                                        int fp, int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}

static always_inline void gen_qemu_stf (TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static always_inline void gen_qemu_stg (TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static always_inline void gen_qemu_sts (TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static always_inline void gen_qemu_stl_c (TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static always_inline void gen_qemu_stq_c (TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

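/* gen_qemu_ldl_l/ldq_l above record the locked address in cpu_lock;
   gen_qemu_stl_c/stq_c perform the store only while cpu_lock still equals
   the store address, write 1 (success) or 0 into t0, and then invalidate
   the lock with -1.  This is a simplified, single-CPU model of the Alpha
   load-locked/store-conditional pair: it does not detect interfering
   writes to the locked location.  */
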
static always_inline void gen_store_mem (DisasContext *ctx,
                                         void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1, int flags),
                                         int ra, int rb, int32_t disp16,
                                         int fp, int clear, int local)
{
    TCGv addr;
    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}

static always_inline void gen_bcond (DisasContext *ctx,
                                     TCGCond cond,
                                     int ra, int32_t disp16, int mask)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize. */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp16 << 2));
    gen_set_label(l2);
}

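/* In gen_bcond, when mask is set only bit 0 of ra is tested (BLBC/BLBS).
   Branch displacements are in longwords: the target is ctx->pc, which
   already points past the branch instruction, plus 4 * disp.  Both
   outcomes write cpu_pc, so the TB can terminate in either state.  */
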
static always_inline void gen_fbcond (DisasContext *ctx, int opc,
                                      int ra, int32_t disp16)
{
    int l1, l2;
    TCGv tmp;
    TCGv src;

    l1 = gen_new_label();
    l2 = gen_new_label();
    if (ra != 31) {
        tmp = tcg_temp_new();
        src = cpu_fir[ra];
    } else {
        tmp = tcg_const_i64(0);
        src = tmp;
    }
    switch (opc) {
    case 0x31: /* FBEQ */
        gen_helper_cmpfeq(tmp, src);
        break;
    case 0x32: /* FBLT */
        gen_helper_cmpflt(tmp, src);
        break;
    case 0x33: /* FBLE */
        gen_helper_cmpfle(tmp, src);
        break;
    case 0x35: /* FBNE */
        gen_helper_cmpfne(tmp, src);
        break;
    case 0x36: /* FBGE */
        gen_helper_cmpfge(tmp, src);
        break;
    case 0x37: /* FBGT */
        gen_helper_cmpfgt(tmp, src);
        break;
    default:
        abort();
    }
    tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 0, l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp16 << 2));
    gen_set_label(l2);
}

static always_inline void gen_cmov (TCGCond inv_cond,
                                    int ra, int rb, int rc,
                                    int islit, uint8_t lit, int mask)
{
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize. */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}

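/* gen_cmov receives the *inverted* condition: the move into rc is skipped
   (branch to l1) when the inverse holds, so e.g. CMOVEQ below passes
   TCG_COND_NE.  The mask flag selects the CMOVLBS/CMOVLBC forms, which
   test only bit 0 of ra.  */
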
#define FARITH2(name)                                       \
static always_inline void glue(gen_f, name)(int rb, int rc) \
{                                                           \
    if (unlikely(rc == 31))                                 \
        return;                                             \
                                                            \
    if (rb != 31)                                           \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    else {                                                  \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}
FARITH2(sqrts)
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(sqrtt)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
FARITH2(cvtst)
FARITH2(cvtts)
FARITH2(cvttq)
FARITH2(cvtqs)
FARITH2(cvtqt)
FARITH2(cvtlq)
FARITH2(cvtql)
FARITH2(cvtqlv)
FARITH2(cvtqlsv)

#define FARITH3(name)                                                     \
static always_inline void glue(gen_f, name) (int ra, int rb, int rc)      \
{                                                                         \
    if (unlikely(rc == 31))                                               \
        return;                                                           \
                                                                          \
    if (ra != 31) {                                                       \
        if (rb != 31)                                                     \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], cpu_fir[rb]);  \
        else {                                                            \
            TCGv tmp = tcg_const_i64(0);                                  \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], tmp);          \
            tcg_temp_free(tmp);                                           \
        }                                                                 \
    } else {                                                              \
        TCGv tmp = tcg_const_i64(0);                                      \
        if (rb != 31)                                                     \
            gen_helper_ ## name (cpu_fir[rc], tmp, cpu_fir[rb]);          \
        else                                                              \
            gen_helper_ ## name (cpu_fir[rc], tmp, tmp);                  \
        tcg_temp_free(tmp);                                               \
    }                                                                     \
}
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
FARITH3(adds)
FARITH3(subs)
FARITH3(muls)
FARITH3(divs)
FARITH3(addt)
FARITH3(subt)
FARITH3(mult)
FARITH3(divt)
FARITH3(cmptun)
FARITH3(cmpteq)
FARITH3(cmptlt)
FARITH3(cmptle)
FARITH3(cpys)
FARITH3(cpysn)
FARITH3(cpyse)

#define FCMOV(name)                                                  \
static always_inline void glue(gen_f, name) (int ra, int rb, int rc) \
{                                                                    \
    int l1;                                                          \
    TCGv tmp;                                                        \
                                                                     \
    if (unlikely(rc == 31))                                          \
        return;                                                      \
                                                                     \
    l1 = gen_new_label();                                            \
    if (ra != 31) {                                                  \
        tmp = tcg_temp_new();                                        \
        gen_helper_ ## name (tmp, cpu_fir[ra]);                      \
    } else {                                                         \
        tmp = tcg_const_i64(0);                                      \
        gen_helper_ ## name (tmp, tmp);                              \
    }                                                                \
    tcg_gen_brcondi_i64(TCG_COND_EQ, tmp, 0, l1);                    \
    if (rb != 31)                                                    \
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);                   \
    else                                                             \
        tcg_gen_movi_i64(cpu_fir[rc], 0);                            \
    gen_set_label(l1);                                               \
}
FCMOV(cmpfeq)
FCMOV(cmpfne)
FCMOV(cmpflt)
FCMOV(cmpfge)
FCMOV(cmpfle)
FCMOV(cmpfgt)

/* EXTWH, EXTLH, EXTQH */
static always_inline void gen_ext_h(void (*tcg_gen_ext_i64)(TCGv t0, TCGv t1),
                                    int ra, int rb, int rc,
                                    int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;

    if (ra != 31) {
        if (islit) {
            if ((lit & 7) != 0)
                tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], 64 - ((lit & 7) * 8));
            else
                tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[ra]);
        } else {
            TCGv tmp1, tmp2;
            tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tmp2 = tcg_const_i64(64);
            tcg_gen_sub_i64(tmp1, tmp2, tmp1);
            tcg_temp_free(tmp2);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        if (tcg_gen_ext_i64)
            tcg_gen_ext_i64(cpu_ir[rc], cpu_ir[rc]);
    } else
        tcg_gen_movi_i64(cpu_ir[rc], 0);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static always_inline void gen_ext_l(void (*tcg_gen_ext_i64)(TCGv t0, TCGv t1),
                                    int ra, int rb, int rc,
                                    int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;

    if (ra != 31) {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        if (tcg_gen_ext_i64)
            tcg_gen_ext_i64(cpu_ir[rc], cpu_ir[rc]);
    } else
        tcg_gen_movi_i64(cpu_ir[rc], 0);
}

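/* The EXTxL forms above extract a field starting at byte rb<2:0>: shift
   right by 8 * (rb & 7), then zero-extend to the operand width.  The
   EXTxH forms instead shift left by 64 - 8 * (rb & 7).  Passing NULL as
   the extension function keeps the full 64-bit result (EXTQL/EXTQH).  */
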
/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static always_inline void glue(gen_, name) (int ra, int rb, int rc,  \
                                            int islit, uint8_t lit)  \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(mskbl)
ARITH3(insbl)
ARITH3(mskwl)
ARITH3(inswl)
ARITH3(mskll)
ARITH3(insll)
ARITH3(zap)
ARITH3(zapnot)
ARITH3(mskql)
ARITH3(insql)
ARITH3(mskwh)
ARITH3(inswh)
ARITH3(msklh)
ARITH3(inslh)
ARITH3(mskqh)
ARITH3(insqh)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)

static always_inline void gen_cmp(TCGCond cond,
                                  int ra, int rb, int rc,
                                  int islit, uint8_t lit)
{
    int l1, l2;
    TCGv tmp;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();
    l2 = gen_new_label();

    if (ra != 31) {
        tmp = tcg_temp_new();
        tcg_gen_mov_i64(tmp, cpu_ir[ra]);
    } else
        tmp = tcg_const_i64(0);
    if (islit)
        tcg_gen_brcondi_i64(cond, tmp, lit, l1);
    else
        tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);

    tcg_gen_movi_i64(cpu_ir[rc], 0);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_ir[rc], 1);
    gen_set_label(l2);
}

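/* translate_one returns: 0 to continue translation, 1 when the insn was a
   branch or jump and cpu_pc has already been written, 2 to end the TB with
   cpu_pc still to be updated by the caller, and 3 when an exception has
   been raised.  The main loop below only rewrites cpu_pc for codes 0
   and 2.  */
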
static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11, fn16;
    uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit;
    uint8_t lit;
    int ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    sbz = (insn >> 13) & 0x07;
    islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn16 = insn & 0x0000FFFF;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
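    /* Alpha instruction formats, for reference:
     *   all:      opcode  = insn<31:26>
     *   memory:   ra = insn<25:21>, rb = insn<20:16>, disp16 = insn<15:0>
     *   branch:   ra = insn<25:21>, disp21 = insn<20:0> (longword units)
     *   operate:  rc = insn<4:0>, fn7 = insn<11:5>; when insn<12> is set,
     *             rb is replaced by the zero-extended literal insn<20:13>
     *   FP op:    fn11 = insn<15:5>, fpfn = fn11<5:0>
     *   PALcode:  palcode = insn<25:0>
     */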
    ret = 0;
    LOG_DISAS("opc %02x ra %d rb %d rc %d disp16 %04x\n",
              opc, ra, rb, rc, disp16);
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x1F) << 6), 0);
#if !defined (CONFIG_USER_ONLY)
        } else if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx & 1)
                goto invalid_opc;
            else
                gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x1F) << 6), 0);
#endif
        } else {
            /* Invalid PAL call */
            goto invalid_opc;
        }
        ret = 3;
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 1);
        break;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], helper_amask(lit));
                else
                    gen_helper_amask(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                gen_helper_load_implver(cpu_ir[rc]);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_mskbl(ra, rb, rc, islit, lit);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(&tcg_gen_ext8u_i64, ra, rb, rc, islit, lit);
            break;
        case 0x0B:
            /* INSBL */
            gen_insbl(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* MSKWL */
            gen_mskwl(ra, rb, rc, islit, lit);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(&tcg_gen_ext16u_i64, ra, rb, rc, islit, lit);
            break;
        case 0x1B:
            /* INSWL */
            gen_inswl(ra, rb, rc, islit, lit);
            break;
        case 0x22:
            /* MSKLL */
            gen_mskll(ra, rb, rc, islit, lit);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(&tcg_gen_ext32u_i64, ra, rb, rc, islit, lit);
            break;
        case 0x2B:
            /* INSLL */
            gen_insll(ra, rb, rc, islit, lit);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_mskql(ra, rb, rc, islit, lit);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(NULL, ra, rb, rc, islit, lit);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_insql(ra, rb, rc, islit, lit);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_mskwh(ra, rb, rc, islit, lit);
            break;
        case 0x57:
            /* INSWH */
            gen_inswh(ra, rb, rc, islit, lit);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(&tcg_gen_ext16u_i64, ra, rb, rc, islit, lit);
            break;
        case 0x62:
            /* MSKLH */
            gen_msklh(ra, rb, rc, islit, lit);
            break;
        case 0x67:
            /* INSLH */
            gen_inslh(ra, rb, rc, islit, lit);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(&tcg_gen_ext32u_i64, ra, rb, rc, islit, lit);
            break;
        case 0x72:
            /* MSKQH */
            gen_mskqh(ra, rb, rc, islit, lit);
            break;
        case 0x77:
            /* INSQH */
            gen_insqh(ra, rb, rc, islit, lit);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(NULL, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* f11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtf(rb, rc);
            break;
        case 0x0B:
            /* SQRTS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrts(rb, rc);
            break;
        case 0x14:
            /* ITOFF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtg(rb, rc);
            break;
        case 0x2B:
            /* SQRTT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtt(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* f11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x1E:
            /* CVTDG */
#if 0 // TODO
            gen_fcvtdg(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2D:
            /* CVTGD */
#if 0 // TODO
            gen_fcvtgd(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        /* XXX: rounding mode and traps are ignored (!) */
        switch (fpfn) { /* f11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ra, rb, rc);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ra, rb, rc);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ra, rb, rc);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ra, rb, rc);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ra, rb, rc);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ra, rb, rc);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ra, rb, rc);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ra, rb, rc);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ra, rb, rc);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ra, rb, rc);
            break;
        case 0x2C:
            /* XXX: incorrect */
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(rb, rc);
            } else {
                /* CVTTS */
                gen_fcvtts(rb, rc);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(rb, rc);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(rb, rc);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb)
                    /* FMOV */
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                else
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmpfeq(ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmpfne(ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmpflt(ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmpfge(ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmpfle(ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmpfgt(ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
            gen_fcvtqlv(rb, rc);
            break;
        case 0x530:
            /* CVTQL/SV */
            gen_fcvtqlsv(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op. Just exit from the current tb */
            ret = 2;
            break;
        case 0x0400:
            /* EXCB */
            /* No-op. Just exit from the current tb */
            ret = 2;
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            if (ra != 31)
                gen_helper_load_pcc(cpu_ir[ra]);
            break;
        case 0xE000:
            /* RC */
            if (ra != 31)
                gen_helper_rc(cpu_ir[ra]);
            break;
        case 0xE800:
            /* ECB */
            /* XXX: TODO: evict tb cache at address rb */
#if 0
            ret = 2;
#else
            goto invalid_opc;
#endif
            break;
        case 0xF000:
            /* RS */
            if (ra != 31)
                gen_helper_rs(cpu_ir[ra]);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv tmp = tcg_const_i32(insn & 0xFF);
            gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
            tcg_temp_free(tmp);
        }
        break;
#endif
    case 0x1A:
        if (rb != 31)
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        else
            tcg_gen_movi_i64(cpu_pc, 0);
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        /* Those four jumps only differ by the branch prediction hint */
        switch (fn2) {
        case 0x0:
            /* JMP */
            break;
        case 0x1:
            /* JSR */
            break;
        case 0x2:
            /* RET */
            break;
        case 0x3:
            /* JSR_COROUTINE */
            break;
        }
        ret = 1;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_ldl_l_raw(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_ldq_l_raw(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch */
                gen_helper_ldl_kernel(cpu_ir[ra], addr);
                break;
            case 0x5:
                /* Quadword virtual PTE fetch */
                gen_helper_ldq_kernel(cpu_ir[ra], addr);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x9:
                /* Quadword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0xA:
                /* Longword virtual access with protection check */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, ctx->mem_idx);
                break;
            case 0xB:
                /* Quadword virtual access with protection check */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, ctx->mem_idx);
                break;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                 * protection checks
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldl_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                 * protection checks
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldq_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            }
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                else
                    tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* CTPOP */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                else
                    gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x31:
            /* PERR */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x32:
            /* CTLZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                else
                    gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x33:
            /* CTTZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                else
                    gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x34:
            /* UNPKBW */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x35:
            /* UNPKWL */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x36:
            /* PKWB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x37:
            /* PKLB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x38:
            /* MINSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x39:
            /* MINSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x3A:
            /* MINUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x3B:
            /* MINUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x3C:
            /* MAXUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x3D:
            /* MAXUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x3E:
            /* MAXSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x3F:
            /* MAXSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
            break;
        case 0x70:
            /* FTOIT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv tmp1 = tcg_const_i32(insn & 0xFF);
            if (ra != 31)
                gen_helper_mtpr(tmp1, cpu_ir[ra]);
            else {
                TCGv tmp2 = tcg_const_i64(0);
                gen_helper_mtpr(tmp1, tmp2);
                tcg_temp_free(tmp2);
            }
            tcg_temp_free(tmp1);
            ret = 2;
        }
        break;
#endif
    case 0x1E:
        /* HW_REI (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (rb == 31) {
            /* "Old" alpha */
            gen_helper_hw_rei();
        } else {
            TCGv tmp;

            if (ra != 31) {
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
            } else
                tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
            gen_helper_hw_ret(tmp);
            tcg_temp_free(tmp);
        }
        ret = 2;
        break;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv addr, val;
            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            if (ra != 31)
                val = cpu_ir[ra];
            else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_raw(val, addr);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_raw(val, addr);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_raw(val, val, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_raw(val, val, addr);
                break;
            case 0x4:
                /* Longword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                break;
            case 0x5:
                /* Quadword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Invalid */
                goto invalid_opc;
            case 0x9:
                /* Invalid */
                goto invalid_opc;
            case 0xA:
                /* Invalid */
                goto invalid_opc;
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Invalid */
                goto invalid_opc;
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        ret = 2;
        break;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x2F:
        /* STQ_C */
        gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x30:
        /* BR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x31: /* FBEQ */
    case 0x32: /* FBLT */
    case 0x33: /* FBLE */
        gen_fbcond(ctx, opc, ra, disp16);
        ret = 1;
        break;
    case 0x34:
        /* BSR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x35: /* FBNE */
    case 0x36: /* FBGE */
    case 0x37: /* FBGT */
        gen_fbcond(ctx, opc, ra, disp16);
        ret = 1;
        break;
    case 0x38:
        /* BLBC */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp16, 1);
        ret = 1;
        break;
    case 0x39:
        /* BEQ */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp16, 0);
        ret = 1;
        break;
    case 0x3A:
        /* BLT */
        gen_bcond(ctx, TCG_COND_LT, ra, disp16, 0);
        ret = 1;
        break;
    case 0x3B:
        /* BLE */
        gen_bcond(ctx, TCG_COND_LE, ra, disp16, 0);
        ret = 1;
        break;
    case 0x3C:
        /* BLBS */
        gen_bcond(ctx, TCG_COND_NE, ra, disp16, 1);
        ret = 1;
        break;
    case 0x3D:
        /* BNE */
        gen_bcond(ctx, TCG_COND_NE, ra, disp16, 0);
        ret = 1;
        break;
    case 0x3E:
        /* BGE */
        gen_bcond(ctx, TCG_COND_GE, ra, disp16, 0);
        ret = 1;
        break;
    case 0x3F:
        /* BGT */
        gen_bcond(ctx, TCG_COND_GT, ra, disp16, 0);
        ret = 1;
        break;
    invalid_opc:
        gen_invalid(ctx);
        ret = 3;
        break;
    }

    return ret;
}

static always_inline void gen_intermediate_code_internal (CPUState *env,
                                                          TranslationBlock *tb,
                                                          int search_pc)
{
#if defined ALPHA_DEBUG_DISAS
    static int insn_count;
#endif
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif
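    /* mem_idx is the MMU index, taken from the current-mode field PS<4:3>;
       pal_mode mirrors bit 0 of the EXC_ADDR IPR, which is how execution
       in PALcode is flagged.  */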
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
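    /* With icount enabled, tb->cflags carries the instruction budget in
       CF_COUNT_MASK; zero means "no limit", so the mask itself is used as
       the cap.  */
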
    gen_icount_start();
    for (ret = 0; ret == 0;) {
        if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
            TAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
#if defined ALPHA_DEBUG_DISAS
        insn_count++;
        LOG_DISAS("pc " TARGET_FMT_lx " mem_idx %d\n",
                  ctx.pc, ctx.mem_idx);
#endif
        insn = ldl_code(ctx.pc);
#if defined ALPHA_DEBUG_DISAS
        insn_count++;
        LOG_DISAS("opcode %08x %d\n", insn, insn_count);
#endif
        num_insns++;
        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        if (ret != 0)
            break;
        /* if we reach a page boundary or are single stepping, stop
         * generation
         */
        if (((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) ||
            num_insns >= max_insns) {
            break;
        }

        if (env->singlestep_enabled) {
            gen_excp(&ctx, EXCP_DEBUG, 0);
            break;
        }

#if defined (DO_SINGLE_STEP)
        break;
#endif
    }
    if (ret != 1 && ret != 3) {
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
    }
#if defined (DO_TB_FLUSH)
    gen_helper_tb_flush();
#endif
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Generate the return instruction */
    tcg_gen_exit_tb(0);
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
#if defined ALPHA_DEBUG_DISAS
    log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0);
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}

void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    uint64_t hwpcb;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    if (!env)
        return NULL;
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);
    /* XXX: should not be hardcoded */
    env->implver = IMPLVER_2106x;
    env->ps = 0x1F00;
#if defined (CONFIG_USER_ONLY)
    env->ps |= 1 << 3;
#endif
    pal_init(env);
    /* Initialize IPR */
    hwpcb = env->ipr[IPR_PCBB];
    env->ipr[IPR_ASN] = 0;
    env->ipr[IPR_ASTEN] = 0;
    env->ipr[IPR_ASTSR] = 0;
    env->ipr[IPR_DATFX] = 0;
    /* XXX: fix this */
    //    env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
    //    env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
    //    env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
    //    env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
    env->ipr[IPR_FEN] = 0;
    env->ipr[IPR_IPL] = 31;
    env->ipr[IPR_MCES] = 0;
    env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
    //    env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
    env->ipr[IPR_SISR] = 0;
    env->ipr[IPR_VIRBND] = -1ULL;

    return env;
}

void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}