baum: use a common prefix for chr callbacks
[qemu/ar7.git] / target / hppa / translate.c
blob4d243f7d3df46d05791387bb2bb0a9e67ebdde76
1 /*
2 * HPPA emulation cpu translation for qemu.
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
32 #include "exec/log.h"
34 typedef struct DisasCond {
35 TCGCond c;
36 TCGv a0, a1;
37 bool a0_is_n;
38 bool a1_is_0;
39 } DisasCond;
41 typedef struct DisasContext {
42 struct TranslationBlock *tb;
43 CPUState *cs;
45 target_ulong iaoq_f;
46 target_ulong iaoq_b;
47 target_ulong iaoq_n;
48 TCGv iaoq_n_var;
50 int ntemps;
51 TCGv temps[8];
53 DisasCond null_cond;
54 TCGLabel *null_lab;
56 bool singlestep_enabled;
57 bool psw_n_nonzero;
58 } DisasContext;
60 /* Return values from translate_one, indicating the state of the TB.
61 Note that zero indicates that we are not exiting the TB. */
63 typedef enum {
64 NO_EXIT,
66 /* We have emitted one or more goto_tb. No fixup required. */
67 EXIT_GOTO_TB,
69 /* We are not using a goto_tb (for whatever reason), but have updated
70 the iaq (for whatever reason), so don't do it again on exit. */
71 EXIT_IAQ_N_UPDATED,
73 /* We are exiting the TB, but have neither emitted a goto_tb, nor
74 updated the iaq for the next instruction to be executed. */
75 EXIT_IAQ_N_STALE,
77 /* We are ending the TB with a noreturn function call, e.g. longjmp.
78 No following code will be executed. */
79 EXIT_NORETURN,
80 } ExitStatus;
82 typedef struct DisasInsn {
83 uint32_t insn, mask;
84 ExitStatus (*trans)(DisasContext *ctx, uint32_t insn,
85 const struct DisasInsn *f);
86 union {
87 void (*f_ttt)(TCGv, TCGv, TCGv);
88 void (*f_weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
89 void (*f_dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
90 void (*f_wew)(TCGv_i32, TCGv_env, TCGv_i32);
91 void (*f_ded)(TCGv_i64, TCGv_env, TCGv_i64);
92 void (*f_wed)(TCGv_i32, TCGv_env, TCGv_i64);
93 void (*f_dew)(TCGv_i64, TCGv_env, TCGv_i32);
95 } DisasInsn;
97 /* global register indexes */
98 static TCGv_env cpu_env;
99 static TCGv cpu_gr[32];
100 static TCGv cpu_iaoq_f;
101 static TCGv cpu_iaoq_b;
102 static TCGv cpu_sar;
103 static TCGv cpu_psw_n;
104 static TCGv cpu_psw_v;
105 static TCGv cpu_psw_cb;
106 static TCGv cpu_psw_cb_msb;
107 static TCGv cpu_cr26;
108 static TCGv cpu_cr27;
110 #include "exec/gen-icount.h"
112 void hppa_translate_init(void)
114 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
116 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
117 static const GlobalVar vars[] = {
118 DEF_VAR(sar),
119 DEF_VAR(cr26),
120 DEF_VAR(cr27),
121 DEF_VAR(psw_n),
122 DEF_VAR(psw_v),
123 DEF_VAR(psw_cb),
124 DEF_VAR(psw_cb_msb),
125 DEF_VAR(iaoq_f),
126 DEF_VAR(iaoq_b),
129 #undef DEF_VAR
131 /* Use the symbolic register names that match the disassembler. */
132 static const char gr_names[32][4] = {
133 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
134 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
135 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
136 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
139 static bool done_init = 0;
140 int i;
142 if (done_init) {
143 return;
145 done_init = 1;
147 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
148 tcg_ctx.tcg_env = cpu_env;
150 TCGV_UNUSED(cpu_gr[0]);
151 for (i = 1; i < 32; i++) {
152 cpu_gr[i] = tcg_global_mem_new(cpu_env,
153 offsetof(CPUHPPAState, gr[i]),
154 gr_names[i]);
157 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
158 const GlobalVar *v = &vars[i];
159 *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
163 static DisasCond cond_make_f(void)
165 DisasCond r = { .c = TCG_COND_NEVER };
166 TCGV_UNUSED(r.a0);
167 TCGV_UNUSED(r.a1);
168 return r;
171 static DisasCond cond_make_n(void)
173 DisasCond r = { .c = TCG_COND_NE, .a0_is_n = true, .a1_is_0 = true };
174 r.a0 = cpu_psw_n;
175 TCGV_UNUSED(r.a1);
176 return r;
179 static DisasCond cond_make_0(TCGCond c, TCGv a0)
181 DisasCond r = { .c = c, .a1_is_0 = true };
183 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
184 r.a0 = tcg_temp_new();
185 tcg_gen_mov_tl(r.a0, a0);
186 TCGV_UNUSED(r.a1);
188 return r;
191 static DisasCond cond_make(TCGCond c, TCGv a0, TCGv a1)
193 DisasCond r = { .c = c };
195 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
196 r.a0 = tcg_temp_new();
197 tcg_gen_mov_tl(r.a0, a0);
198 r.a1 = tcg_temp_new();
199 tcg_gen_mov_tl(r.a1, a1);
201 return r;
204 static void cond_prep(DisasCond *cond)
206 if (cond->a1_is_0) {
207 cond->a1_is_0 = false;
208 cond->a1 = tcg_const_tl(0);
212 static void cond_free(DisasCond *cond)
214 switch (cond->c) {
215 default:
216 if (!cond->a0_is_n) {
217 tcg_temp_free(cond->a0);
219 if (!cond->a1_is_0) {
220 tcg_temp_free(cond->a1);
222 cond->a0_is_n = false;
223 cond->a1_is_0 = false;
224 TCGV_UNUSED(cond->a0);
225 TCGV_UNUSED(cond->a1);
226 /* fallthru */
227 case TCG_COND_ALWAYS:
228 cond->c = TCG_COND_NEVER;
229 break;
230 case TCG_COND_NEVER:
231 break;
235 static TCGv get_temp(DisasContext *ctx)
237 unsigned i = ctx->ntemps++;
238 g_assert(i < ARRAY_SIZE(ctx->temps));
239 return ctx->temps[i] = tcg_temp_new();
242 static TCGv load_const(DisasContext *ctx, target_long v)
244 TCGv t = get_temp(ctx);
245 tcg_gen_movi_tl(t, v);
246 return t;
249 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
251 if (reg == 0) {
252 TCGv t = get_temp(ctx);
253 tcg_gen_movi_tl(t, 0);
254 return t;
255 } else {
256 return cpu_gr[reg];
260 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
262 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
263 return get_temp(ctx);
264 } else {
265 return cpu_gr[reg];
269 static void save_or_nullify(DisasContext *ctx, TCGv dest, TCGv t)
271 if (ctx->null_cond.c != TCG_COND_NEVER) {
272 cond_prep(&ctx->null_cond);
273 tcg_gen_movcond_tl(ctx->null_cond.c, dest, ctx->null_cond.a0,
274 ctx->null_cond.a1, dest, t);
275 } else {
276 tcg_gen_mov_tl(dest, t);
280 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv t)
282 if (reg != 0) {
283 save_or_nullify(ctx, cpu_gr[reg], t);
287 #ifdef HOST_WORDS_BIGENDIAN
288 # define HI_OFS 0
289 # define LO_OFS 4
290 #else
291 # define HI_OFS 4
292 # define LO_OFS 0
293 #endif
295 static TCGv_i32 load_frw_i32(unsigned rt)
297 TCGv_i32 ret = tcg_temp_new_i32();
298 tcg_gen_ld_i32(ret, cpu_env,
299 offsetof(CPUHPPAState, fr[rt & 31])
300 + (rt & 32 ? LO_OFS : HI_OFS));
301 return ret;
304 static TCGv_i32 load_frw0_i32(unsigned rt)
306 if (rt == 0) {
307 return tcg_const_i32(0);
308 } else {
309 return load_frw_i32(rt);
313 static TCGv_i64 load_frw0_i64(unsigned rt)
315 if (rt == 0) {
316 return tcg_const_i64(0);
317 } else {
318 TCGv_i64 ret = tcg_temp_new_i64();
319 tcg_gen_ld32u_i64(ret, cpu_env,
320 offsetof(CPUHPPAState, fr[rt & 31])
321 + (rt & 32 ? LO_OFS : HI_OFS));
322 return ret;
326 static void save_frw_i32(unsigned rt, TCGv_i32 val)
328 tcg_gen_st_i32(val, cpu_env,
329 offsetof(CPUHPPAState, fr[rt & 31])
330 + (rt & 32 ? LO_OFS : HI_OFS));
333 #undef HI_OFS
334 #undef LO_OFS
336 static TCGv_i64 load_frd(unsigned rt)
338 TCGv_i64 ret = tcg_temp_new_i64();
339 tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
340 return ret;
343 static TCGv_i64 load_frd0(unsigned rt)
345 if (rt == 0) {
346 return tcg_const_i64(0);
347 } else {
348 return load_frd(rt);
352 static void save_frd(unsigned rt, TCGv_i64 val)
354 tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
357 /* Skip over the implementation of an insn that has been nullified.
358 Use this when the insn is too complex for a conditional move. */
359 static void nullify_over(DisasContext *ctx)
361 if (ctx->null_cond.c != TCG_COND_NEVER) {
362 /* The always condition should have been handled in the main loop. */
363 assert(ctx->null_cond.c != TCG_COND_ALWAYS);
365 ctx->null_lab = gen_new_label();
366 cond_prep(&ctx->null_cond);
368 /* If we're using PSW[N], copy it to a temp because... */
369 if (ctx->null_cond.a0_is_n) {
370 ctx->null_cond.a0_is_n = false;
371 ctx->null_cond.a0 = tcg_temp_new();
372 tcg_gen_mov_tl(ctx->null_cond.a0, cpu_psw_n);
374 /* ... we clear it before branching over the implementation,
375 so that (1) it's clear after nullifying this insn and
376 (2) if this insn nullifies the next, PSW[N] is valid. */
377 if (ctx->psw_n_nonzero) {
378 ctx->psw_n_nonzero = false;
379 tcg_gen_movi_tl(cpu_psw_n, 0);
382 tcg_gen_brcond_tl(ctx->null_cond.c, ctx->null_cond.a0,
383 ctx->null_cond.a1, ctx->null_lab);
384 cond_free(&ctx->null_cond);
388 /* Save the current nullification state to PSW[N]. */
389 static void nullify_save(DisasContext *ctx)
391 if (ctx->null_cond.c == TCG_COND_NEVER) {
392 if (ctx->psw_n_nonzero) {
393 tcg_gen_movi_tl(cpu_psw_n, 0);
395 return;
397 if (!ctx->null_cond.a0_is_n) {
398 cond_prep(&ctx->null_cond);
399 tcg_gen_setcond_tl(ctx->null_cond.c, cpu_psw_n,
400 ctx->null_cond.a0, ctx->null_cond.a1);
401 ctx->psw_n_nonzero = true;
403 cond_free(&ctx->null_cond);
406 /* Set a PSW[N] to X. The intention is that this is used immediately
407 before a goto_tb/exit_tb, so that there is no fallthru path to other
408 code within the TB. Therefore we do not update psw_n_nonzero. */
409 static void nullify_set(DisasContext *ctx, bool x)
411 if (ctx->psw_n_nonzero || x) {
412 tcg_gen_movi_tl(cpu_psw_n, x);
416 /* Mark the end of an instruction that may have been nullified.
417 This is the pair to nullify_over. */
418 static ExitStatus nullify_end(DisasContext *ctx, ExitStatus status)
420 TCGLabel *null_lab = ctx->null_lab;
422 if (likely(null_lab == NULL)) {
423 /* The current insn wasn't conditional or handled the condition
424 applied to it without a branch, so the (new) setting of
425 NULL_COND can be applied directly to the next insn. */
426 return status;
428 ctx->null_lab = NULL;
430 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
431 /* The next instruction will be unconditional,
432 and NULL_COND already reflects that. */
433 gen_set_label(null_lab);
434 } else {
435 /* The insn that we just executed is itself nullifying the next
436 instruction. Store the condition in the PSW[N] global.
437 We asserted PSW[N] = 0 in nullify_over, so that after the
438 label we have the proper value in place. */
439 nullify_save(ctx);
440 gen_set_label(null_lab);
441 ctx->null_cond = cond_make_n();
444 assert(status != EXIT_GOTO_TB && status != EXIT_IAQ_N_UPDATED);
445 if (status == EXIT_NORETURN) {
446 status = NO_EXIT;
448 return status;
451 static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval)
453 if (unlikely(ival == -1)) {
454 tcg_gen_mov_tl(dest, vval);
455 } else {
456 tcg_gen_movi_tl(dest, ival);
460 static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp)
462 return ctx->iaoq_f + disp + 8;
465 static void gen_excp_1(int exception)
467 TCGv_i32 t = tcg_const_i32(exception);
468 gen_helper_excp(cpu_env, t);
469 tcg_temp_free_i32(t);
472 static ExitStatus gen_excp(DisasContext *ctx, int exception)
474 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
475 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
476 nullify_save(ctx);
477 gen_excp_1(exception);
478 return EXIT_NORETURN;
481 static ExitStatus gen_illegal(DisasContext *ctx)
483 nullify_over(ctx);
484 return nullify_end(ctx, gen_excp(ctx, EXCP_SIGILL));
487 static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
489 /* Suppress goto_tb in the case of single-steping and IO. */
490 if ((ctx->tb->cflags & CF_LAST_IO) || ctx->singlestep_enabled) {
491 return false;
493 return true;
496 /* If the next insn is to be nullified, and it's on the same page,
497 and we're not attempting to set a breakpoint on it, then we can
498 totally skip the nullified insn. This avoids creating and
499 executing a TB that merely branches to the next TB. */
500 static bool use_nullify_skip(DisasContext *ctx)
502 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
503 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
506 static void gen_goto_tb(DisasContext *ctx, int which,
507 target_ulong f, target_ulong b)
509 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
510 tcg_gen_goto_tb(which);
511 tcg_gen_movi_tl(cpu_iaoq_f, f);
512 tcg_gen_movi_tl(cpu_iaoq_b, b);
513 tcg_gen_exit_tb((uintptr_t)ctx->tb + which);
514 } else {
515 copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
516 copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
517 if (ctx->singlestep_enabled) {
518 gen_excp_1(EXCP_DEBUG);
519 } else {
520 tcg_gen_exit_tb(0);
525 /* PA has a habit of taking the LSB of a field and using that as the sign,
526 with the rest of the field becoming the least significant bits. */
527 static target_long low_sextract(uint32_t val, int pos, int len)
529 target_ulong x = -(target_ulong)extract32(val, pos, 1);
530 x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
531 return x;
534 static unsigned assemble_rt64(uint32_t insn)
536 unsigned r1 = extract32(insn, 6, 1);
537 unsigned r0 = extract32(insn, 0, 5);
538 return r1 * 32 + r0;
541 static unsigned assemble_ra64(uint32_t insn)
543 unsigned r1 = extract32(insn, 7, 1);
544 unsigned r0 = extract32(insn, 21, 5);
545 return r1 * 32 + r0;
548 static unsigned assemble_rb64(uint32_t insn)
550 unsigned r1 = extract32(insn, 12, 1);
551 unsigned r0 = extract32(insn, 16, 5);
552 return r1 * 32 + r0;
555 static unsigned assemble_rc64(uint32_t insn)
557 unsigned r2 = extract32(insn, 8, 1);
558 unsigned r1 = extract32(insn, 13, 3);
559 unsigned r0 = extract32(insn, 9, 2);
560 return r2 * 32 + r1 * 4 + r0;
563 static target_long assemble_12(uint32_t insn)
565 target_ulong x = -(target_ulong)(insn & 1);
566 x = (x << 1) | extract32(insn, 2, 1);
567 x = (x << 10) | extract32(insn, 3, 10);
568 return x;
571 static target_long assemble_16(uint32_t insn)
573 /* Take the name from PA2.0, which produces a 16-bit number
574 only with wide mode; otherwise a 14-bit number. Since we don't
575 implement wide mode, this is always the 14-bit number. */
576 return low_sextract(insn, 0, 14);
579 static target_long assemble_16a(uint32_t insn)
581 /* Take the name from PA2.0, which produces a 14-bit shifted number
582 only with wide mode; otherwise a 12-bit shifted number. Since we
583 don't implement wide mode, this is always the 12-bit number. */
584 target_ulong x = -(target_ulong)(insn & 1);
585 x = (x << 11) | extract32(insn, 2, 11);
586 return x << 2;
589 static target_long assemble_17(uint32_t insn)
591 target_ulong x = -(target_ulong)(insn & 1);
592 x = (x << 5) | extract32(insn, 16, 5);
593 x = (x << 1) | extract32(insn, 2, 1);
594 x = (x << 10) | extract32(insn, 3, 10);
595 return x << 2;
598 static target_long assemble_21(uint32_t insn)
600 target_ulong x = -(target_ulong)(insn & 1);
601 x = (x << 11) | extract32(insn, 1, 11);
602 x = (x << 2) | extract32(insn, 14, 2);
603 x = (x << 5) | extract32(insn, 16, 5);
604 x = (x << 2) | extract32(insn, 12, 2);
605 return x << 11;
608 static target_long assemble_22(uint32_t insn)
610 target_ulong x = -(target_ulong)(insn & 1);
611 x = (x << 10) | extract32(insn, 16, 10);
612 x = (x << 1) | extract32(insn, 2, 1);
613 x = (x << 10) | extract32(insn, 3, 10);
614 return x << 2;
617 /* The parisc documentation describes only the general interpretation of
618 the conditions, without describing their exact implementation. The
619 interpretations do not stand up well when considering ADD,C and SUB,B.
620 However, considering the Addition, Subtraction and Logical conditions
621 as a whole it would appear that these relations are similar to what
622 a traditional NZCV set of flags would produce. */
624 static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
626 DisasCond cond;
627 TCGv tmp;
629 switch (cf >> 1) {
630 case 0: /* Never / TR */
631 cond = cond_make_f();
632 break;
633 case 1: /* = / <> (Z / !Z) */
634 cond = cond_make_0(TCG_COND_EQ, res);
635 break;
636 case 2: /* < / >= (N / !N) */
637 cond = cond_make_0(TCG_COND_LT, res);
638 break;
639 case 3: /* <= / > (N | Z / !N & !Z) */
640 cond = cond_make_0(TCG_COND_LE, res);
641 break;
642 case 4: /* NUV / UV (!C / C) */
643 cond = cond_make_0(TCG_COND_EQ, cb_msb);
644 break;
645 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
646 tmp = tcg_temp_new();
647 tcg_gen_neg_tl(tmp, cb_msb);
648 tcg_gen_and_tl(tmp, tmp, res);
649 cond = cond_make_0(TCG_COND_EQ, tmp);
650 tcg_temp_free(tmp);
651 break;
652 case 6: /* SV / NSV (V / !V) */
653 cond = cond_make_0(TCG_COND_LT, sv);
654 break;
655 case 7: /* OD / EV */
656 tmp = tcg_temp_new();
657 tcg_gen_andi_tl(tmp, res, 1);
658 cond = cond_make_0(TCG_COND_NE, tmp);
659 tcg_temp_free(tmp);
660 break;
661 default:
662 g_assert_not_reached();
664 if (cf & 1) {
665 cond.c = tcg_invert_cond(cond.c);
668 return cond;
671 /* Similar, but for the special case of subtraction without borrow, we
672 can use the inputs directly. This can allow other computation to be
673 deleted as unused. */
675 static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv)
677 DisasCond cond;
679 switch (cf >> 1) {
680 case 1: /* = / <> */
681 cond = cond_make(TCG_COND_EQ, in1, in2);
682 break;
683 case 2: /* < / >= */
684 cond = cond_make(TCG_COND_LT, in1, in2);
685 break;
686 case 3: /* <= / > */
687 cond = cond_make(TCG_COND_LE, in1, in2);
688 break;
689 case 4: /* << / >>= */
690 cond = cond_make(TCG_COND_LTU, in1, in2);
691 break;
692 case 5: /* <<= / >> */
693 cond = cond_make(TCG_COND_LEU, in1, in2);
694 break;
695 default:
696 return do_cond(cf, res, sv, sv);
698 if (cf & 1) {
699 cond.c = tcg_invert_cond(cond.c);
702 return cond;
705 /* Similar, but for logicals, where the carry and overflow bits are not
706 computed, and use of them is undefined. */
708 static DisasCond do_log_cond(unsigned cf, TCGv res)
710 switch (cf >> 1) {
711 case 4: case 5: case 6:
712 cf &= 1;
713 break;
715 return do_cond(cf, res, res, res);
718 /* Similar, but for shift/extract/deposit conditions. */
720 static DisasCond do_sed_cond(unsigned orig, TCGv res)
722 unsigned c, f;
724 /* Convert the compressed condition codes to standard.
725 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
726 4-7 are the reverse of 0-3. */
727 c = orig & 3;
728 if (c == 3) {
729 c = 7;
731 f = (orig & 4) / 4;
733 return do_log_cond(c * 2 + f, res);
736 /* Similar, but for unit conditions. */
738 static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
740 DisasCond cond;
741 TCGv tmp, cb;
743 TCGV_UNUSED(cb);
744 if (cf & 8) {
745 /* Since we want to test lots of carry-out bits all at once, do not
746 * do our normal thing and compute carry-in of bit B+1 since that
747 * leaves us with carry bits spread across two words.
749 cb = tcg_temp_new();
750 tmp = tcg_temp_new();
751 tcg_gen_or_tl(cb, in1, in2);
752 tcg_gen_and_tl(tmp, in1, in2);
753 tcg_gen_andc_tl(cb, cb, res);
754 tcg_gen_or_tl(cb, cb, tmp);
755 tcg_temp_free(tmp);
758 switch (cf >> 1) {
759 case 0: /* never / TR */
760 case 1: /* undefined */
761 case 5: /* undefined */
762 cond = cond_make_f();
763 break;
765 case 2: /* SBZ / NBZ */
766 /* See hasless(v,1) from
767 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
769 tmp = tcg_temp_new();
770 tcg_gen_subi_tl(tmp, res, 0x01010101u);
771 tcg_gen_andc_tl(tmp, tmp, res);
772 tcg_gen_andi_tl(tmp, tmp, 0x80808080u);
773 cond = cond_make_0(TCG_COND_NE, tmp);
774 tcg_temp_free(tmp);
775 break;
777 case 3: /* SHZ / NHZ */
778 tmp = tcg_temp_new();
779 tcg_gen_subi_tl(tmp, res, 0x00010001u);
780 tcg_gen_andc_tl(tmp, tmp, res);
781 tcg_gen_andi_tl(tmp, tmp, 0x80008000u);
782 cond = cond_make_0(TCG_COND_NE, tmp);
783 tcg_temp_free(tmp);
784 break;
786 case 4: /* SDC / NDC */
787 tcg_gen_andi_tl(cb, cb, 0x88888888u);
788 cond = cond_make_0(TCG_COND_NE, cb);
789 break;
791 case 6: /* SBC / NBC */
792 tcg_gen_andi_tl(cb, cb, 0x80808080u);
793 cond = cond_make_0(TCG_COND_NE, cb);
794 break;
796 case 7: /* SHC / NHC */
797 tcg_gen_andi_tl(cb, cb, 0x80008000u);
798 cond = cond_make_0(TCG_COND_NE, cb);
799 break;
801 default:
802 g_assert_not_reached();
804 if (cf & 8) {
805 tcg_temp_free(cb);
807 if (cf & 1) {
808 cond.c = tcg_invert_cond(cond.c);
811 return cond;
814 /* Compute signed overflow for addition. */
815 static TCGv do_add_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
817 TCGv sv = get_temp(ctx);
818 TCGv tmp = tcg_temp_new();
820 tcg_gen_xor_tl(sv, res, in1);
821 tcg_gen_xor_tl(tmp, in1, in2);
822 tcg_gen_andc_tl(sv, sv, tmp);
823 tcg_temp_free(tmp);
825 return sv;
828 /* Compute signed overflow for subtraction. */
829 static TCGv do_sub_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
831 TCGv sv = get_temp(ctx);
832 TCGv tmp = tcg_temp_new();
834 tcg_gen_xor_tl(sv, res, in1);
835 tcg_gen_xor_tl(tmp, in1, in2);
836 tcg_gen_and_tl(sv, sv, tmp);
837 tcg_temp_free(tmp);
839 return sv;
842 static ExitStatus do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
843 unsigned shift, bool is_l, bool is_tsv, bool is_tc,
844 bool is_c, unsigned cf)
846 TCGv dest, cb, cb_msb, sv, tmp;
847 unsigned c = cf >> 1;
848 DisasCond cond;
850 dest = tcg_temp_new();
851 TCGV_UNUSED(cb);
852 TCGV_UNUSED(cb_msb);
854 if (shift) {
855 tmp = get_temp(ctx);
856 tcg_gen_shli_tl(tmp, in1, shift);
857 in1 = tmp;
860 if (!is_l || c == 4 || c == 5) {
861 TCGv zero = tcg_const_tl(0);
862 cb_msb = get_temp(ctx);
863 tcg_gen_add2_tl(dest, cb_msb, in1, zero, in2, zero);
864 if (is_c) {
865 tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
867 tcg_temp_free(zero);
868 if (!is_l) {
869 cb = get_temp(ctx);
870 tcg_gen_xor_tl(cb, in1, in2);
871 tcg_gen_xor_tl(cb, cb, dest);
873 } else {
874 tcg_gen_add_tl(dest, in1, in2);
875 if (is_c) {
876 tcg_gen_add_tl(dest, dest, cpu_psw_cb_msb);
880 /* Compute signed overflow if required. */
881 TCGV_UNUSED(sv);
882 if (is_tsv || c == 6) {
883 sv = do_add_sv(ctx, dest, in1, in2);
884 if (is_tsv) {
885 /* ??? Need to include overflow from shift. */
886 gen_helper_tsv(cpu_env, sv);
890 /* Emit any conditional trap before any writeback. */
891 cond = do_cond(cf, dest, cb_msb, sv);
892 if (is_tc) {
893 cond_prep(&cond);
894 tmp = tcg_temp_new();
895 tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
896 gen_helper_tcond(cpu_env, tmp);
897 tcg_temp_free(tmp);
900 /* Write back the result. */
901 if (!is_l) {
902 save_or_nullify(ctx, cpu_psw_cb, cb);
903 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
905 save_gpr(ctx, rt, dest);
906 tcg_temp_free(dest);
908 /* Install the new nullification. */
909 cond_free(&ctx->null_cond);
910 ctx->null_cond = cond;
911 return NO_EXIT;
914 static ExitStatus do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
915 bool is_tsv, bool is_b, bool is_tc, unsigned cf)
917 TCGv dest, sv, cb, cb_msb, zero, tmp;
918 unsigned c = cf >> 1;
919 DisasCond cond;
921 dest = tcg_temp_new();
922 cb = tcg_temp_new();
923 cb_msb = tcg_temp_new();
925 zero = tcg_const_tl(0);
926 if (is_b) {
927 /* DEST,C = IN1 + ~IN2 + C. */
928 tcg_gen_not_tl(cb, in2);
929 tcg_gen_add2_tl(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
930 tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cb, zero);
931 tcg_gen_xor_tl(cb, cb, in1);
932 tcg_gen_xor_tl(cb, cb, dest);
933 } else {
934 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
935 operations by seeding the high word with 1 and subtracting. */
936 tcg_gen_movi_tl(cb_msb, 1);
937 tcg_gen_sub2_tl(dest, cb_msb, in1, cb_msb, in2, zero);
938 tcg_gen_eqv_tl(cb, in1, in2);
939 tcg_gen_xor_tl(cb, cb, dest);
941 tcg_temp_free(zero);
943 /* Compute signed overflow if required. */
944 TCGV_UNUSED(sv);
945 if (is_tsv || c == 6) {
946 sv = do_sub_sv(ctx, dest, in1, in2);
947 if (is_tsv) {
948 gen_helper_tsv(cpu_env, sv);
952 /* Compute the condition. We cannot use the special case for borrow. */
953 if (!is_b) {
954 cond = do_sub_cond(cf, dest, in1, in2, sv);
955 } else {
956 cond = do_cond(cf, dest, cb_msb, sv);
959 /* Emit any conditional trap before any writeback. */
960 if (is_tc) {
961 cond_prep(&cond);
962 tmp = tcg_temp_new();
963 tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
964 gen_helper_tcond(cpu_env, tmp);
965 tcg_temp_free(tmp);
968 /* Write back the result. */
969 save_or_nullify(ctx, cpu_psw_cb, cb);
970 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
971 save_gpr(ctx, rt, dest);
972 tcg_temp_free(dest);
974 /* Install the new nullification. */
975 cond_free(&ctx->null_cond);
976 ctx->null_cond = cond;
977 return NO_EXIT;
980 static ExitStatus do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
981 TCGv in2, unsigned cf)
983 TCGv dest, sv;
984 DisasCond cond;
986 dest = tcg_temp_new();
987 tcg_gen_sub_tl(dest, in1, in2);
989 /* Compute signed overflow if required. */
990 TCGV_UNUSED(sv);
991 if ((cf >> 1) == 6) {
992 sv = do_sub_sv(ctx, dest, in1, in2);
995 /* Form the condition for the compare. */
996 cond = do_sub_cond(cf, dest, in1, in2, sv);
998 /* Clear. */
999 tcg_gen_movi_tl(dest, 0);
1000 save_gpr(ctx, rt, dest);
1001 tcg_temp_free(dest);
1003 /* Install the new nullification. */
1004 cond_free(&ctx->null_cond);
1005 ctx->null_cond = cond;
1006 return NO_EXIT;
1009 static ExitStatus do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
1010 unsigned cf, void (*fn)(TCGv, TCGv, TCGv))
1012 TCGv dest = dest_gpr(ctx, rt);
1014 /* Perform the operation, and writeback. */
1015 fn(dest, in1, in2);
1016 save_gpr(ctx, rt, dest);
1018 /* Install the new nullification. */
1019 cond_free(&ctx->null_cond);
1020 if (cf) {
1021 ctx->null_cond = do_log_cond(cf, dest);
1023 return NO_EXIT;
1026 static ExitStatus do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
1027 TCGv in2, unsigned cf, bool is_tc,
1028 void (*fn)(TCGv, TCGv, TCGv))
1030 TCGv dest;
1031 DisasCond cond;
1033 if (cf == 0) {
1034 dest = dest_gpr(ctx, rt);
1035 fn(dest, in1, in2);
1036 save_gpr(ctx, rt, dest);
1037 cond_free(&ctx->null_cond);
1038 } else {
1039 dest = tcg_temp_new();
1040 fn(dest, in1, in2);
1042 cond = do_unit_cond(cf, dest, in1, in2);
1044 if (is_tc) {
1045 TCGv tmp = tcg_temp_new();
1046 cond_prep(&cond);
1047 tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
1048 gen_helper_tcond(cpu_env, tmp);
1049 tcg_temp_free(tmp);
1051 save_gpr(ctx, rt, dest);
1053 cond_free(&ctx->null_cond);
1054 ctx->null_cond = cond;
1056 return NO_EXIT;
1059 /* Emit a memory load. The modify parameter should be
1060 * < 0 for pre-modify,
1061 * > 0 for post-modify,
1062 * = 0 for no base register update.
1064 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1065 unsigned rx, int scale, target_long disp,
1066 int modify, TCGMemOp mop)
1068 TCGv addr, base;
1070 /* Caller uses nullify_over/nullify_end. */
1071 assert(ctx->null_cond.c == TCG_COND_NEVER);
1073 addr = tcg_temp_new();
1074 base = load_gpr(ctx, rb);
1076 /* Note that RX is mutually exclusive with DISP. */
1077 if (rx) {
1078 tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1079 tcg_gen_add_tl(addr, addr, base);
1080 } else {
1081 tcg_gen_addi_tl(addr, base, disp);
1084 if (modify == 0) {
1085 tcg_gen_qemu_ld_i32(dest, addr, MMU_USER_IDX, mop);
1086 } else {
1087 tcg_gen_qemu_ld_i32(dest, (modify < 0 ? addr : base),
1088 MMU_USER_IDX, mop);
1089 save_gpr(ctx, rb, addr);
1091 tcg_temp_free(addr);
1094 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1095 unsigned rx, int scale, target_long disp,
1096 int modify, TCGMemOp mop)
1098 TCGv addr, base;
1100 /* Caller uses nullify_over/nullify_end. */
1101 assert(ctx->null_cond.c == TCG_COND_NEVER);
1103 addr = tcg_temp_new();
1104 base = load_gpr(ctx, rb);
1106 /* Note that RX is mutually exclusive with DISP. */
1107 if (rx) {
1108 tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1109 tcg_gen_add_tl(addr, addr, base);
1110 } else {
1111 tcg_gen_addi_tl(addr, base, disp);
1114 if (modify == 0) {
1115 tcg_gen_qemu_ld_i64(dest, addr, MMU_USER_IDX, mop);
1116 } else {
1117 tcg_gen_qemu_ld_i64(dest, (modify < 0 ? addr : base),
1118 MMU_USER_IDX, mop);
1119 save_gpr(ctx, rb, addr);
1121 tcg_temp_free(addr);
1124 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1125 unsigned rx, int scale, target_long disp,
1126 int modify, TCGMemOp mop)
1128 TCGv addr, base;
1130 /* Caller uses nullify_over/nullify_end. */
1131 assert(ctx->null_cond.c == TCG_COND_NEVER);
1133 addr = tcg_temp_new();
1134 base = load_gpr(ctx, rb);
1136 /* Note that RX is mutually exclusive with DISP. */
1137 if (rx) {
1138 tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1139 tcg_gen_add_tl(addr, addr, base);
1140 } else {
1141 tcg_gen_addi_tl(addr, base, disp);
1144 tcg_gen_qemu_st_i32(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
1146 if (modify != 0) {
1147 save_gpr(ctx, rb, addr);
1149 tcg_temp_free(addr);
1152 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1153 unsigned rx, int scale, target_long disp,
1154 int modify, TCGMemOp mop)
1156 TCGv addr, base;
1158 /* Caller uses nullify_over/nullify_end. */
1159 assert(ctx->null_cond.c == TCG_COND_NEVER);
1161 addr = tcg_temp_new();
1162 base = load_gpr(ctx, rb);
1164 /* Note that RX is mutually exclusive with DISP. */
1165 if (rx) {
1166 tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1167 tcg_gen_add_tl(addr, addr, base);
1168 } else {
1169 tcg_gen_addi_tl(addr, base, disp);
1172 tcg_gen_qemu_st_i64(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
1174 if (modify != 0) {
1175 save_gpr(ctx, rb, addr);
1177 tcg_temp_free(addr);
1180 #if TARGET_LONG_BITS == 64
1181 #define do_load_tl do_load_64
1182 #define do_store_tl do_store_64
1183 #else
1184 #define do_load_tl do_load_32
1185 #define do_store_tl do_store_32
1186 #endif
1188 static ExitStatus do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1189 unsigned rx, int scale, target_long disp,
1190 int modify, TCGMemOp mop)
1192 TCGv dest;
1194 nullify_over(ctx);
1196 if (modify == 0) {
1197 /* No base register update. */
1198 dest = dest_gpr(ctx, rt);
1199 } else {
1200 /* Make sure if RT == RB, we see the result of the load. */
1201 dest = get_temp(ctx);
1203 do_load_tl(ctx, dest, rb, rx, scale, disp, modify, mop);
1204 save_gpr(ctx, rt, dest);
1206 return nullify_end(ctx, NO_EXIT);
1209 static ExitStatus do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1210 unsigned rx, int scale, target_long disp,
1211 int modify)
1213 TCGv_i32 tmp;
1215 nullify_over(ctx);
1217 tmp = tcg_temp_new_i32();
1218 do_load_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1219 save_frw_i32(rt, tmp);
1220 tcg_temp_free_i32(tmp);
1222 if (rt == 0) {
1223 gen_helper_loaded_fr0(cpu_env);
1226 return nullify_end(ctx, NO_EXIT);
1229 static ExitStatus do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1230 unsigned rx, int scale, target_long disp,
1231 int modify)
1233 TCGv_i64 tmp;
1235 nullify_over(ctx);
1237 tmp = tcg_temp_new_i64();
1238 do_load_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1239 save_frd(rt, tmp);
1240 tcg_temp_free_i64(tmp);
1242 if (rt == 0) {
1243 gen_helper_loaded_fr0(cpu_env);
1246 return nullify_end(ctx, NO_EXIT);
1249 static ExitStatus do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1250 target_long disp, int modify, TCGMemOp mop)
1252 nullify_over(ctx);
1253 do_store_tl(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
1254 return nullify_end(ctx, NO_EXIT);
/* FP store, single word: store the low 32 bits of FR[RT]. */
1257 static ExitStatus do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1258 unsigned rx, int scale, target_long disp,
1259 int modify)
1261 TCGv_i32 tmp;
1263 nullify_over(ctx);
1265 tmp = load_frw_i32(rt);
1266 do_store_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1267 tcg_temp_free_i32(tmp);
1269 return nullify_end(ctx, NO_EXIT);
/* FP store, doubleword: store the full 64 bits of FR[RT]. */
1272 static ExitStatus do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1273 unsigned rx, int scale, target_long disp,
1274 int modify)
1276 TCGv_i64 tmp;
1278 nullify_over(ctx);
1280 tmp = load_frd(rt);
1281 do_store_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1282 tcg_temp_free_i64(tmp);
1284 return nullify_end(ctx, NO_EXIT);
/* Unary FP op, word -> word: FR[RT] = func(FR[RA]), in-place on a temp. */
1287 static ExitStatus do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1288 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1290 TCGv_i32 tmp;
1292 nullify_over(ctx);
1293 tmp = load_frw0_i32(ra);
1295 func(tmp, cpu_env, tmp);
1297 save_frw_i32(rt, tmp);
1298 tcg_temp_free_i32(tmp);
1299 return nullify_end(ctx, NO_EXIT);
/* Unary FP op, double -> word: FR[RT].w = func(FR[RA].d). */
1302 static ExitStatus do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1303 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1305 TCGv_i32 dst;
1306 TCGv_i64 src;
1308 nullify_over(ctx);
1309 src = load_frd(ra);
1310 dst = tcg_temp_new_i32();
1312 func(dst, cpu_env, src);
1314 tcg_temp_free_i64(src);
1315 save_frw_i32(rt, dst);
1316 tcg_temp_free_i32(dst);
1317 return nullify_end(ctx, NO_EXIT);
/* Unary FP op, double -> double: FR[RT] = func(FR[RA]). */
1320 static ExitStatus do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1321 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1323 TCGv_i64 tmp;
1325 nullify_over(ctx);
1326 tmp = load_frd0(ra);
1328 func(tmp, cpu_env, tmp);
1330 save_frd(rt, tmp);
1331 tcg_temp_free_i64(tmp);
1332 return nullify_end(ctx, NO_EXIT);
/* Unary FP op, word -> double: FR[RT].d = func(FR[RA].w). */
1335 static ExitStatus do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1336 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1338 TCGv_i32 src;
1339 TCGv_i64 dst;
1341 nullify_over(ctx);
1342 src = load_frw0_i32(ra);
1343 dst = tcg_temp_new_i64();
1345 func(dst, cpu_env, src);
1347 tcg_temp_free_i32(src);
1348 save_frd(rt, dst);
1349 tcg_temp_free_i64(dst);
1350 return nullify_end(ctx, NO_EXIT);
/* Binary FP op on single words: FR[RT] = func(FR[RA], FR[RB]). */
1353 static ExitStatus do_fop_weww(DisasContext *ctx, unsigned rt,
1354 unsigned ra, unsigned rb,
1355 void (*func)(TCGv_i32, TCGv_env,
1356 TCGv_i32, TCGv_i32))
1358 TCGv_i32 a, b;
1360 nullify_over(ctx);
1361 a = load_frw0_i32(ra);
1362 b = load_frw0_i32(rb);
1364 func(a, cpu_env, a, b);
1366 tcg_temp_free_i32(b);
1367 save_frw_i32(rt, a);
1368 tcg_temp_free_i32(a);
1369 return nullify_end(ctx, NO_EXIT);
/* Binary FP op on doublewords: FR[RT] = func(FR[RA], FR[RB]). */
1372 static ExitStatus do_fop_dedd(DisasContext *ctx, unsigned rt,
1373 unsigned ra, unsigned rb,
1374 void (*func)(TCGv_i64, TCGv_env,
1375 TCGv_i64, TCGv_i64))
1377 TCGv_i64 a, b;
1379 nullify_over(ctx);
1380 a = load_frd0(ra);
1381 b = load_frd0(rb);
1383 func(a, cpu_env, a, b);
1385 tcg_temp_free_i64(b);
1386 save_frd(rt, a);
1387 tcg_temp_free_i64(a);
1388 return nullify_end(ctx, NO_EXIT);
1391 /* Emit an unconditional branch to a direct target, which may or may not
1392 have already had nullification handled. */
1393 static ExitStatus do_dbranch(DisasContext *ctx, target_ulong dest,
1394 unsigned link, bool is_n)
1396 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
/* Fast path: no pending nullification.  Just redirect the next
   queue entry; the TB keeps going. */
1397 if (link != 0) {
1398 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var)/* link register gets the return address */;
1400 ctx->iaoq_n = dest;
1401 if (is_n) {
1402 ctx->null_cond.c = TCG_COND_ALWAYS/* branch nullifies the delay slot */;
1404 return NO_EXIT;
1405 } else {
/* Slow path: a nullification condition is live; emit both the
   taken goto_tb (inside the nullify region) and the not-taken
   fall-through goto_tb after nullify_end. */
1406 nullify_over(ctx);
1408 if (link != 0) {
1409 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1412 if (is_n && use_nullify_skip(ctx)) {
1413 nullify_set(ctx, 0);
1414 gen_goto_tb(ctx, 0, dest, dest + 4)/* skip the nullified delay slot entirely */;
1415 } else {
1416 nullify_set(ctx, is_n);
1417 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1420 nullify_end(ctx, NO_EXIT);
1422 nullify_set(ctx, 0);
1423 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1424 return EXIT_GOTO_TB;
1428 /* Emit a conditional branch to a direct target. If the branch itself
1429 is nullified, we should have already used nullify_over. */
1430 static ExitStatus do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
1431 DisasCond *cond)
1433 target_ulong dest = iaoq_dest(ctx, disp);
1434 TCGLabel *taken = NULL;
1435 TCGCond c = cond->c;
1436 int which = 0;
/* WHICH counts goto_tb slots used; at most two are available per TB. */
1437 bool n;
1439 assert(ctx->null_cond.c == TCG_COND_NEVER);
1441 /* Handle TRUE and NEVER as direct branches. */
1442 if (c == TCG_COND_ALWAYS) {
1443 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1445 if (c == TCG_COND_NEVER) {
1446 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1449 taken = gen_new_label();
1450 cond_prep(cond);
1451 tcg_gen_brcond_tl(c, cond->a0, cond->a1, taken);
1452 cond_free(cond);
1454 /* Not taken: Condition not satisfied; nullify on backward branches. */
1455 n = is_n && disp < 0;
1456 if (n && use_nullify_skip(ctx)) {
1457 nullify_set(ctx, 0);
1458 gen_goto_tb(ctx, which++, ctx->iaoq_n, ctx->iaoq_n + 4);
1459 } else {
1460 if (!n && ctx->null_lab) {
/* Fold the insn-nullified path into the not-taken path, since
   both continue at the next insn without nullification. */
1461 gen_set_label(ctx->null_lab);
1462 ctx->null_lab = NULL;
1464 nullify_set(ctx, n);
1465 gen_goto_tb(ctx, which++, ctx->iaoq_b, ctx->iaoq_n);
1468 gen_set_label(taken);
1470 /* Taken: Condition satisfied; nullify on forward branches. */
1471 n = is_n && disp >= 0;
1472 if (n && use_nullify_skip(ctx)) {
1473 nullify_set(ctx, 0);
1474 gen_goto_tb(ctx, which++, dest, dest + 4);
1475 } else {
1476 nullify_set(ctx, n);
1477 gen_goto_tb(ctx, which++, ctx->iaoq_b, dest);
1480 /* Not taken: the branch itself was nullified. */
1481 if (ctx->null_lab) {
1482 gen_set_label(ctx->null_lab);
1483 ctx->null_lab = NULL;
1484 if (which < 2) {
/* A goto_tb slot is still free; use it for the nullified path. */
1485 nullify_set(ctx, 0);
1486 gen_goto_tb(ctx, which, ctx->iaoq_b, ctx->iaoq_n);
1487 return EXIT_GOTO_TB;
1488 } else {
1489 return EXIT_IAQ_N_STALE;
1491 } else {
1492 return EXIT_GOTO_TB;
1496 /* Emit an unconditional branch to an indirect target. This handles
1497 nullification of the branch itself. */
1498 static ExitStatus do_ibranch(DisasContext *ctx, TCGv dest,
1499 unsigned link, bool is_n)
1501 TCGv a0, a1, next, tmp;
1502 TCGCond c;
1504 assert(ctx->null_lab == NULL);
1506 if (ctx->null_cond.c == TCG_COND_NEVER) {
/* Case 1: branch is definitely executed.  Queue DEST as the next
   front-of-queue address via a TB-lifetime temp. */
1507 if (link != 0) {
1508 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1510 next = get_temp(ctx);
1511 tcg_gen_mov_tl(next, dest);
1512 ctx->iaoq_n = -1;
1513 ctx->iaoq_n_var = next;
1514 if (is_n) {
1515 ctx->null_cond.c = TCG_COND_ALWAYS;
1517 } else if (is_n && use_nullify_skip(ctx)) {
1518 /* The (conditional) branch, B, nullifies the next insn, N,
1519 and we're allowed to skip execution N (no single-step or
1520 tracepoint in effect). Since the exit_tb that we must use
1521 for the indirect branch consumes no special resources, we
1522 can (conditionally) skip B and continue execution. */
1523 /* The use_nullify_skip test implies we have a known control path. */
1524 tcg_debug_assert(ctx->iaoq_b != -1);
1525 tcg_debug_assert(ctx->iaoq_n != -1);
1527 /* We do have to handle the non-local temporary, DEST, before
1528 branching. Since IOAQ_F is not really live at this point, we
1529 can simply store DEST optimistically. Similarly with IAOQ_B. */
1530 tcg_gen_mov_tl(cpu_iaoq_f, dest);
1531 tcg_gen_addi_tl(cpu_iaoq_b, dest, 4);
1533 nullify_over(ctx);
1534 if (link != 0) {
1535 tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
1537 tcg_gen_exit_tb(0);
1538 return nullify_end(ctx, NO_EXIT);
1539 } else {
/* Case 3: conditionally nullified branch.  Use movcond to pick
   between the fall-through address and DEST without a TCG branch. */
1540 cond_prep(&ctx->null_cond);
1541 c = ctx->null_cond.c;
1542 a0 = ctx->null_cond.a0;
1543 a1 = ctx->null_cond.a1;
1545 tmp = tcg_temp_new();
1546 next = get_temp(ctx);
1548 copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1549 tcg_gen_movcond_tl(c, next, a0, a1, tmp, dest);
1550 ctx->iaoq_n = -1;
1551 ctx->iaoq_n_var = next;
1553 if (link != 0) {
/* Only update the link register when the branch executes. */
1554 tcg_gen_movcond_tl(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1557 if (is_n) {
1558 /* The branch nullifies the next insn, which means the state of N
1559 after the branch is the inverse of the state of N that applied
1560 to the branch. */
1561 tcg_gen_setcond_tl(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1562 cond_free(&ctx->null_cond);
1563 ctx->null_cond = cond_make_n();
1564 ctx->psw_n_nonzero = true;
1565 } else {
1566 cond_free(&ctx->null_cond);
1570 return NO_EXIT;
1573 /* On Linux, page zero is normally marked execute only + gateway.
1574 Therefore normal read or write is supposed to fail, but specific
1575 offsets have kernel code mapped to raise permissions to implement
1576 system calls. Handling this via an explicit check here, rather
1577 in than the "be disp(sr2,r0)" instruction that probably sent us
1578 here, is the easiest way to handle the branch delay slot on the
1579 aforementioned BE. */
1580 static ExitStatus do_page_zero(DisasContext *ctx)
1582 /* If by some means we get here with PSW[N]=1, that implies that
1583 the B,GATE instruction would be skipped, and we'd fault on the
1584 next insn within the privileged page. */
1585 switch (ctx->null_cond.c) {
1586 case TCG_COND_NEVER:
1587 break;
1588 case TCG_COND_ALWAYS:
1589 tcg_gen_movi_tl(cpu_psw_n, 0);
1590 goto do_sigill;
1591 default:
1592 /* Since this is always the first (and only) insn within the
1593 TB, we should know the state of PSW[N] from TB->FLAGS. */
1594 g_assert_not_reached();
1597 /* Check that we didn't arrive here via some means that allowed
1598 non-sequential instruction execution. Normally the PSW[B] bit
1599 detects this by disallowing the B,GATE instruction to execute
1600 under such conditions. */
1601 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1602 goto do_sigill;
1605 switch (ctx->iaoq_f) {
1606 case 0x00: /* Null pointer call */
1607 gen_excp_1(EXCP_SIGSEGV);
1608 return EXIT_NORETURN;
1610 case 0xb0: /* LWS */
1611 gen_excp_1(EXCP_SYSCALL_LWS);
1612 return EXIT_NORETURN;
1614 case 0xe0: /* SET_THREAD_POINTER */
1615 tcg_gen_mov_tl(cpu_cr27, cpu_gr[26]);
1616 tcg_gen_mov_tl(cpu_iaoq_f, cpu_gr[31]);
1617 tcg_gen_addi_tl(cpu_iaoq_b, cpu_iaoq_f, 4);
1618 return EXIT_IAQ_N_UPDATED;
1620 case 0x100: /* SYSCALL */
1621 gen_excp_1(EXCP_SYSCALL);
1622 return EXIT_NORETURN;
1624 default:
1625 do_sigill:
1626 gen_excp_1(EXCP_SIGILL);
1627 return EXIT_NORETURN;
1631 static ExitStatus trans_nop(DisasContext *ctx, uint32_t insn,
1632 const DisasInsn *di)
1634 cond_free(&ctx->null_cond);
1635 return NO_EXIT;
/* BREAK: raise a debug exception, subject to nullification. */
1638 static ExitStatus trans_break(DisasContext *ctx, uint32_t insn,
1639 const DisasInsn *di)
1641 nullify_over(ctx);
1642 return nullify_end(ctx, gen_excp(ctx, EXCP_DEBUG));
1645 static ExitStatus trans_sync(DisasContext *ctx, uint32_t insn,
1646 const DisasInsn *di)
1648 /* No point in nullifying the memory barrier. */
1649 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1651 cond_free(&ctx->null_cond);
1652 return NO_EXIT;
1655 static ExitStatus trans_mfia(DisasContext *ctx, uint32_t insn,
1656 const DisasInsn *di)
1658 unsigned rt = extract32(insn, 0, 5);
1659 TCGv tmp = dest_gpr(ctx, rt);
1660 tcg_gen_movi_tl(tmp, ctx->iaoq_f);
1661 save_gpr(ctx, rt, tmp);
1663 cond_free(&ctx->null_cond);
1664 return NO_EXIT;
1667 static ExitStatus trans_mfsp(DisasContext *ctx, uint32_t insn,
1668 const DisasInsn *di)
1670 unsigned rt = extract32(insn, 0, 5);
1671 TCGv tmp = dest_gpr(ctx, rt);
1673 /* ??? We don't implement space registers. */
1674 tcg_gen_movi_tl(tmp, 0);
1675 save_gpr(ctx, rt, tmp);
1677 cond_free(&ctx->null_cond);
1678 return NO_EXIT;
/* MFCTL: move from control register.  Only SAR, the interval timer,
   and CR26/CR27 are readable from user mode; everything else is
   privileged and raises an illegal-instruction exception. */
1681 static ExitStatus trans_mfctl(DisasContext *ctx, uint32_t insn,
1682 const DisasInsn *di)
1684 unsigned rt = extract32(insn, 0, 5);
1685 unsigned ctl = extract32(insn, 21, 5);
1686 TCGv tmp;
1688 switch (ctl) {
1689 case 11: /* SAR */
1690 #ifdef TARGET_HPPA64
1691 if (extract32(insn, 14, 1) == 0) {
1692 /* MFSAR without ,W masks low 5 bits. */
1693 tmp = dest_gpr(ctx, rt);
1694 tcg_gen_andi_tl(tmp, cpu_sar, 31);
1695 save_gpr(ctx, rt, tmp);
1696 break;
1698 #endif
1699 save_gpr(ctx, rt, cpu_sar);
1700 break;
1701 case 16: /* Interval Timer */
1702 tmp = dest_gpr(ctx, rt);
1703 tcg_gen_movi_tl(tmp, 0); /* FIXME */
1704 save_gpr(ctx, rt, tmp);
1705 break;
1706 case 26:
1707 save_gpr(ctx, rt, cpu_cr26);
1708 break;
1709 case 27:
1710 save_gpr(ctx, rt, cpu_cr27);
1711 break;
1712 default:
1713 /* All other control registers are privileged. */
1714 return gen_illegal(ctx);
1717 cond_free(&ctx->null_cond);
1718 return NO_EXIT;
/* MTCTL: move to control register.  Only SAR is writable here; the
   value is masked to a valid shift amount before being stored. */
1721 static ExitStatus trans_mtctl(DisasContext *ctx, uint32_t insn,
1722 const DisasInsn *di)
1724 unsigned rin = extract32(insn, 16, 5);
1725 unsigned ctl = extract32(insn, 21, 5);
1726 TCGv tmp;
1728 if (ctl == 11) { /* SAR */
1729 tmp = tcg_temp_new();
1730 tcg_gen_andi_tl(tmp, load_gpr(ctx, rin), TARGET_LONG_BITS - 1);
1731 save_or_nullify(ctx, cpu_sar, tmp);
1732 tcg_temp_free(tmp);
1733 } else {
1734 /* All other control registers are privileged or read-only. */
1735 return gen_illegal(ctx);
1738 cond_free(&ctx->null_cond);
1739 return NO_EXIT;
1742 static ExitStatus trans_mtsarcm(DisasContext *ctx, uint32_t insn,
1743 const DisasInsn *di)
1745 unsigned rin = extract32(insn, 16, 5);
1746 TCGv tmp = tcg_temp_new();
1748 tcg_gen_not_tl(tmp, load_gpr(ctx, rin));
1749 tcg_gen_andi_tl(tmp, tmp, TARGET_LONG_BITS - 1);
1750 save_or_nullify(ctx, cpu_sar, tmp);
1751 tcg_temp_free(tmp);
1753 cond_free(&ctx->null_cond);
1754 return NO_EXIT;
1757 static ExitStatus trans_ldsid(DisasContext *ctx, uint32_t insn,
1758 const DisasInsn *di)
1760 unsigned rt = extract32(insn, 0, 5);
1761 TCGv dest = dest_gpr(ctx, rt);
1763 /* Since we don't implement space registers, this returns zero. */
1764 tcg_gen_movi_tl(dest, 0);
1765 save_gpr(ctx, rt, dest);
1767 cond_free(&ctx->null_cond);
1768 return NO_EXIT;
/* Decode table for system-control instructions: { match, mask, handler }. */
1771 static const DisasInsn table_system[] = {
1772 { 0x00000000u, 0xfc001fe0u, trans_break },
1773 /* We don't implement space register, so MTSP is a nop. */
1774 { 0x00001820u, 0xffe01fffu, trans_nop },
1775 { 0x00001840u, 0xfc00ffffu, trans_mtctl },
1776 { 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
1777 { 0x000014a0u, 0xffffffe0u, trans_mfia },
1778 { 0x000004a0u, 0xffff1fe0u, trans_mfsp },
1779 { 0x000008a0u, 0xfc1fffe0u, trans_mfctl },
1780 { 0x00000400u, 0xffffffffu, trans_sync },
1781 { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
1784 static ExitStatus trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
1785 const DisasInsn *di)
1787 unsigned rb = extract32(insn, 21, 5);
1788 unsigned rx = extract32(insn, 16, 5);
1789 TCGv dest = dest_gpr(ctx, rb);
1790 TCGv src1 = load_gpr(ctx, rb);
1791 TCGv src2 = load_gpr(ctx, rx);
1793 /* The only thing we need to do is the base register modification. */
1794 tcg_gen_add_tl(dest, src1, src2);
1795 save_gpr(ctx, rb, dest);
1797 cond_free(&ctx->null_cond);
1798 return NO_EXIT;
/* PROBE / PROBEI: test read or write access to the address in GR[rb],
   depositing the result in GR[rt].  Runs under nullification. */
1801 static ExitStatus trans_probe(DisasContext *ctx, uint32_t insn,
1802 const DisasInsn *di)
1804 unsigned rt = extract32(insn, 0, 5);
1805 unsigned rb = extract32(insn, 21, 5);
1806 unsigned is_write = extract32(insn, 6, 1);
1807 TCGv dest;
1809 nullify_over(ctx);
1811 /* ??? Do something with priv level operand. */
1812 dest = dest_gpr(ctx, rt);
1813 if (is_write) {
1814 gen_helper_probe_w(dest, load_gpr(ctx, rb));
1815 } else {
1816 gen_helper_probe_r(dest, load_gpr(ctx, rb));
1818 save_gpr(ctx, rt, dest);
1819 return nullify_end(ctx, NO_EXIT);
/* Decode table for memory-management insns.  Cache/TLB flushes are
   no-ops (optionally with base modification); probes are real. */
1822 static const DisasInsn table_mem_mgmt[] = {
1823 { 0x04003280u, 0xfc003fffu, trans_nop }, /* fdc, disp */
1824 { 0x04001280u, 0xfc003fffu, trans_nop }, /* fdc, index */
1825 { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
1826 { 0x040012c0u, 0xfc003fffu, trans_nop }, /* fdce */
1827 { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
1828 { 0x04000280u, 0xfc001fffu, trans_nop }, /* fic 0a */
1829 { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
1830 { 0x040013c0u, 0xfc003fffu, trans_nop }, /* fic 4f */
1831 { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
1832 { 0x040002c0u, 0xfc001fffu, trans_nop }, /* fice */
1833 { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
1834 { 0x04002700u, 0xfc003fffu, trans_nop }, /* pdc */
1835 { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
1836 { 0x04001180u, 0xfc003fa0u, trans_probe }, /* probe */
1837 { 0x04003180u, 0xfc003fa0u, trans_probe }, /* probei */
/* ADD and SHLADD family: decode the extension field into the L (logical,
   no carry out), TSV (trap on signed overflow) and C (carry in) variants,
   then hand off to do_add.  Note is_tc stays false: ADD has no ,TC form. */
1840 static ExitStatus trans_add(DisasContext *ctx, uint32_t insn,
1841 const DisasInsn *di)
1843 unsigned r2 = extract32(insn, 21, 5);
1844 unsigned r1 = extract32(insn, 16, 5);
1845 unsigned cf = extract32(insn, 12, 4);
1846 unsigned ext = extract32(insn, 8, 4);
1847 unsigned shift = extract32(insn, 6, 2);
1848 unsigned rt = extract32(insn, 0, 5);
1849 TCGv tcg_r1, tcg_r2;
1850 bool is_c = false;
1851 bool is_l = false;
1852 bool is_tc = false;
1853 bool is_tsv = false;
1854 ExitStatus ret;
1856 switch (ext) {
1857 case 0x6: /* ADD, SHLADD */
1858 break;
1859 case 0xa: /* ADD,L, SHLADD,L */
1860 is_l = true;
1861 break;
1862 case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
1863 is_tsv = true;
1864 break;
1865 case 0x7: /* ADD,C */
1866 is_c = true;
1867 break;
1868 case 0xf: /* ADD,C,TSV */
1869 is_c = is_tsv = true;
1870 break;
1871 default:
1872 return gen_illegal(ctx);
1875 if (cf) {
/* A non-zero condition field may nullify the following insn. */
1876 nullify_over(ctx);
1878 tcg_r1 = load_gpr(ctx, r1);
1879 tcg_r2 = load_gpr(ctx, r2);
1880 ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
1881 return nullify_end(ctx, ret);
/* SUB family: decode the extension field into B (borrow in), TSV (trap
   on signed overflow) and TC (trap on condition) variants. */
1884 static ExitStatus trans_sub(DisasContext *ctx, uint32_t insn,
1885 const DisasInsn *di)
1887 unsigned r2 = extract32(insn, 21, 5);
1888 unsigned r1 = extract32(insn, 16, 5);
1889 unsigned cf = extract32(insn, 12, 4);
1890 unsigned ext = extract32(insn, 6, 6);
1891 unsigned rt = extract32(insn, 0, 5);
1892 TCGv tcg_r1, tcg_r2;
1893 bool is_b = false;
1894 bool is_tc = false;
1895 bool is_tsv = false;
1896 ExitStatus ret;
1898 switch (ext) {
1899 case 0x10: /* SUB */
1900 break;
1901 case 0x30: /* SUB,TSV */
1902 is_tsv = true;
1903 break;
1904 case 0x14: /* SUB,B */
1905 is_b = true;
1906 break;
1907 case 0x34: /* SUB,B,TSV */
1908 is_b = is_tsv = true;
1909 break;
1910 case 0x13: /* SUB,TC */
1911 is_tc = true;
1912 break;
1913 case 0x33: /* SUB,TSV,TC */
1914 is_tc = is_tsv = true;
1915 break;
1916 default:
1917 return gen_illegal(ctx);
1920 if (cf) {
/* A non-zero condition field may nullify the following insn. */
1921 nullify_over(ctx);
1923 tcg_r1 = load_gpr(ctx, r1);
1924 tcg_r2 = load_gpr(ctx, r2);
1925 ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
1926 return nullify_end(ctx, ret);
/* Generic logical op (AND/ANDCM/OR/XOR): the actual TCG operation is
   supplied through the decode table entry's f_ttt callback. */
1929 static ExitStatus trans_log(DisasContext *ctx, uint32_t insn,
1930 const DisasInsn *di)
1932 unsigned r2 = extract32(insn, 21, 5);
1933 unsigned r1 = extract32(insn, 16, 5);
1934 unsigned cf = extract32(insn, 12, 4);
1935 unsigned rt = extract32(insn, 0, 5);
1936 TCGv tcg_r1, tcg_r2;
1937 ExitStatus ret;
1939 if (cf) {
1940 nullify_over(ctx);
1942 tcg_r1 = load_gpr(ctx, r1);
1943 tcg_r2 = load_gpr(ctx, r2);
1944 ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f_ttt);
1945 return nullify_end(ctx, ret);
1948 /* OR r,0,t -> COPY (according to gas) */
1949 static ExitStatus trans_copy(DisasContext *ctx, uint32_t insn,
1950 const DisasInsn *di)
1952 unsigned r1 = extract32(insn, 16, 5);
1953 unsigned rt = extract32(insn, 0, 5);
1955 if (r1 == 0) {
1956 TCGv dest = dest_gpr(ctx, rt);
1957 tcg_gen_movi_tl(dest, 0);
1958 save_gpr(ctx, rt, dest);
1959 } else {
1960 save_gpr(ctx, rt, cpu_gr[r1]);
1962 cond_free(&ctx->null_cond);
1963 return NO_EXIT;
/* CMPCLR: compare R1 with R2 per CF, clear RT, optionally nullify. */
1966 static ExitStatus trans_cmpclr(DisasContext *ctx, uint32_t insn,
1967 const DisasInsn *di)
1969 unsigned r2 = extract32(insn, 21, 5);
1970 unsigned r1 = extract32(insn, 16, 5);
1971 unsigned cf = extract32(insn, 12, 4);
1972 unsigned rt = extract32(insn, 0, 5);
1973 TCGv tcg_r1, tcg_r2;
1974 ExitStatus ret;
1976 if (cf) {
1977 nullify_over(ctx);
1979 tcg_r1 = load_gpr(ctx, r1);
1980 tcg_r2 = load_gpr(ctx, r2);
1981 ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
1982 return nullify_end(ctx, ret);
/* UXOR: unit XOR, implemented via do_unit with the plain xor op. */
1985 static ExitStatus trans_uxor(DisasContext *ctx, uint32_t insn,
1986 const DisasInsn *di)
1988 unsigned r2 = extract32(insn, 21, 5);
1989 unsigned r1 = extract32(insn, 16, 5);
1990 unsigned cf = extract32(insn, 12, 4);
1991 unsigned rt = extract32(insn, 0, 5);
1992 TCGv tcg_r1, tcg_r2;
1993 ExitStatus ret;
1995 if (cf) {
1996 nullify_over(ctx);
1998 tcg_r1 = load_gpr(ctx, r1);
1999 tcg_r2 = load_gpr(ctx, r2);
2000 ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_tl);
2001 return nullify_end(ctx, ret);
/* UADDCM[,TC]: unit add complement -- R1 + ~R2, via do_unit. */
2004 static ExitStatus trans_uaddcm(DisasContext *ctx, uint32_t insn,
2005 const DisasInsn *di)
2007 unsigned r2 = extract32(insn, 21, 5);
2008 unsigned r1 = extract32(insn, 16, 5);
2009 unsigned cf = extract32(insn, 12, 4);
2010 unsigned is_tc = extract32(insn, 6, 1);
2011 unsigned rt = extract32(insn, 0, 5);
2012 TCGv tcg_r1, tcg_r2, tmp;
2013 ExitStatus ret;
2015 if (cf) {
2016 nullify_over(ctx);
2018 tcg_r1 = load_gpr(ctx, r1);
2019 tcg_r2 = load_gpr(ctx, r2);
2020 tmp = get_temp(ctx);
2021 tcg_gen_not_tl(tmp, tcg_r2);
2022 ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_tl);
2023 return nullify_end(ctx, ret);
/* DCOR / IDCOR: decimal correct.  Builds a per-nibble correction value
   (6 per BCD digit needing adjustment, selected from the carry bits in
   PSW[CB]) and adds or subtracts it via do_unit. */
2026 static ExitStatus trans_dcor(DisasContext *ctx, uint32_t insn,
2027 const DisasInsn *di)
2029 unsigned r2 = extract32(insn, 21, 5);
2030 unsigned cf = extract32(insn, 12, 4);
2031 unsigned is_i = extract32(insn, 6, 1);
2032 unsigned rt = extract32(insn, 0, 5);
2033 TCGv tmp;
2034 ExitStatus ret;
2036 nullify_over(ctx);
2038 tmp = get_temp(ctx);
2039 tcg_gen_shri_tl(tmp, cpu_psw_cb, 3);
2040 if (!is_i) {
2041 tcg_gen_not_tl(tmp, tmp);
2043 tcg_gen_andi_tl(tmp, tmp, 0x11111111);
2044 tcg_gen_muli_tl(tmp, tmp, 6);
2045 ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
2046 is_i ? tcg_gen_add_tl : tcg_gen_sub_tl);
2048 return nullify_end(ctx, ret);
/* DS: divide step.  One iteration of non-restoring division: shift the
   partial remainder left one bit, then add or subtract the divisor
   according to PSW[V], updating PSW[CB] and PSW[V] for the next step. */
2051 static ExitStatus trans_ds(DisasContext *ctx, uint32_t insn,
2052 const DisasInsn *di)
2054 unsigned r2 = extract32(insn, 21, 5);
2055 unsigned r1 = extract32(insn, 16, 5);
2056 unsigned cf = extract32(insn, 12, 4);
2057 unsigned rt = extract32(insn, 0, 5);
2058 TCGv dest, add1, add2, addc, zero, in1, in2;
2060 nullify_over(ctx);
2062 in1 = load_gpr(ctx, r1);
2063 in2 = load_gpr(ctx, r2);
2065 add1 = tcg_temp_new();
2066 add2 = tcg_temp_new();
2067 addc = tcg_temp_new();
2068 dest = tcg_temp_new();
2069 zero = tcg_const_tl(0);
2071 /* Form R1 << 1 | PSW[CB]{8}. */
2072 tcg_gen_add_tl(add1, in1, in1);
2073 tcg_gen_add_tl(add1, add1, cpu_psw_cb_msb);
2075 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2076 carry{8} requires that we subtract via + ~R2 + 1, as described in
2077 the manual. By extracting and masking V, we can produce the
2078 proper inputs to the addition without movcond. */
2079 tcg_gen_sari_tl(addc, cpu_psw_v, TARGET_LONG_BITS - 1);
2080 tcg_gen_xor_tl(add2, in2, addc);
2081 tcg_gen_andi_tl(addc, addc, 1);
2082 /* ??? This is only correct for 32-bit. */
2083 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2084 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2086 tcg_temp_free(addc);
2087 tcg_temp_free(zero);
2089 /* Write back the result register. */
2090 save_gpr(ctx, rt, dest);
2092 /* Write back PSW[CB]. */
2093 tcg_gen_xor_tl(cpu_psw_cb, add1, add2);
2094 tcg_gen_xor_tl(cpu_psw_cb, cpu_psw_cb, dest);
2096 /* Write back PSW[V] for the division step. */
2097 tcg_gen_neg_tl(cpu_psw_v, cpu_psw_cb_msb);
2098 tcg_gen_xor_tl(cpu_psw_v, cpu_psw_v, in2);
2100 /* Install the new nullification. */
2101 if (cf) {
2102 TCGv sv;
2103 TCGV_UNUSED(sv);
2104 if (cf >> 1 == 6) {
2105 /* ??? The lshift is supposed to contribute to overflow. */
2106 sv = do_add_sv(ctx, dest, add1, add2);
2108 ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
2111 tcg_temp_free(add1);
2112 tcg_temp_free(add2);
2113 tcg_temp_free(dest);
2115 return nullify_end(ctx, NO_EXIT);
/* Decode table for the arithmetic/logical major opcode.  More specific
   masks (COPY, NOP idioms) precede the generic entries. */
2118 static const DisasInsn table_arith_log[] = {
2119 { 0x08000240u, 0xfc00ffffu, trans_nop }, /* or x,y,0 */
2120 { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
2121 { 0x08000000u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_andc_tl },
2122 { 0x08000200u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_and_tl },
2123 { 0x08000240u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_or_tl },
2124 { 0x08000280u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_xor_tl },
2125 { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
2126 { 0x08000380u, 0xfc000fe0u, trans_uxor },
2127 { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
2128 { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
2129 { 0x08000440u, 0xfc000fe0u, trans_ds },
2130 { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
2131 { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
2132 { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
2133 { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
/* ADDI[,TSV][,TC]: add immediate.  O1 distinguishes the trapping form. */
2136 static ExitStatus trans_addi(DisasContext *ctx, uint32_t insn)
2138 target_long im = low_sextract(insn, 0, 11);
2139 unsigned e1 = extract32(insn, 11, 1);
2140 unsigned cf = extract32(insn, 12, 4);
2141 unsigned rt = extract32(insn, 16, 5);
2142 unsigned r2 = extract32(insn, 21, 5);
2143 unsigned o1 = extract32(insn, 26, 1);
2144 TCGv tcg_im, tcg_r2;
2145 ExitStatus ret;
2147 if (cf) {
2148 nullify_over(ctx);
2151 tcg_im = load_const(ctx, im);
2152 tcg_r2 = load_gpr(ctx, r2);
2153 ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
2155 return nullify_end(ctx, ret);
/* SUBI[,TSV]: subtract from immediate (IM - R2). */
2158 static ExitStatus trans_subi(DisasContext *ctx, uint32_t insn)
2160 target_long im = low_sextract(insn, 0, 11);
2161 unsigned e1 = extract32(insn, 11, 1);
2162 unsigned cf = extract32(insn, 12, 4);
2163 unsigned rt = extract32(insn, 16, 5);
2164 unsigned r2 = extract32(insn, 21, 5);
2165 TCGv tcg_im, tcg_r2;
2166 ExitStatus ret;
2168 if (cf) {
2169 nullify_over(ctx);
2172 tcg_im = load_const(ctx, im);
2173 tcg_r2 = load_gpr(ctx, r2);
2174 ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
2176 return nullify_end(ctx, ret);
/* CMPICLR: compare immediate and clear RT, optionally nullifying. */
2179 static ExitStatus trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2181 target_long im = low_sextract(insn, 0, 11);
2182 unsigned cf = extract32(insn, 12, 4);
2183 unsigned rt = extract32(insn, 16, 5);
2184 unsigned r2 = extract32(insn, 21, 5);
2185 TCGv tcg_im, tcg_r2;
2186 ExitStatus ret;
2188 if (cf) {
2189 nullify_over(ctx);
2192 tcg_im = load_const(ctx, im);
2193 tcg_r2 = load_gpr(ctx, r2);
2194 ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2196 return nullify_end(ctx, ret);
/* Indexed load, short-immediate form.  A selects before/after
   modification, encoded as the sign of MODIFY passed to do_load. */
2199 static ExitStatus trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
2200 const DisasInsn *di)
2202 unsigned rt = extract32(insn, 0, 5);
2203 unsigned m = extract32(insn, 5, 1);
2204 unsigned sz = extract32(insn, 6, 2);
2205 unsigned a = extract32(insn, 13, 1);
2206 int disp = low_sextract(insn, 16, 5);
2207 unsigned rb = extract32(insn, 21, 5);
2208 int modify = (m ? (a ? -1 : 1) : 0);
2209 TCGMemOp mop = MO_TE | sz;
2211 return do_load(ctx, rt, rb, 0, 0, disp, modify, mop);
/* Indexed load, register-index form.  U requests index scaling by the
   access size. */
2214 static ExitStatus trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
2215 const DisasInsn *di)
2217 unsigned rt = extract32(insn, 0, 5);
2218 unsigned m = extract32(insn, 5, 1);
2219 unsigned sz = extract32(insn, 6, 2);
2220 unsigned u = extract32(insn, 13, 1);
2221 unsigned rx = extract32(insn, 16, 5);
2222 unsigned rb = extract32(insn, 21, 5);
2223 TCGMemOp mop = MO_TE | sz;
2225 return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, m, mop);
/* Indexed store, short-immediate form. */
2228 static ExitStatus trans_st_idx_i(DisasContext *ctx, uint32_t insn,
2229 const DisasInsn *di)
2231 int disp = low_sextract(insn, 0, 5);
2232 unsigned m = extract32(insn, 5, 1);
2233 unsigned sz = extract32(insn, 6, 2);
2234 unsigned a = extract32(insn, 13, 1);
2235 unsigned rr = extract32(insn, 16, 5);
2236 unsigned rb = extract32(insn, 21, 5);
2237 int modify = (m ? (a ? -1 : 1) : 0);
2238 TCGMemOp mop = MO_TE | sz;
2240 return do_store(ctx, rr, rb, disp, modify, mop);
/* LDCW: load and clear word, the HPPA atomic primitive.  Implemented as
   an aligned atomic exchange with zero. */
2243 static ExitStatus trans_ldcw(DisasContext *ctx, uint32_t insn,
2244 const DisasInsn *di)
2246 unsigned rt = extract32(insn, 0, 5);
2247 unsigned m = extract32(insn, 5, 1);
2248 unsigned i = extract32(insn, 12, 1);
2249 unsigned au = extract32(insn, 13, 1);
2250 unsigned rx = extract32(insn, 16, 5);
2251 unsigned rb = extract32(insn, 21, 5);
2252 TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
2253 TCGv zero, addr, base, dest;
2254 int modify, disp = 0, scale = 0;
2256 nullify_over(ctx);
2258 /* ??? Share more code with do_load and do_load_{32,64}. */
2260 if (i) {
/* Immediate form: the "index" field is really a 5-bit displacement. */
2261 modify = (m ? (au ? -1 : 1) : 0);
2262 disp = low_sextract(rx, 0, 5);
2263 rx = 0;
2264 } else {
2265 modify = m;
2266 if (au) {
2267 scale = mop & MO_SIZE;
2270 if (modify) {
2271 /* Base register modification. Make sure if RT == RB, we see
2272 the result of the load. */
2273 dest = get_temp(ctx);
2274 } else {
2275 dest = dest_gpr(ctx, rt);
2278 addr = tcg_temp_new();
2279 base = load_gpr(ctx, rb);
2280 if (rx) {
2281 tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
2282 tcg_gen_add_tl(addr, addr, base);
2283 } else {
2284 tcg_gen_addi_tl(addr, base, disp);
2287 zero = tcg_const_tl(0);
/* modify > 0 is post-modify: the access uses the unmodified base. */
2288 tcg_gen_atomic_xchg_tl(dest, (modify <= 0 ? addr : base),
2289 zero, MMU_USER_IDX, mop);
2290 if (modify) {
2291 save_gpr(ctx, rb, addr);
2293 save_gpr(ctx, rt, dest);
2295 return nullify_end(ctx, NO_EXIT);
/* STBY: store bytes.  A selects the "ending" (store bytes above the
   address) vs "beginning" case; the helpers do the partial store. */
2298 static ExitStatus trans_stby(DisasContext *ctx, uint32_t insn,
2299 const DisasInsn *di)
2301 target_long disp = low_sextract(insn, 0, 5);
2302 unsigned m = extract32(insn, 5, 1);
2303 unsigned a = extract32(insn, 13, 1);
2304 unsigned rt = extract32(insn, 16, 5);
2305 unsigned rb = extract32(insn, 21, 5);
2306 TCGv addr, val;
2308 nullify_over(ctx);
2310 addr = tcg_temp_new();
2311 if (m || disp == 0) {
2312 tcg_gen_mov_tl(addr, load_gpr(ctx, rb));
2313 } else {
2314 tcg_gen_addi_tl(addr, load_gpr(ctx, rb), disp);
2316 val = load_gpr(ctx, rt);
2318 if (a) {
2319 gen_helper_stby_e(cpu_env, addr, val);
2320 } else {
2321 gen_helper_stby_b(cpu_env, addr, val);
2324 if (m) {
/* Base modification: word-align the updated address. */
2325 tcg_gen_addi_tl(addr, addr, disp);
2326 tcg_gen_andi_tl(addr, addr, ~3);
2327 save_gpr(ctx, rb, addr);
2329 tcg_temp_free(addr);
2331 return nullify_end(ctx, NO_EXIT);
/* Decode table for the indexed memory major opcode. */
2334 static const DisasInsn table_index_mem[] = {
2335 { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
2336 { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
2337 { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
2338 { 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
2339 { 0x0c001300u, 0xfc0013c0, trans_stby },
2342 static ExitStatus trans_ldil(DisasContext *ctx, uint32_t insn)
2344 unsigned rt = extract32(insn, 21, 5);
2345 target_long i = assemble_21(insn);
2346 TCGv tcg_rt = dest_gpr(ctx, rt);
2348 tcg_gen_movi_tl(tcg_rt, i);
2349 save_gpr(ctx, rt, tcg_rt);
2350 cond_free(&ctx->null_cond);
2352 return NO_EXIT;
2355 static ExitStatus trans_addil(DisasContext *ctx, uint32_t insn)
2357 unsigned rt = extract32(insn, 21, 5);
2358 target_long i = assemble_21(insn);
2359 TCGv tcg_rt = load_gpr(ctx, rt);
2360 TCGv tcg_r1 = dest_gpr(ctx, 1);
2362 tcg_gen_addi_tl(tcg_r1, tcg_rt, i);
2363 save_gpr(ctx, 1, tcg_r1);
2364 cond_free(&ctx->null_cond);
2366 return NO_EXIT;
/* LDO: load offset (address arithmetic, no memory access). */
2369 static ExitStatus trans_ldo(DisasContext *ctx, uint32_t insn)
2371 unsigned rb = extract32(insn, 21, 5);
2372 unsigned rt = extract32(insn, 16, 5);
2373 target_long i = assemble_16(insn);
2374 TCGv tcg_rt = dest_gpr(ctx, rt);
2376 /* Special case rb == 0, for the LDI pseudo-op.
2377 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
2378 if (rb == 0) {
2379 tcg_gen_movi_tl(tcg_rt, i);
2380 } else {
2381 tcg_gen_addi_tl(tcg_rt, cpu_gr[rb], i);
2383 save_gpr(ctx, rt, tcg_rt);
2384 cond_free(&ctx->null_cond);
2386 return NO_EXIT;
/* Generic 16-bit-displacement load.  For the modifying forms the sign
   of the displacement selects pre-increment vs post-decrement. */
2389 static ExitStatus trans_load(DisasContext *ctx, uint32_t insn,
2390 bool is_mod, TCGMemOp mop)
2392 unsigned rb = extract32(insn, 21, 5);
2393 unsigned rt = extract32(insn, 16, 5);
2394 target_long i = assemble_16(insn);
2396 return do_load(ctx, rt, rb, 0, 0, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
/* Opcode 0x17 word loads: FLDW (ext2 0/1) or LDW,mod (ext2 2). */
2399 static ExitStatus trans_load_w(DisasContext *ctx, uint32_t insn)
2401 unsigned rb = extract32(insn, 21, 5);
2402 unsigned rt = extract32(insn, 16, 5);
2403 target_long i = assemble_16a(insn);
2404 unsigned ext2 = extract32(insn, 1, 2);
2406 switch (ext2) {
2407 case 0:
2408 case 1:
2409 /* FLDW without modification. */
2410 return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
2411 case 2:
2412 /* LDW with modification. Note that the sign of I selects
2413 post-dec vs pre-inc. */
2414 return do_load(ctx, rt, rb, 0, 0, i, (i < 0 ? 1 : -1), MO_TEUL);
2415 default:
2416 return gen_illegal(ctx);
2420 static ExitStatus trans_fload_mod(DisasContext *ctx, uint32_t insn)
2422 target_long i = assemble_16a(insn);
2423 unsigned t1 = extract32(insn, 1, 1);
2424 unsigned a = extract32(insn, 2, 1);
2425 unsigned t0 = extract32(insn, 16, 5);
2426 unsigned rb = extract32(insn, 21, 5);
2428 /* FLDW with modification. */
2429 return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2432 static ExitStatus trans_store(DisasContext *ctx, uint32_t insn,
2433 bool is_mod, TCGMemOp mop)
2435 unsigned rb = extract32(insn, 21, 5);
2436 unsigned rt = extract32(insn, 16, 5);
2437 target_long i = assemble_16(insn);
2439 return do_store(ctx, rt, rb, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2442 static ExitStatus trans_store_w(DisasContext *ctx, uint32_t insn)
2444 unsigned rb = extract32(insn, 21, 5);
2445 unsigned rt = extract32(insn, 16, 5);
2446 target_long i = assemble_16a(insn);
2447 unsigned ext2 = extract32(insn, 1, 2);
2449 switch (ext2) {
2450 case 0:
2451 case 1:
2452 /* FSTW without modification. */
2453 return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
2454 case 2:
2455 /* LDW with modification. */
2456 return do_store(ctx, rt, rb, i, (i < 0 ? 1 : -1), MO_TEUL);
2457 default:
2458 return gen_illegal(ctx);
2462 static ExitStatus trans_fstore_mod(DisasContext *ctx, uint32_t insn)
2464 target_long i = assemble_16a(insn);
2465 unsigned t1 = extract32(insn, 1, 1);
2466 unsigned a = extract32(insn, 2, 1);
2467 unsigned t0 = extract32(insn, 16, 5);
2468 unsigned rb = extract32(insn, 21, 5);
2470 /* FSTW with modification. */
2471 return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2474 static ExitStatus trans_copr_w(DisasContext *ctx, uint32_t insn)
2476 unsigned t0 = extract32(insn, 0, 5);
2477 unsigned m = extract32(insn, 5, 1);
2478 unsigned t1 = extract32(insn, 6, 1);
2479 unsigned ext3 = extract32(insn, 7, 3);
2480 /* unsigned cc = extract32(insn, 10, 2); */
2481 unsigned i = extract32(insn, 12, 1);
2482 unsigned ua = extract32(insn, 13, 1);
2483 unsigned rx = extract32(insn, 16, 5);
2484 unsigned rb = extract32(insn, 21, 5);
2485 unsigned rt = t1 * 32 + t0;
2486 int modify = (m ? (ua ? -1 : 1) : 0);
2487 int disp, scale;
2489 if (i == 0) {
2490 scale = (ua ? 2 : 0);
2491 disp = 0;
2492 modify = m;
2493 } else {
2494 disp = low_sextract(rx, 0, 5);
2495 scale = 0;
2496 rx = 0;
2497 modify = (m ? (ua ? -1 : 1) : 0);
2500 switch (ext3) {
2501 case 0: /* FLDW */
2502 return do_floadw(ctx, rt, rb, rx, scale, disp, modify);
2503 case 4: /* FSTW */
2504 return do_fstorew(ctx, rt, rb, rx, scale, disp, modify);
2506 return gen_illegal(ctx);
2509 static ExitStatus trans_copr_dw(DisasContext *ctx, uint32_t insn)
2511 unsigned rt = extract32(insn, 0, 5);
2512 unsigned m = extract32(insn, 5, 1);
2513 unsigned ext4 = extract32(insn, 6, 4);
2514 /* unsigned cc = extract32(insn, 10, 2); */
2515 unsigned i = extract32(insn, 12, 1);
2516 unsigned ua = extract32(insn, 13, 1);
2517 unsigned rx = extract32(insn, 16, 5);
2518 unsigned rb = extract32(insn, 21, 5);
2519 int modify = (m ? (ua ? -1 : 1) : 0);
2520 int disp, scale;
2522 if (i == 0) {
2523 scale = (ua ? 3 : 0);
2524 disp = 0;
2525 modify = m;
2526 } else {
2527 disp = low_sextract(rx, 0, 5);
2528 scale = 0;
2529 rx = 0;
2530 modify = (m ? (ua ? -1 : 1) : 0);
2533 switch (ext4) {
2534 case 0: /* FLDD */
2535 return do_floadd(ctx, rt, rb, rx, scale, disp, modify);
2536 case 8: /* FSTD */
2537 return do_fstored(ctx, rt, rb, rx, scale, disp, modify);
2538 default:
2539 return gen_illegal(ctx);
2543 static ExitStatus trans_cmpb(DisasContext *ctx, uint32_t insn,
2544 bool is_true, bool is_imm, bool is_dw)
2546 target_long disp = assemble_12(insn) * 4;
2547 unsigned n = extract32(insn, 1, 1);
2548 unsigned c = extract32(insn, 13, 3);
2549 unsigned r = extract32(insn, 21, 5);
2550 unsigned cf = c * 2 + !is_true;
2551 TCGv dest, in1, in2, sv;
2552 DisasCond cond;
2554 nullify_over(ctx);
2556 if (is_imm) {
2557 in1 = load_const(ctx, low_sextract(insn, 16, 5));
2558 } else {
2559 in1 = load_gpr(ctx, extract32(insn, 16, 5));
2561 in2 = load_gpr(ctx, r);
2562 dest = get_temp(ctx);
2564 tcg_gen_sub_tl(dest, in1, in2);
2566 TCGV_UNUSED(sv);
2567 if (c == 6) {
2568 sv = do_sub_sv(ctx, dest, in1, in2);
2571 cond = do_sub_cond(cf, dest, in1, in2, sv);
2572 return do_cbranch(ctx, disp, n, &cond);
2575 static ExitStatus trans_addb(DisasContext *ctx, uint32_t insn,
2576 bool is_true, bool is_imm)
2578 target_long disp = assemble_12(insn) * 4;
2579 unsigned n = extract32(insn, 1, 1);
2580 unsigned c = extract32(insn, 13, 3);
2581 unsigned r = extract32(insn, 21, 5);
2582 unsigned cf = c * 2 + !is_true;
2583 TCGv dest, in1, in2, sv, cb_msb;
2584 DisasCond cond;
2586 nullify_over(ctx);
2588 if (is_imm) {
2589 in1 = load_const(ctx, low_sextract(insn, 16, 5));
2590 } else {
2591 in1 = load_gpr(ctx, extract32(insn, 16, 5));
2593 in2 = load_gpr(ctx, r);
2594 dest = dest_gpr(ctx, r);
2595 TCGV_UNUSED(sv);
2596 TCGV_UNUSED(cb_msb);
2598 switch (c) {
2599 default:
2600 tcg_gen_add_tl(dest, in1, in2);
2601 break;
2602 case 4: case 5:
2603 cb_msb = get_temp(ctx);
2604 tcg_gen_movi_tl(cb_msb, 0);
2605 tcg_gen_add2_tl(dest, cb_msb, in1, cb_msb, in2, cb_msb);
2606 break;
2607 case 6:
2608 tcg_gen_add_tl(dest, in1, in2);
2609 sv = do_add_sv(ctx, dest, in1, in2);
2610 break;
2613 cond = do_cond(cf, dest, cb_msb, sv);
2614 return do_cbranch(ctx, disp, n, &cond);
2617 static ExitStatus trans_bb(DisasContext *ctx, uint32_t insn)
2619 target_long disp = assemble_12(insn) * 4;
2620 unsigned n = extract32(insn, 1, 1);
2621 unsigned c = extract32(insn, 15, 1);
2622 unsigned r = extract32(insn, 16, 5);
2623 unsigned p = extract32(insn, 21, 5);
2624 unsigned i = extract32(insn, 26, 1);
2625 TCGv tmp, tcg_r;
2626 DisasCond cond;
2628 nullify_over(ctx);
2630 tmp = tcg_temp_new();
2631 tcg_r = load_gpr(ctx, r);
2632 if (i) {
2633 tcg_gen_shli_tl(tmp, tcg_r, p);
2634 } else {
2635 tcg_gen_shl_tl(tmp, tcg_r, cpu_sar);
2638 cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
2639 tcg_temp_free(tmp);
2640 return do_cbranch(ctx, disp, n, &cond);
2643 static ExitStatus trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
2645 target_long disp = assemble_12(insn) * 4;
2646 unsigned n = extract32(insn, 1, 1);
2647 unsigned c = extract32(insn, 13, 3);
2648 unsigned t = extract32(insn, 16, 5);
2649 unsigned r = extract32(insn, 21, 5);
2650 TCGv dest;
2651 DisasCond cond;
2653 nullify_over(ctx);
2655 dest = dest_gpr(ctx, r);
2656 if (is_imm) {
2657 tcg_gen_movi_tl(dest, low_sextract(t, 0, 5));
2658 } else if (t == 0) {
2659 tcg_gen_movi_tl(dest, 0);
2660 } else {
2661 tcg_gen_mov_tl(dest, cpu_gr[t]);
2664 cond = do_sed_cond(c, dest);
2665 return do_cbranch(ctx, disp, n, &cond);
2668 static ExitStatus trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
2669 const DisasInsn *di)
2671 unsigned rt = extract32(insn, 0, 5);
2672 unsigned c = extract32(insn, 13, 3);
2673 unsigned r1 = extract32(insn, 16, 5);
2674 unsigned r2 = extract32(insn, 21, 5);
2675 TCGv dest;
2677 if (c) {
2678 nullify_over(ctx);
2681 dest = dest_gpr(ctx, rt);
2682 if (r1 == 0) {
2683 tcg_gen_ext32u_tl(dest, load_gpr(ctx, r2));
2684 tcg_gen_shr_tl(dest, dest, cpu_sar);
2685 } else if (r1 == r2) {
2686 TCGv_i32 t32 = tcg_temp_new_i32();
2687 tcg_gen_trunc_tl_i32(t32, load_gpr(ctx, r2));
2688 tcg_gen_rotr_i32(t32, t32, cpu_sar);
2689 tcg_gen_extu_i32_tl(dest, t32);
2690 tcg_temp_free_i32(t32);
2691 } else {
2692 TCGv_i64 t = tcg_temp_new_i64();
2693 TCGv_i64 s = tcg_temp_new_i64();
2695 tcg_gen_concat_tl_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
2696 tcg_gen_extu_tl_i64(s, cpu_sar);
2697 tcg_gen_shr_i64(t, t, s);
2698 tcg_gen_trunc_i64_tl(dest, t);
2700 tcg_temp_free_i64(t);
2701 tcg_temp_free_i64(s);
2703 save_gpr(ctx, rt, dest);
2705 /* Install the new nullification. */
2706 cond_free(&ctx->null_cond);
2707 if (c) {
2708 ctx->null_cond = do_sed_cond(c, dest);
2710 return nullify_end(ctx, NO_EXIT);
2713 static ExitStatus trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
2714 const DisasInsn *di)
2716 unsigned rt = extract32(insn, 0, 5);
2717 unsigned cpos = extract32(insn, 5, 5);
2718 unsigned c = extract32(insn, 13, 3);
2719 unsigned r1 = extract32(insn, 16, 5);
2720 unsigned r2 = extract32(insn, 21, 5);
2721 unsigned sa = 31 - cpos;
2722 TCGv dest, t2;
2724 if (c) {
2725 nullify_over(ctx);
2728 dest = dest_gpr(ctx, rt);
2729 t2 = load_gpr(ctx, r2);
2730 if (r1 == r2) {
2731 TCGv_i32 t32 = tcg_temp_new_i32();
2732 tcg_gen_trunc_tl_i32(t32, t2);
2733 tcg_gen_rotri_i32(t32, t32, sa);
2734 tcg_gen_extu_i32_tl(dest, t32);
2735 tcg_temp_free_i32(t32);
2736 } else if (r1 == 0) {
2737 tcg_gen_extract_tl(dest, t2, sa, 32 - sa);
2738 } else {
2739 TCGv t0 = tcg_temp_new();
2740 tcg_gen_extract_tl(t0, t2, sa, 32 - sa);
2741 tcg_gen_deposit_tl(dest, t0, cpu_gr[r1], 32 - sa, sa);
2742 tcg_temp_free(t0);
2744 save_gpr(ctx, rt, dest);
2746 /* Install the new nullification. */
2747 cond_free(&ctx->null_cond);
2748 if (c) {
2749 ctx->null_cond = do_sed_cond(c, dest);
2751 return nullify_end(ctx, NO_EXIT);
2754 static ExitStatus trans_extrw_sar(DisasContext *ctx, uint32_t insn,
2755 const DisasInsn *di)
2757 unsigned clen = extract32(insn, 0, 5);
2758 unsigned is_se = extract32(insn, 10, 1);
2759 unsigned c = extract32(insn, 13, 3);
2760 unsigned rt = extract32(insn, 16, 5);
2761 unsigned rr = extract32(insn, 21, 5);
2762 unsigned len = 32 - clen;
2763 TCGv dest, src, tmp;
2765 if (c) {
2766 nullify_over(ctx);
2769 dest = dest_gpr(ctx, rt);
2770 src = load_gpr(ctx, rr);
2771 tmp = tcg_temp_new();
2773 /* Recall that SAR is using big-endian bit numbering. */
2774 tcg_gen_xori_tl(tmp, cpu_sar, TARGET_LONG_BITS - 1);
2775 if (is_se) {
2776 tcg_gen_sar_tl(dest, src, tmp);
2777 tcg_gen_sextract_tl(dest, dest, 0, len);
2778 } else {
2779 tcg_gen_shr_tl(dest, src, tmp);
2780 tcg_gen_extract_tl(dest, dest, 0, len);
2782 tcg_temp_free(tmp);
2783 save_gpr(ctx, rt, dest);
2785 /* Install the new nullification. */
2786 cond_free(&ctx->null_cond);
2787 if (c) {
2788 ctx->null_cond = do_sed_cond(c, dest);
2790 return nullify_end(ctx, NO_EXIT);
2793 static ExitStatus trans_extrw_imm(DisasContext *ctx, uint32_t insn,
2794 const DisasInsn *di)
2796 unsigned clen = extract32(insn, 0, 5);
2797 unsigned pos = extract32(insn, 5, 5);
2798 unsigned is_se = extract32(insn, 10, 1);
2799 unsigned c = extract32(insn, 13, 3);
2800 unsigned rt = extract32(insn, 16, 5);
2801 unsigned rr = extract32(insn, 21, 5);
2802 unsigned len = 32 - clen;
2803 unsigned cpos = 31 - pos;
2804 TCGv dest, src;
2806 if (c) {
2807 nullify_over(ctx);
2810 dest = dest_gpr(ctx, rt);
2811 src = load_gpr(ctx, rr);
2812 if (is_se) {
2813 tcg_gen_sextract_tl(dest, src, cpos, len);
2814 } else {
2815 tcg_gen_extract_tl(dest, src, cpos, len);
2817 save_gpr(ctx, rt, dest);
2819 /* Install the new nullification. */
2820 cond_free(&ctx->null_cond);
2821 if (c) {
2822 ctx->null_cond = do_sed_cond(c, dest);
2824 return nullify_end(ctx, NO_EXIT);
2827 static const DisasInsn table_sh_ex[] = {
2828 { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
2829 { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
2830 { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
2831 { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
2834 static ExitStatus trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
2835 const DisasInsn *di)
2837 unsigned clen = extract32(insn, 0, 5);
2838 unsigned cpos = extract32(insn, 5, 5);
2839 unsigned nz = extract32(insn, 10, 1);
2840 unsigned c = extract32(insn, 13, 3);
2841 target_long val = low_sextract(insn, 16, 5);
2842 unsigned rt = extract32(insn, 21, 5);
2843 unsigned len = 32 - clen;
2844 target_long mask0, mask1;
2845 TCGv dest;
2847 if (c) {
2848 nullify_over(ctx);
2850 if (cpos + len > 32) {
2851 len = 32 - cpos;
2854 dest = dest_gpr(ctx, rt);
2855 mask0 = deposit64(0, cpos, len, val);
2856 mask1 = deposit64(-1, cpos, len, val);
2858 if (nz) {
2859 TCGv src = load_gpr(ctx, rt);
2860 if (mask1 != -1) {
2861 tcg_gen_andi_tl(dest, src, mask1);
2862 src = dest;
2864 tcg_gen_ori_tl(dest, src, mask0);
2865 } else {
2866 tcg_gen_movi_tl(dest, mask0);
2868 save_gpr(ctx, rt, dest);
2870 /* Install the new nullification. */
2871 cond_free(&ctx->null_cond);
2872 if (c) {
2873 ctx->null_cond = do_sed_cond(c, dest);
2875 return nullify_end(ctx, NO_EXIT);
2878 static ExitStatus trans_depw_imm(DisasContext *ctx, uint32_t insn,
2879 const DisasInsn *di)
2881 unsigned clen = extract32(insn, 0, 5);
2882 unsigned cpos = extract32(insn, 5, 5);
2883 unsigned nz = extract32(insn, 10, 1);
2884 unsigned c = extract32(insn, 13, 3);
2885 unsigned rr = extract32(insn, 16, 5);
2886 unsigned rt = extract32(insn, 21, 5);
2887 unsigned rs = nz ? rt : 0;
2888 unsigned len = 32 - clen;
2889 TCGv dest, val;
2891 if (c) {
2892 nullify_over(ctx);
2894 if (cpos + len > 32) {
2895 len = 32 - cpos;
2898 dest = dest_gpr(ctx, rt);
2899 val = load_gpr(ctx, rr);
2900 if (rs == 0) {
2901 tcg_gen_deposit_z_tl(dest, val, cpos, len);
2902 } else {
2903 tcg_gen_deposit_tl(dest, cpu_gr[rs], val, cpos, len);
2905 save_gpr(ctx, rt, dest);
2907 /* Install the new nullification. */
2908 cond_free(&ctx->null_cond);
2909 if (c) {
2910 ctx->null_cond = do_sed_cond(c, dest);
2912 return nullify_end(ctx, NO_EXIT);
2915 static ExitStatus trans_depw_sar(DisasContext *ctx, uint32_t insn,
2916 const DisasInsn *di)
2918 unsigned clen = extract32(insn, 0, 5);
2919 unsigned nz = extract32(insn, 10, 1);
2920 unsigned i = extract32(insn, 12, 1);
2921 unsigned c = extract32(insn, 13, 3);
2922 unsigned rt = extract32(insn, 21, 5);
2923 unsigned rs = nz ? rt : 0;
2924 unsigned len = 32 - clen;
2925 TCGv val, mask, tmp, shift, dest;
2926 unsigned msb = 1U << (len - 1);
2928 if (c) {
2929 nullify_over(ctx);
2932 if (i) {
2933 val = load_const(ctx, low_sextract(insn, 16, 5));
2934 } else {
2935 val = load_gpr(ctx, extract32(insn, 16, 5));
2937 dest = dest_gpr(ctx, rt);
2938 shift = tcg_temp_new();
2939 tmp = tcg_temp_new();
2941 /* Convert big-endian bit numbering in SAR to left-shift. */
2942 tcg_gen_xori_tl(shift, cpu_sar, TARGET_LONG_BITS - 1);
2944 mask = tcg_const_tl(msb + (msb - 1));
2945 tcg_gen_and_tl(tmp, val, mask);
2946 if (rs) {
2947 tcg_gen_shl_tl(mask, mask, shift);
2948 tcg_gen_shl_tl(tmp, tmp, shift);
2949 tcg_gen_andc_tl(dest, cpu_gr[rs], mask);
2950 tcg_gen_or_tl(dest, dest, tmp);
2951 } else {
2952 tcg_gen_shl_tl(dest, tmp, shift);
2954 tcg_temp_free(shift);
2955 tcg_temp_free(mask);
2956 tcg_temp_free(tmp);
2957 save_gpr(ctx, rt, dest);
2959 /* Install the new nullification. */
2960 cond_free(&ctx->null_cond);
2961 if (c) {
2962 ctx->null_cond = do_sed_cond(c, dest);
2964 return nullify_end(ctx, NO_EXIT);
2967 static const DisasInsn table_depw[] = {
2968 { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
2969 { 0xd4000800u, 0xfc001800u, trans_depw_imm },
2970 { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
2973 static ExitStatus trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
2975 unsigned n = extract32(insn, 1, 1);
2976 unsigned b = extract32(insn, 21, 5);
2977 target_long disp = assemble_17(insn);
2979 /* unsigned s = low_uextract(insn, 13, 3); */
2980 /* ??? It seems like there should be a good way of using
2981 "be disp(sr2, r0)", the canonical gateway entry mechanism
2982 to our advantage. But that appears to be inconvenient to
2983 manage along side branch delay slots. Therefore we handle
2984 entry into the gateway page via absolute address. */
2986 /* Since we don't implement spaces, just branch. Do notice the special
2987 case of "be disp(*,r0)" using a direct branch to disp, so that we can
2988 goto_tb to the TB containing the syscall. */
2989 if (b == 0) {
2990 return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
2991 } else {
2992 TCGv tmp = get_temp(ctx);
2993 tcg_gen_addi_tl(tmp, load_gpr(ctx, b), disp);
2994 return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
2998 static ExitStatus trans_bl(DisasContext *ctx, uint32_t insn,
2999 const DisasInsn *di)
3001 unsigned n = extract32(insn, 1, 1);
3002 unsigned link = extract32(insn, 21, 5);
3003 target_long disp = assemble_17(insn);
3005 return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
3008 static ExitStatus trans_bl_long(DisasContext *ctx, uint32_t insn,
3009 const DisasInsn *di)
3011 unsigned n = extract32(insn, 1, 1);
3012 target_long disp = assemble_22(insn);
3014 return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
3017 static ExitStatus trans_blr(DisasContext *ctx, uint32_t insn,
3018 const DisasInsn *di)
3020 unsigned n = extract32(insn, 1, 1);
3021 unsigned rx = extract32(insn, 16, 5);
3022 unsigned link = extract32(insn, 21, 5);
3023 TCGv tmp = get_temp(ctx);
3025 tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3);
3026 tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8);
3027 return do_ibranch(ctx, tmp, link, n);
3030 static ExitStatus trans_bv(DisasContext *ctx, uint32_t insn,
3031 const DisasInsn *di)
3033 unsigned n = extract32(insn, 1, 1);
3034 unsigned rx = extract32(insn, 16, 5);
3035 unsigned rb = extract32(insn, 21, 5);
3036 TCGv dest;
3038 if (rx == 0) {
3039 dest = load_gpr(ctx, rb);
3040 } else {
3041 dest = get_temp(ctx);
3042 tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3);
3043 tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb));
3045 return do_ibranch(ctx, dest, 0, n);
3048 static ExitStatus trans_bve(DisasContext *ctx, uint32_t insn,
3049 const DisasInsn *di)
3051 unsigned n = extract32(insn, 1, 1);
3052 unsigned rb = extract32(insn, 21, 5);
3053 unsigned link = extract32(insn, 13, 1) ? 2 : 0;
3055 return do_ibranch(ctx, load_gpr(ctx, rb), link, n);
3058 static const DisasInsn table_branch[] = {
3059 { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
3060 { 0xe800a000u, 0xfc00e000u, trans_bl_long },
3061 { 0xe8004000u, 0xfc00fffdu, trans_blr },
3062 { 0xe800c000u, 0xfc00fffdu, trans_bv },
3063 { 0xe800d000u, 0xfc00dffcu, trans_bve },
3066 static ExitStatus trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
3067 const DisasInsn *di)
3069 unsigned rt = extract32(insn, 0, 5);
3070 unsigned ra = extract32(insn, 21, 5);
3071 return do_fop_wew(ctx, rt, ra, di->f_wew);
3074 static ExitStatus trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
3075 const DisasInsn *di)
3077 unsigned rt = assemble_rt64(insn);
3078 unsigned ra = assemble_ra64(insn);
3079 return do_fop_wew(ctx, rt, ra, di->f_wew);
3082 static ExitStatus trans_fop_ded(DisasContext *ctx, uint32_t insn,
3083 const DisasInsn *di)
3085 unsigned rt = extract32(insn, 0, 5);
3086 unsigned ra = extract32(insn, 21, 5);
3087 return do_fop_ded(ctx, rt, ra, di->f_ded);
3090 static ExitStatus trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
3091 const DisasInsn *di)
3093 unsigned rt = extract32(insn, 0, 5);
3094 unsigned ra = extract32(insn, 21, 5);
3095 return do_fop_wed(ctx, rt, ra, di->f_wed);
3098 static ExitStatus trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
3099 const DisasInsn *di)
3101 unsigned rt = assemble_rt64(insn);
3102 unsigned ra = extract32(insn, 21, 5);
3103 return do_fop_wed(ctx, rt, ra, di->f_wed);
3106 static ExitStatus trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
3107 const DisasInsn *di)
3109 unsigned rt = extract32(insn, 0, 5);
3110 unsigned ra = extract32(insn, 21, 5);
3111 return do_fop_dew(ctx, rt, ra, di->f_dew);
3114 static ExitStatus trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
3115 const DisasInsn *di)
3117 unsigned rt = extract32(insn, 0, 5);
3118 unsigned ra = assemble_ra64(insn);
3119 return do_fop_dew(ctx, rt, ra, di->f_dew);
3122 static ExitStatus trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
3123 const DisasInsn *di)
3125 unsigned rt = extract32(insn, 0, 5);
3126 unsigned rb = extract32(insn, 16, 5);
3127 unsigned ra = extract32(insn, 21, 5);
3128 return do_fop_weww(ctx, rt, ra, rb, di->f_weww);
3131 static ExitStatus trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
3132 const DisasInsn *di)
3134 unsigned rt = assemble_rt64(insn);
3135 unsigned rb = assemble_rb64(insn);
3136 unsigned ra = assemble_ra64(insn);
3137 return do_fop_weww(ctx, rt, ra, rb, di->f_weww);
3140 static ExitStatus trans_fop_dedd(DisasContext *ctx, uint32_t insn,
3141 const DisasInsn *di)
3143 unsigned rt = extract32(insn, 0, 5);
3144 unsigned rb = extract32(insn, 16, 5);
3145 unsigned ra = extract32(insn, 21, 5);
3146 return do_fop_dedd(ctx, rt, ra, rb, di->f_dedd);
3149 static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3151 tcg_gen_mov_i32(dst, src);
3154 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3156 tcg_gen_mov_i64(dst, src);
3159 static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3161 tcg_gen_andi_i32(dst, src, INT32_MAX);
3164 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3166 tcg_gen_andi_i64(dst, src, INT64_MAX);
3169 static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3171 tcg_gen_xori_i32(dst, src, INT32_MIN);
3174 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3176 tcg_gen_xori_i64(dst, src, INT64_MIN);
3179 static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3181 tcg_gen_ori_i32(dst, src, INT32_MIN);
3184 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3186 tcg_gen_ori_i64(dst, src, INT64_MIN);
3189 static ExitStatus do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
3190 unsigned y, unsigned c)
3192 TCGv_i32 ta, tb, tc, ty;
3194 nullify_over(ctx);
3196 ta = load_frw0_i32(ra);
3197 tb = load_frw0_i32(rb);
3198 ty = tcg_const_i32(y);
3199 tc = tcg_const_i32(c);
3201 gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3203 tcg_temp_free_i32(ta);
3204 tcg_temp_free_i32(tb);
3205 tcg_temp_free_i32(ty);
3206 tcg_temp_free_i32(tc);
3208 return nullify_end(ctx, NO_EXIT);
3211 static ExitStatus trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
3212 const DisasInsn *di)
3214 unsigned c = extract32(insn, 0, 5);
3215 unsigned y = extract32(insn, 13, 3);
3216 unsigned rb = extract32(insn, 16, 5);
3217 unsigned ra = extract32(insn, 21, 5);
3218 return do_fcmp_s(ctx, ra, rb, y, c);
3221 static ExitStatus trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
3222 const DisasInsn *di)
3224 unsigned c = extract32(insn, 0, 5);
3225 unsigned y = extract32(insn, 13, 3);
3226 unsigned rb = assemble_rb64(insn);
3227 unsigned ra = assemble_ra64(insn);
3228 return do_fcmp_s(ctx, ra, rb, y, c);
3231 static ExitStatus trans_fcmp_d(DisasContext *ctx, uint32_t insn,
3232 const DisasInsn *di)
3234 unsigned c = extract32(insn, 0, 5);
3235 unsigned y = extract32(insn, 13, 3);
3236 unsigned rb = extract32(insn, 16, 5);
3237 unsigned ra = extract32(insn, 21, 5);
3238 TCGv_i64 ta, tb;
3239 TCGv_i32 tc, ty;
3241 nullify_over(ctx);
3243 ta = load_frd0(ra);
3244 tb = load_frd0(rb);
3245 ty = tcg_const_i32(y);
3246 tc = tcg_const_i32(c);
3248 gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3250 tcg_temp_free_i64(ta);
3251 tcg_temp_free_i64(tb);
3252 tcg_temp_free_i32(ty);
3253 tcg_temp_free_i32(tc);
3255 return nullify_end(ctx, NO_EXIT);
3258 static ExitStatus trans_ftest_t(DisasContext *ctx, uint32_t insn,
3259 const DisasInsn *di)
3261 unsigned y = extract32(insn, 13, 3);
3262 unsigned cbit = (y ^ 1) - 1;
3263 TCGv t;
3265 nullify_over(ctx);
3267 t = tcg_temp_new();
3268 tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3269 tcg_gen_extract_tl(t, t, 21 - cbit, 1);
3270 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3271 tcg_temp_free(t);
3273 return nullify_end(ctx, NO_EXIT);
3276 static ExitStatus trans_ftest_q(DisasContext *ctx, uint32_t insn,
3277 const DisasInsn *di)
3279 unsigned c = extract32(insn, 0, 5);
3280 int mask;
3281 bool inv = false;
3282 TCGv t;
3284 nullify_over(ctx);
3286 t = tcg_temp_new();
3287 tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3289 switch (c) {
3290 case 0: /* simple */
3291 tcg_gen_andi_tl(t, t, 0x4000000);
3292 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3293 goto done;
3294 case 2: /* rej */
3295 inv = true;
3296 /* fallthru */
3297 case 1: /* acc */
3298 mask = 0x43ff800;
3299 break;
3300 case 6: /* rej8 */
3301 inv = true;
3302 /* fallthru */
3303 case 5: /* acc8 */
3304 mask = 0x43f8000;
3305 break;
3306 case 9: /* acc6 */
3307 mask = 0x43e0000;
3308 break;
3309 case 13: /* acc4 */
3310 mask = 0x4380000;
3311 break;
3312 case 17: /* acc2 */
3313 mask = 0x4200000;
3314 break;
3315 default:
3316 return gen_illegal(ctx);
3318 if (inv) {
3319 TCGv c = load_const(ctx, mask);
3320 tcg_gen_or_tl(t, t, c);
3321 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3322 } else {
3323 tcg_gen_andi_tl(t, t, mask);
3324 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3326 done:
3327 return nullify_end(ctx, NO_EXIT);
3330 static ExitStatus trans_xmpyu(DisasContext *ctx, uint32_t insn,
3331 const DisasInsn *di)
3333 unsigned rt = extract32(insn, 0, 5);
3334 unsigned rb = assemble_rb64(insn);
3335 unsigned ra = assemble_ra64(insn);
3336 TCGv_i64 a, b;
3338 nullify_over(ctx);
3340 a = load_frw0_i64(ra);
3341 b = load_frw0_i64(rb);
3342 tcg_gen_mul_i64(a, a, b);
3343 save_frd(rt, a);
3344 tcg_temp_free_i64(a);
3345 tcg_temp_free_i64(b);
3347 return nullify_end(ctx, NO_EXIT);
/* Shorthand for DisasInsn entries: pairs the translator thunk with the
   union member that receives the generator callback.  The *_0c variants
   are redefined for the 0x0e table below. */
#define FOP_DED  trans_fop_ded, .f_ded
#define FOP_DEDD trans_fop_dedd, .f_dedd

#define FOP_WEW  trans_fop_wew_0c, .f_wew
#define FOP_DEW  trans_fop_dew_0c, .f_dew
#define FOP_WED  trans_fop_wed_0c, .f_wed
#define FOP_WEWW trans_fop_weww_0c, .f_weww
3358 static const DisasInsn table_float_0c[] = {
3359 /* floating point class zero */
3360 { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
3361 { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
3362 { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
3363 { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
3364 { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
3365 { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },
3367 { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
3368 { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
3369 { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
3370 { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
3371 { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
3372 { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
3374 /* floating point class three */
3375 { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
3376 { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
3377 { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
3378 { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },
3380 { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
3381 { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
3382 { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
3383 { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
3385 /* floating point class one */
3386 /* float/float */
3387 { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
3388 { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
3389 /* int/float */
3390 { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
3391 { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
3392 { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
3393 { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
3394 /* float/int */
3395 { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
3396 { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
3397 { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
3398 { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
3399 /* float/int truncate */
3400 { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
3401 { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
3402 { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
3403 { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
3404 /* uint/float */
3405 { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
3406 { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
3407 { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
3408 { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
3409 /* float/uint */
3410 { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
3411 { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
3412 { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
3413 { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
3414 /* float/uint truncate */
3415 { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
3416 { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
3417 { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
3418 { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
3420 /* floating point class two */
3421 { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
3422 { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
3423 { 0x30002420, 0xffffffe0, trans_ftest_q },
3424 { 0x30000420, 0xffff1fff, trans_ftest_t },
3426 /* FID. Note that ra == rt == 0, which via fcpy puts 0 into fr0.
3427 This is machine/revision == 0, which is reserved for simulator. */
3428 { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
/* Rebind the single-precision shorthand to the 0x0e translator thunks. */
#undef FOP_WEW
#undef FOP_DEW
#undef FOP_WED
#undef FOP_WEWW
#define FOP_WEW  trans_fop_wew_0e, .f_wew
#define FOP_DEW  trans_fop_dew_0e, .f_dew
#define FOP_WED  trans_fop_wed_0e, .f_wed
#define FOP_WEWW trans_fop_weww_0e, .f_weww
3440 static const DisasInsn table_float_0e[] = {
3441 /* floating point class zero */
3442 { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
3443 { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
3444 { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
3445 { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
3446 { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
3447 { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },
3449 { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
3450 { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
3451 { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
3452 { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
3453 { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
3454 { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
3456 /* floating point class three */
3457 { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
3458 { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
3459 { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
3460 { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },
3462 { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
3463 { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
3464 { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
3465 { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
3467 { 0x38004700, 0xfc00ef60, trans_xmpyu },
3469 /* floating point class one */
3470 /* float/float */
3471 { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
3472 { 0x38002200, 0xfc1fffc0, FOP_DEW = gen_helper_fcnv_s_d },
3473 /* int/float */
3474 { 0x38008200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_w_s },
3475 { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
3476 { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
3477 { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
3478 /* float/int */
3479 { 0x38010200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_w },
3480 { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
3481 { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
3482 { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
3483 /* float/int truncate */
3484 { 0x38018200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_w },
3485 { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
3486 { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
3487 { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
3488 /* uint/float */
3489 { 0x38028200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_uw_s },
3490 { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
3491 { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
3492 { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
3493 /* float/uint */
3494 { 0x38030200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_uw },
3495 { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
3496 { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
3497 { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
3498 /* float/uint truncate */
3499 { 0x38038200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_uw },
3500 { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
3501 { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
3502 { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
3504 /* floating point class two */
3505 { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
3506 { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
3509 #undef FOP_WEW
3510 #undef FOP_DEW
3511 #undef FOP_WED
3512 #undef FOP_WEWW
3513 #undef FOP_DED
3514 #undef FOP_DEDD
/* Convert the fmpyadd single-precision register encodings to standard. */
static inline int fmpyadd_s_reg(unsigned r)
{
    /* Bit 4 of the 5-bit field selects the upper half of the register
       file; the low four bits select the register within that half.  */
    unsigned upper = r & 16;
    unsigned low = r & 15;

    return 16 + (upper << 1) + low;
}
/* FMPYADD / FMPYSUB: one insn performing an independent floating-point
   multiply (rm1 * rm2 -> tm) and an add or subtract (ta +/- ra -> ta).
   Bit 5 (f) selects the format: 0 = single precision (with the register
   fields remapped via fmpyadd_s_reg), 1 = double precision.  */
static ExitStatus trans_fmpyadd(DisasContext *ctx, uint32_t insn, bool is_sub)
{
    unsigned tm = extract32(insn, 0, 5);    /* multiply target */
    unsigned f = extract32(insn, 5, 1);     /* format: 0=single, 1=double */
    unsigned ra = extract32(insn, 6, 5);    /* add/sub operand */
    unsigned ta = extract32(insn, 11, 5);   /* add/sub target and operand */
    unsigned rm2 = extract32(insn, 16, 5);  /* multiply operand 2 */
    unsigned rm1 = extract32(insn, 21, 5);  /* multiply operand 1 */

    nullify_over(ctx);

    /* Independent multiply & add/sub, with undefined behaviour
       if outputs overlap inputs.  */
    if (f == 0) {
        /* Single precision: remap the 5-bit encodings to the standard
           register numbering before use.  */
        tm = fmpyadd_s_reg(tm);
        ra = fmpyadd_s_reg(ra);
        ta = fmpyadd_s_reg(ta);
        rm2 = fmpyadd_s_reg(rm2);
        rm1 = fmpyadd_s_reg(rm1);
        do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
        do_fop_weww(ctx, ta, ta, ra,
                    is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
    } else {
        do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d);
        do_fop_dedd(ctx, ta, ta, ra,
                    is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
    }

    return nullify_end(ctx, NO_EXIT);
}
/* Fused multiply-add, single precision: rt = rm1 * rm2 + ra3.
   The neg bit (insn[5]) selects the multiply-negate-add helper instead.
   Register numbers use the 64-bit (assemble_*64) encodings.  */
static ExitStatus trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
                                   const DisasInsn *di)
{
    unsigned rt = assemble_rt64(insn);
    unsigned neg = extract32(insn, 5, 1);
    unsigned rm1 = assemble_ra64(insn);
    unsigned rm2 = assemble_rb64(insn);
    unsigned ra3 = assemble_rc64(insn);
    TCGv_i32 a, b, c;

    nullify_over(ctx);
    a = load_frw0_i32(rm1);
    b = load_frw0_i32(rm2);
    c = load_frw0_i32(ra3);

    if (neg) {
        gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
    } else {
        gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
    }

    /* The result was written in place into A; free the other temps,
       store the result, then free A as well.  */
    tcg_temp_free_i32(b);
    tcg_temp_free_i32(c);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    return nullify_end(ctx, NO_EXIT);
}
/* Fused multiply-add, double precision: rt = rm1 * rm2 + ra3.
   The neg bit (insn[5]) selects the multiply-negate-add helper instead.
   Unlike the single-precision form, rt/rm1/rm2 are plain 5-bit fields;
   only ra3 uses the 64-bit encoding.  */
static ExitStatus trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
                                   const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned neg = extract32(insn, 5, 1);
    unsigned rm1 = extract32(insn, 21, 5);
    unsigned rm2 = extract32(insn, 16, 5);
    unsigned ra3 = assemble_rc64(insn);
    TCGv_i64 a, b, c;

    nullify_over(ctx);
    a = load_frd0(rm1);
    b = load_frd0(rm2);
    c = load_frd0(ra3);

    if (neg) {
        gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
    } else {
        gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
    }

    /* The result was written in place into A; free the other temps,
       store the result, then free A as well.  */
    tcg_temp_free_i64(b);
    tcg_temp_free_i64(c);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx, NO_EXIT);
}
/* Decode table for the fused multiply-add insns (major opcode 0x2E).  */
static const DisasInsn table_fp_fused[] = {
    { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
    { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
};
3614 static ExitStatus translate_table_int(DisasContext *ctx, uint32_t insn,
3615 const DisasInsn table[], size_t n)
3617 size_t i;
3618 for (i = 0; i < n; ++i) {
3619 if ((insn & table[i].mask) == table[i].insn) {
3620 return table[i].trans(ctx, insn, &table[i]);
3623 return gen_illegal(ctx);
3626 #define translate_table(ctx, insn, table) \
3627 translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
/* Decode one instruction word: dispatch on the major opcode in the top
   six bits (insn[31:26]), either to a sub-decode table or directly to a
   trans_* routine.  Reserved, unassigned, and unimplemented opcodes
   fall through to gen_illegal.  */
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t opc = extract32(insn, 26, 6);

    switch (opc) {
    case 0x00: /* system op */
        return translate_table(ctx, insn, table_system);
    case 0x01:
        return translate_table(ctx, insn, table_mem_mgmt);
    case 0x02:
        return translate_table(ctx, insn, table_arith_log);
    case 0x03:
        return translate_table(ctx, insn, table_index_mem);
    case 0x06:
        return trans_fmpyadd(ctx, insn, false);
    case 0x08:
        return trans_ldil(ctx, insn);
    case 0x09:
        return trans_copr_w(ctx, insn);
    case 0x0A:
        return trans_addil(ctx, insn);
    case 0x0B:
        return trans_copr_dw(ctx, insn);
    case 0x0C:
        return translate_table(ctx, insn, table_float_0c);
    case 0x0D:
        return trans_ldo(ctx, insn);
    case 0x0E:
        return translate_table(ctx, insn, table_float_0e);

    case 0x10:
        return trans_load(ctx, insn, false, MO_UB);
    case 0x11:
        return trans_load(ctx, insn, false, MO_TEUW);
    case 0x12:
        return trans_load(ctx, insn, false, MO_TEUL);
    case 0x13:
        return trans_load(ctx, insn, true, MO_TEUL);
    case 0x16:
        return trans_fload_mod(ctx, insn);
    case 0x17:
        return trans_load_w(ctx, insn);
    case 0x18:
        return trans_store(ctx, insn, false, MO_UB);
    case 0x19:
        return trans_store(ctx, insn, false, MO_TEUW);
    case 0x1A:
        return trans_store(ctx, insn, false, MO_TEUL);
    case 0x1B:
        return trans_store(ctx, insn, true, MO_TEUL);
    case 0x1E:
        return trans_fstore_mod(ctx, insn);
    case 0x1F:
        return trans_store_w(ctx, insn);

    case 0x20:
        return trans_cmpb(ctx, insn, true, false, false);
    case 0x21:
        return trans_cmpb(ctx, insn, true, true, false);
    case 0x22:
        return trans_cmpb(ctx, insn, false, false, false);
    case 0x23:
        return trans_cmpb(ctx, insn, false, true, false);
    case 0x24:
        return trans_cmpiclr(ctx, insn);
    case 0x25:
        return trans_subi(ctx, insn);
    case 0x26:
        return trans_fmpyadd(ctx, insn, true);
    case 0x27:
        return trans_cmpb(ctx, insn, true, false, true);
    case 0x28:
        return trans_addb(ctx, insn, true, false);
    case 0x29:
        return trans_addb(ctx, insn, true, true);
    case 0x2A:
        return trans_addb(ctx, insn, false, false);
    case 0x2B:
        return trans_addb(ctx, insn, false, true);
    case 0x2C:
    case 0x2D:
        return trans_addi(ctx, insn);
    case 0x2E:
        return translate_table(ctx, insn, table_fp_fused);
    case 0x2F:
        return trans_cmpb(ctx, insn, false, false, true);

    case 0x30:
    case 0x31:
        return trans_bb(ctx, insn);
    case 0x32:
        return trans_movb(ctx, insn, false);
    case 0x33:
        return trans_movb(ctx, insn, true);
    case 0x34:
        return translate_table(ctx, insn, table_sh_ex);
    case 0x35:
        return translate_table(ctx, insn, table_depw);
    case 0x38:
        return trans_be(ctx, insn, false);
    case 0x39:
        return trans_be(ctx, insn, true);
    case 0x3A:
        return translate_table(ctx, insn, table_branch);

    case 0x04: /* spopn */
    case 0x05: /* diag */
    case 0x0F: /* product specific */
        break;

    case 0x07: /* unassigned */
    case 0x15: /* unassigned */
    case 0x1D: /* unassigned */
    case 0x37: /* unassigned */
    case 0x3F: /* unassigned */
    default:
        break;
    }
    return gen_illegal(ctx);
}
/* Translate one TB of HPPA code into TCG ops.  The front of the IA
   queue comes from tb->pc, the back from tb->cs_base; the pending
   PSW[N] (nullify) state is seeded from bit 0 of tb->flags.  */
void gen_intermediate_code(CPUHPPAState *env, struct TranslationBlock *tb)
{
    HPPACPU *cpu = hppa_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx;
    ExitStatus ret;
    int num_insns, max_insns, i;

    ctx.tb = tb;
    ctx.cs = cs;
    ctx.iaoq_f = tb->pc;
    ctx.iaoq_b = tb->cs_base;
    ctx.singlestep_enabled = cs->singlestep_enabled;

    ctx.ntemps = 0;
    for (i = 0; i < ARRAY_SIZE(ctx.temps); ++i) {
        TCGV_UNUSED(ctx.temps[i]);
    }

    /* Compute the maximum number of insns to execute, as bounded by
       (1) icount, (2) single-stepping, (3) branch delay slots, or
       (4) the number of insns remaining on the current page.  */
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (ctx.singlestep_enabled || singlestep) {
        max_insns = 1;
    } else if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    num_insns = 0;
    gen_tb_start(tb);

    /* Seed the nullification status from PSW[N], as shown in TB->FLAGS.  */
    ctx.null_cond = cond_make_f();
    ctx.psw_n_nonzero = false;
    if (tb->flags & 1) {
        ctx.null_cond.c = TCG_COND_ALWAYS;
        ctx.psw_n_nonzero = true;
    }
    ctx.null_lab = NULL;

    do {
        tcg_gen_insn_start(ctx.iaoq_f, ctx.iaoq_b);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.iaoq_f, BP_ANY))) {
            ret = gen_excp(&ctx, EXCP_DEBUG);
            break;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (ctx.iaoq_f < TARGET_PAGE_SIZE) {
            /* Emulated addresses within the first page get special
               (syscall gateway) handling rather than normal decode.  */
            ret = do_page_zero(&ctx);
            assert(ret != NO_EXIT);
        } else {
            /* Always fetch the insn, even if nullified, so that we check
               the page permissions for execute.  */
            uint32_t insn = cpu_ldl_code(env, ctx.iaoq_f);

            /* Set up the IA queue for the next insn.
               This will be overwritten by a branch.  */
            if (ctx.iaoq_b == -1) {
                /* Back of the queue is only known at run time.  */
                ctx.iaoq_n = -1;
                ctx.iaoq_n_var = get_temp(&ctx);
                tcg_gen_addi_tl(ctx.iaoq_n_var, cpu_iaoq_b, 4);
            } else {
                ctx.iaoq_n = ctx.iaoq_b + 4;
                TCGV_UNUSED(ctx.iaoq_n_var);
            }

            if (unlikely(ctx.null_cond.c == TCG_COND_ALWAYS)) {
                /* Insn is unconditionally nullified: skip decode.  */
                ctx.null_cond.c = TCG_COND_NEVER;
                ret = NO_EXIT;
            } else {
                ret = translate_one(&ctx, insn);
                assert(ctx.null_lab == NULL);
            }
        }

        /* Release per-insn temporaries.  */
        for (i = 0; i < ctx.ntemps; ++i) {
            tcg_temp_free(ctx.temps[i]);
            TCGV_UNUSED(ctx.temps[i]);
        }
        ctx.ntemps = 0;

        /* If we see non-linear instructions, exhaust instruction count,
           or run out of buffer space, stop generation.  */
        /* ??? The non-linear instruction restriction is purely due to
           the debugging dump.  Otherwise we *could* follow unconditional
           branches within the same page.  */
        if (ret == NO_EXIT
            && (ctx.iaoq_b != ctx.iaoq_f + 4
                || num_insns >= max_insns
                || tcg_op_buf_full())) {
            if (ctx.null_cond.c == TCG_COND_NEVER
                || ctx.null_cond.c == TCG_COND_ALWAYS) {
                nullify_set(&ctx, ctx.null_cond.c == TCG_COND_ALWAYS);
                gen_goto_tb(&ctx, 0, ctx.iaoq_b, ctx.iaoq_n);
                ret = EXIT_GOTO_TB;
            } else {
                ret = EXIT_IAQ_N_STALE;
            }
        }

        /* Advance the IA queue.  */
        ctx.iaoq_f = ctx.iaoq_b;
        ctx.iaoq_b = ctx.iaoq_n;
        if (ret == EXIT_NORETURN
            || ret == EXIT_GOTO_TB
            || ret == EXIT_IAQ_N_UPDATED) {
            break;
        }
        if (ctx.iaoq_f == -1) {
            /* Front of queue now unknown at compile time: update the
               CPU state and leave the TB.  */
            tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_n, ctx.iaoq_n_var);
            nullify_save(&ctx);
            ret = EXIT_IAQ_N_UPDATED;
            break;
        }
        if (ctx.iaoq_b == -1) {
            tcg_gen_mov_tl(cpu_iaoq_b, ctx.iaoq_n_var);
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* Emit the TB epilogue according to how the loop exited.  */
    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_IAQ_N_STALE:
        copy_iaoq_entry(cpu_iaoq_f, ctx.iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_b, cpu_iaoq_b);
        nullify_save(&ctx);
        /* FALLTHRU */
    case EXIT_IAQ_N_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    tb->size = num_insns * 4;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        /* Special-case the page-zero gateway entry points, which have
           no real insns to disassemble.  */
        switch (tb->pc) {
        case 0x00:
            qemu_log("IN:\n0x00000000:  (null)\n\n");
            break;
        case 0xb0:
            qemu_log("IN:\n0x000000b0:  light-weight-syscall\n\n");
            break;
        case 0xe0:
            qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n\n");
            break;
        case 0x100:
            qemu_log("IN:\n0x00000100:  syscall\n\n");
            break;
        default:
            qemu_log("IN: %s\n", lookup_symbol(tb->pc));
            log_target_disas(cs, tb->pc, tb->size, 1);
            qemu_log("\n");
            break;
        }
        qemu_log_unlock();
    }
#endif
}
/* Restore CPU state from the (iaoq_f, iaoq_b) pair recorded at each
   tcg_gen_insn_start.  A recorded iaoq_b of -1 means the back of the
   queue was variable and is already correct in env, so it is left
   untouched.  */
void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->iaoq_f = data[0];
    if (data[1] != -1) {
        env->iaoq_b = data[1];
    }
    /* Since we were executing the instruction at IAOQ_F, and took some
       sort of action that provoked the cpu_restore_state, we can infer
       that the instruction was not nullified.  */
    env->psw_n = 0;
}