/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H
/* Choose to use explicit sizes within this file. */
#undef tcg_temp_new

typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;
typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    uint64_t iaoq_f;
    uint64_t iaoq_b;
    uint64_t iaoq_n;
    TCGv_i64 iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    TCGv_i64 zero;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;
    bool insn_start_updated;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;
#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)       (C)->unalign
#define MMU_DISABLED(C)  false
#else
#define UNALIGN(C)       MO_ALIGN
#define MMU_DISABLED(C)  MMU_IDX_MMU_DISABLED((C)->mmu_idx)
#endif
/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
    if (ctx->is_pa20) {
        if (val & PSW_SM_W) {
            val |= PSW_W;
        }
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
    } else {
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
    }
    return val;
}
/* Inverted space register indicates 0 means sr0 not inferred from base.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}
/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
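
/*
 * Illustration (follows directly from the expression above):
 *   M:A = 00 or 01  ->  0   no base register update
 *   M:A = 10        ->  1   post-modify
 *   M:A = 11        -> -1   pre-modify
 * The signs match the convention documented at do_load_32 below:
 * negative for pre-modify, positive for post-modify, zero for none.
 */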
/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}
/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}
static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}
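
/*
 * Worked example of the identity (an editor's check, not from the
 * manual): x = 1, y = 1 gives val = 0b100001 = 33, and
 * (33 ^ 31) + 1 = 63, matching the official 32 * 1 + 32 - 1.
 * With y = 0 the increment carries out of the low 5 bits and sums
 * with x, e.g. (0 ^ 31) + 1 = 32 = 32 * 0 + 32 - 0.
 */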
/* Expander for assemble_16a(s,cat(im10a,0),i). */
static int expand_11a(DisasContext *ctx, int val)
{
    /*
     * @val is bit 0 and bits [4:15].
     * Swizzle things around depending on PSW.W.
     */
    int im10a = extract32(val, 1, 10);
    int s = extract32(val, 11, 2);
    int i = (-(val & 1) << 13) | (im10a << 3);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}
/* Expander for assemble_16a(s,im11a,i). */
static int expand_12a(DisasContext *ctx, int val)
{
    /*
     * @val is bit 0 and bits [3:15].
     * Swizzle things around depending on PSW.W.
     */
    int im11a = extract32(val, 1, 11);
    int s = extract32(val, 12, 2);
    int i = (-(val & 1) << 13) | (im11a << 2);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}
/* Expander for assemble_16(s,im14). */
static int expand_16(DisasContext *ctx, int val)
{
    /*
     * @val is bits [0:15], containing both im14 and s.
     * Swizzle things around depending on PSW.W.
     */
    int s = extract32(val, 14, 2);
    int i = (-(val & 1) << 13) | extract32(val, 1, 13);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}
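
/*
 * A note on the three expanders above: -(val & 1) << 13 replicates
 * the encoded sign bit into bits [63:13], so e.g. a 16-bit field of
 * 0x0003 (sign set, im14 = 1) expands to -8192 | 1 = -8191.  Since
 * the sign extension makes bits [14:13] copies of the sign, the xor
 * with s << 13 deposits s (sign clear) or its complement (sign set),
 * which appears to be how assemble_16 widens the immediate using the
 * space-register bits when PSW.W is set.
 */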
/* The sp field is only present with !PSW_W. */
static int sp0_if_wide(DisasContext *ctx, int sp)
{
    return ctx->tb_flags & PSW_W ? 0 : sp;
}

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}
/*
 * In many places pa1.x did not decode the bit that later became
 * the pa2.0 D bit.  Suppress D unless the cpu is pa2.0.
 */
static int pa20_d(DisasContext *ctx, int val)
{
    return ctx->is_pa20 & val;
}

/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"
/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3
/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_i64 cpu_iaoq_f;
static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;
void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}
static void set_insn_breg(DisasContext *ctx, int breg)
{
    assert(!ctx->insn_start_updated);
    ctx->insn_start_updated = true;
    tcg_set_insn_start_param(ctx->base.insn_start, 2, breg);
}
static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}
static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
{
    return cond_make_tmp(c, a0, tcg_constant_i64(0));
}

static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tmp(c, t0, t1);
}
static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}
static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        return ctx->zero;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new_i64();
    } else {
        return cpu_gr[reg];
    }
}
static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}
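
/*
 * Note that the movcond in save_or_nullify computes
 * "dest = nullified ? dest : t": when the in-flight nullification
 * condition holds, the old value is written back, so a nullified
 * insn leaves the architectural register visibly unchanged.
 */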
#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}
static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS
static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}
/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}
/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}
/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}
/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            uint64_t ival, TCGv_i64 vval)
{
    uint64_t mask = gva_offset_mask(ctx->tb_flags);

    if (ival != -1) {
        tcg_gen_movi_i64(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_i64(dest, vval);
    } else {
        tcg_gen_andi_i64(dest, vval, mask);
    }
}
static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}
static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}
#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}
static void gen_goto_tb(DisasContext *ctx, int which,
                        uint64_t f, uint64_t b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}
static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}
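
/*
 * Here c is cf >> 1, as in do_cond below: c = 2 ("<"), 3 ("<=") and
 * 6 (SV) consume the signed-overflow value, while c = 4 (NUV/UV) and
 * 5 (ZNV/VNZ) consume the carry bits.
 */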
/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 uv, TCGv_i64 sv)
{
    DisasCond cond;
    TCGv_i64 tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        if (!d) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        if (!d) {
            tcg_gen_ext32s_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        ((N ^ V) | Z / !((N ^ V) | Z)) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_eqv_i64(tmp, res, sv);
        if (!d) {
            tcg_gen_sextract_i64(tmp, tmp, 31, 1);
            tcg_gen_and_i64(tmp, tmp, res);
            tcg_gen_ext32u_i64(tmp, tmp);
        } else {
            tcg_gen_sari_i64(tmp, tmp, 63);
            tcg_gen_and_i64(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!UV / UV) */
        cond = cond_make_0(TCG_COND_EQ, uv);
        break;
    case 5: /* ZNV / VNZ     (!UV | Z / UV & !Z) */
        tmp = tcg_temp_new_i64();
        tcg_gen_movcond_i64(TCG_COND_EQ, tmp, uv, ctx->zero, ctx->zero, res);
        if (!d) {
            tcg_gen_ext32u_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        if (!d) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
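
/*
 * Spot check of the case 3 simplification above, for d (an editor's
 * check): res == 0 forces tmp to 0 via the final "and", so EQ-0
 * holds (Z); res < 0 with sv >= 0 clears the sign of eqv(res, sv),
 * the sari fills tmp with zeros, and EQ-0 again holds (N ^ V).
 * If neither holds, the sign-fill is all ones and tmp == res != 0.
 */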
/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (!d) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}
/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3:  /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4:  /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5:  /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6:  /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7:  /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    if (!d) {
        TCGv_i64 tmp = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(tmp, res);
        } else {
            tcg_gen_ext32s_i64(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}
/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_i64 res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}
/* Similar, but for unit zero conditions.  */
static DisasCond do_unit_zero_cond(unsigned cf, bool d, TCGv_i64 res)
{
    TCGv_i64 tmp;
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;
    uint64_t ones = 0, sgns = 0;

    switch (cf >> 1) {
    case 1: /* SBW / NBW */
        if (d) {
            ones = d_repl;
            sgns = d_repl << 31;
        }
        break;
    case 2: /* SBZ / NBZ */
        ones = d_repl * 0x01010101u;
        sgns = ones << 7;
        break;
    case 3: /* SHZ / NHZ */
        ones = d_repl * 0x00010001u;
        sgns = ones << 15;
        break;
    }
    if (ones == 0) {
        /* Undefined, or 0/1 (never/always). */
        return cf & 1 ? cond_make_t() : cond_make_f();
    }

    /*
     * See hasless(v,1) from
     *   https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
     */
    tmp = tcg_temp_new_i64();
    tcg_gen_subi_i64(tmp, res, ones);
    tcg_gen_andc_i64(tmp, tmp, res);
    tcg_gen_andi_i64(tmp, tmp, sgns);

    return cond_make_0_tmp(cf & 1 ? TCG_COND_EQ : TCG_COND_NE, tmp);
}
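
/*
 * Byte-wise example of the hasless() trick (SBZ with d = 0, where
 * ones = 0x01010101 and sgns = 0x80808080): for res = 0x12003456,
 * res - ones = 0x10FF3355, and the andc keeps the borrow bits only
 * where res contributed no set bit, so the final mask is 0x00800000,
 * which is nonzero exactly because byte 2 of res is zero.
 */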
static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    if (!d) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extract_i64(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}
/* Compute signed overflow for addition.  */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2,
                          TCGv_i64 orig_in1, int shift, bool d)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    switch (shift) {
    case 0:
        break;
    case 1:
        /* Shift left by one and compare the sign. */
        tcg_gen_add_i64(tmp, orig_in1, orig_in1);
        tcg_gen_xor_i64(tmp, tmp, orig_in1);
        /* Incorporate into the overflow. */
        tcg_gen_or_i64(sv, sv, tmp);
        break;
    default:
        {
            int sign_bit = d ? 63 : 31;

            /* Compare the sign against all lower bits. */
            tcg_gen_sextract_i64(tmp, orig_in1, sign_bit, 1);
            tcg_gen_xor_i64(tmp, tmp, orig_in1);
            /*
             * If one of the bits shifting into or through the sign
             * differs, then we have overflow.
             */
            tcg_gen_extract_i64(tmp, tmp, sign_bit - shift, shift);
            tcg_gen_movcond_i64(TCG_COND_NE, sv, tmp, ctx->zero,
                                tcg_constant_i64(-1), sv);
        }
    }
    return sv;
}
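
/*
 * The xor/xor/andc sequence above is the textbook signed-overflow
 * mask (res ^ in1) & ~(in1 ^ in2): the sign bit is set iff the
 * inputs share a sign and the result's sign differs.  For example,
 * 0x7fffffff + 1 gives
 * (0x80000000 ^ 0x7fffffff) & ~(0x7fffffff ^ 1) = 0x80000001,
 * which has bit 31 set, as the !d case requires.
 */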
/* Compute unsigned overflow for addition.  */
static TCGv_i64 do_add_uv(DisasContext *ctx, TCGv_i64 cb, TCGv_i64 cb_msb,
                          TCGv_i64 in1, int shift, bool d)
{
    if (shift == 0) {
        return get_carry(ctx, d, cb, cb_msb);
    } else {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_extract_i64(tmp, in1, (d ? 63 : 31) - shift, shift);
        tcg_gen_or_i64(tmp, tmp, get_carry(ctx, d, cb, cb_msb));
        return tmp;
    }
}
/* Compute signed overflow for subtraction.  */
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_and_i64(sv, sv, tmp);

    return sv;
}
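
/*
 * Likewise for subtraction, (res ^ in1) & (in1 ^ in2) sets the sign
 * bit iff the operands differ in sign and the result's sign differs
 * from in1.  For example, 0x80000000 - 1 gives
 * (0x7fffffff ^ 0x80000000) & (0x80000000 ^ 1) = 0x80000001.
 */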
static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, in1, uv, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;

    in1 = orig_in1;
    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        if (is_c) {
            tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), ctx->zero);
        }
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2, orig_in1, shift, d);
        if (is_tsv) {
            if (!d) {
                tcg_gen_ext32s_i64(sv, sv);
            }
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute unsigned overflow if required.  */
    uv = NULL;
    if (cond_need_cb(c)) {
        uv = do_add_uv(ctx, cb, cb_msb, orig_in1, shift, d);
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, uv, sv);
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}
static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}
static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
                         get_psw_carry(ctx, d), ctx->zero);
        tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            if (!d) {
                tcg_gen_ext32s_i64(sv, sv);
            }
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
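
/*
 * Sketch of why the carry math in do_sub works: bitwise,
 * dest = in1 ^ ~in2 ^ carry_in for IN1 + ~IN2 + 1, so
 * eqv(in1, in2) ^ dest (that is, in1 ^ ~in2 ^ dest) recovers the
 * per-bit carry-in vector, while sub2 with the high word seeded to
 * 1 leaves the carry out of bit 63 in cb_msb.
 */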
static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}
static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}
static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(ctx, cf, d, dest);
    }
}
static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}
static void do_unit_addsub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                           TCGv_i64 in2, unsigned cf, bool d,
                           bool is_tc, bool is_add)
{
    TCGv_i64 dest = tcg_temp_new_i64();
    uint64_t test_cb = 0;
    DisasCond cond;

    /* Select which carry-out bits to test. */
    switch (cf >> 1) {
    case 4: /* NDC / SDC -- 4-bit carries */
        test_cb = dup_const(MO_8, 0x88);
        break;
    case 5: /* NWC / SWC -- 32-bit carries */
        if (d) {
            test_cb = dup_const(MO_32, INT32_MIN);
        } else {
            cf &= 1; /* undefined -- map to never/always */
        }
        break;
    case 6: /* NBC / SBC -- 8-bit carries */
        test_cb = dup_const(MO_8, INT8_MIN);
        break;
    case 7: /* NHC / SHC -- 16-bit carries */
        test_cb = dup_const(MO_16, INT16_MIN);
        break;
    }
    if (!d) {
        test_cb = (uint32_t)test_cb;
    }

    if (!test_cb) {
        /* No need to compute carries if we don't need to test them. */
        if (is_add) {
            tcg_gen_add_i64(dest, in1, in2);
        } else {
            tcg_gen_sub_i64(dest, in1, in2);
        }
        cond = do_unit_zero_cond(cf, d, dest);
    } else {
        TCGv_i64 cb = tcg_temp_new_i64();

        if (d) {
            TCGv_i64 cb_msb = tcg_temp_new_i64();
            if (is_add) {
                tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
                tcg_gen_xor_i64(cb, in1, in2);
            } else {
                /* See do_sub, !is_b. */
                TCGv_i64 one = tcg_constant_i64(1);
                tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
                tcg_gen_eqv_i64(cb, in1, in2);
            }
            tcg_gen_xor_i64(cb, cb, dest);
            tcg_gen_extract2_i64(cb, cb, cb_msb, 1);
        } else {
            if (is_add) {
                tcg_gen_add_i64(dest, in1, in2);
                tcg_gen_xor_i64(cb, in1, in2);
            } else {
                tcg_gen_sub_i64(dest, in1, in2);
                tcg_gen_eqv_i64(cb, in1, in2);
            }
            tcg_gen_xor_i64(cb, cb, dest);
            tcg_gen_shri_i64(cb, cb, 1);
        }

        tcg_gen_andi_i64(cb, cb, test_cb);
        cond = cond_make_0_tmp(cf & 1 ? TCG_COND_EQ : TCG_COND_NE, cb);
    }

    if (is_tc) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }
    save_gpr(ctx, rt, dest);

    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
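
/*
 * Note on the extract2/shri above: cb initially holds per-bit
 * carry-ins (see the do_sub sketch), and shifting right by one
 * realigns it so that bit i is the carry *out* of bit i.  test_cb
 * then selects unit boundaries, e.g. dup_const(MO_8, 0x88) tests
 * bits 3 and 7 of every byte, the carries out of each nibble.
 */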
#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr ptr;
    TCGv_i64 tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_i64();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new_i64();
    spc = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(tmp, tmp, 030);
    tcg_gen_trunc_i64_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif
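
/*
 * The 030 above is octal (decimal 24): after the shift the two
 * selector bits sit at bits [4:3], already scaled by
 * sizeof(uint64_t), so adding the result to tcg_env and loading at
 * offsetof(sr[4]) indexes sr[4..7] directly.
 */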
static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_i64 base = load_gpr(ctx, rb);
    TCGv_i64 ofs;
    TCGv_i64 addr;

    set_insn_breg(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new_i64();
        tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
        tcg_gen_add_i64(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new_i64();
        tcg_gen_addi_i64(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base,
                     gva_offset_mask(ctx->tb_flags));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
    }
#endif
}
/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}
static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}
static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}
static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}
static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, int64_t disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = tcg_temp_new_i64();
    }
    do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}
static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}
static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}
static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     int64_t disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}
static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}
static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}
static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}
static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}
static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}
/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, uint64_t dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
                       DisasCond *cond)
{
    uint64_t dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
                       unsigned link, bool is_n)
{
    TCGv_i64 a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = tcg_temp_new_i64();
        tcg_gen_mov_i64(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
                tcg_gen_addi_i64(next, next, 4);
                copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
        next = tcg_temp_new_i64();
        tcg_gen_addi_i64(next, dest, 4);
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);

        nullify_over(ctx);
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx);
    } else {
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new_i64();
        next = tcg_temp_new_i64();

        copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
    return true;
}
/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *      IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
{
    TCGv_i64 dest;
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
        dest = tcg_temp_new_i64();
        tcg_gen_ori_i64(dest, offset, 3);
        break;
    default:
        dest = tcg_temp_new_i64();
        tcg_gen_andi_i64(dest, offset, -4);
        tcg_gen_ori_i64(dest, dest, ctx->privilege);
        tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
        break;
    }
    return dest;
}
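
/*
 * Worked example for the default case (a sketch): at privilege 2,
 * a target with offset bits ...00 yields dest = offset | 2, which is
 * unsigned-greater, so dest is chosen and execution stays at 2; a
 * target with bits ...11 leaves offset greater, so offset is used
 * and the privilege number grows to 3.  Either way the number never
 * shrinks, i.e. privilege is never raised, per the pseudocode above.
 */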
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    TCGv_i64 tmp;

    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_i64(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
        tmp = tcg_temp_new_i64();
        tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
        copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
        tcg_gen_addi_i64(tmp, tmp, 4);
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif
static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}

static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
{
    unsigned rt = a->t;
    TCGv_i64 tmp = dest_gpr(ctx, rt);
    tcg_gen_movi_i64(tmp, ctx->iaoq_f & ~3ULL);
    save_gpr(ctx, rt, tmp);

    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
{
    unsigned rt = a->t;
    unsigned rs = a->sp;
    TCGv_i64 t0 = tcg_temp_new_i64();

    load_spr(ctx, t0, rs);
    tcg_gen_shri_i64(t0, t0, 32);

    save_gpr(ctx, rt, t0);

    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
{
    unsigned rt = a->t;
    unsigned ctl = a->r;
    TCGv_i64 tmp;

    switch (ctl) {
    case CR_SAR:
        if (a->e == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_i64(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (translator_io_start(&ctx->base)) {
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        }
        gen_helper_read_interval_timer(tmp);
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx);
    case 26:
    case 27:
        break;
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
{
    unsigned rr = a->r;
    unsigned rs = a->sp;
    TCGv_i64 tmp;

    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);

    if (rs >= 4) {
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], tmp);
    }

    return nullify_end(ctx);
}
static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
{
    unsigned ctl = a->t;
    TCGv_i64 reg;
    TCGv_i64 tmp;

    if (ctl == CR_SAR) {
        reg = load_gpr(ctx, a->r);
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
        save_or_nullify(ctx, cpu_sar, tmp);

        cond_free(&ctx->null_cond);
        return true;
    }

    /* All other control registers are privileged or read-only.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);

#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    if (ctx->is_pa20) {
        reg = load_gpr(ctx, a->r);
    } else {
        reg = tcg_temp_new_i64();
        tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
    }

    switch (ctl) {
    case CR_IT:
        if (translator_io_start(&ctx->base)) {
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        }
        gen_helper_write_interval_timer(tcg_env, reg);
        break;
    case CR_EIRR:
        /* Helper modifies interrupt lines and is therefore IO.  */
        translator_io_start(&ctx->base);
        gen_helper_write_eirr(tcg_env, reg);
        /* Exit to re-evaluate interrupts in the main loop.  */
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        break;

    case CR_IIASQ:
    case CR_IIAOQ:
        /* FIXME: Respect PSW_Q bit */
        /* The write advances the queue and stores to the back element.  */
        tmp = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp, tcg_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
        tcg_gen_st_i64(reg, tcg_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        break;

    case CR_PID1:
    case CR_PID2:
    case CR_PID3:
    case CR_PID4:
        tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
#ifndef CONFIG_USER_ONLY
        gen_helper_change_prot_id(tcg_env);
#endif
        break;

    case CR_EIEM:
        /* Exit to re-evaluate interrupts in the main loop.  */
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        /* FALLTHRU */
    default:
        tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
        break;
    }
    return nullify_end(ctx);
#endif
}
2245 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2247 TCGv_i64 tmp = tcg_temp_new_i64();
2249 tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2250 tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2251 save_or_nullify(ctx, cpu_sar, tmp);
2253 cond_free(&ctx->null_cond);
2254 return true;
2257 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2259 TCGv_i64 dest = dest_gpr(ctx, a->t);
2261 #ifdef CONFIG_USER_ONLY
2262 /* We don't implement space registers in user mode. */
2263 tcg_gen_movi_i64(dest, 0);
2264 #else
2265 tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2266 tcg_gen_shri_i64(dest, dest, 32);
2267 #endif
2268 save_gpr(ctx, a->t, dest);
2270 cond_free(&ctx->null_cond);
2271 return true;
2274 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2276 #ifdef CONFIG_USER_ONLY
2277 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2278 #else
2279 TCGv_i64 tmp;
2281 /* HP-UX 11i and HP ODE use rsm for read access to the PSW. */
2282 if (a->i) {
2283 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2286 nullify_over(ctx);
2288 tmp = tcg_temp_new_i64();
2289 tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2290 tcg_gen_andi_i64(tmp, tmp, ~a->i);
2291 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2292 save_gpr(ctx, a->t, tmp);
2294 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2295 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2296 return nullify_end(ctx);
2297 #endif
2300 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2302 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2303 #ifndef CONFIG_USER_ONLY
2304 TCGv_i64 tmp;
2306 nullify_over(ctx);
2308 tmp = tcg_temp_new_i64();
2309 tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2310 tcg_gen_ori_i64(tmp, tmp, a->i);
2311 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2312 save_gpr(ctx, a->t, tmp);
2314 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2315 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2316 return nullify_end(ctx);
2317 #endif
2320 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2322 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2323 #ifndef CONFIG_USER_ONLY
2324 TCGv_i64 tmp, reg;
2325 nullify_over(ctx);
2327 reg = load_gpr(ctx, a->r);
2328 tmp = tcg_temp_new_i64();
2329 gen_helper_swap_system_mask(tmp, tcg_env, reg);
2331 /* Exit the TB to recognize new interrupts. */
2332 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2333 return nullify_end(ctx);
2334 #endif
2337 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2339 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2340 #ifndef CONFIG_USER_ONLY
2341 nullify_over(ctx);
2343 if (rfi_r) {
2344 gen_helper_rfi_r(tcg_env);
2345 } else {
2346 gen_helper_rfi(tcg_env);
2348 /* Exit the TB to recognize new interrupts. */
2349 tcg_gen_exit_tb(NULL, 0);
2350 ctx->base.is_jmp = DISAS_NORETURN;
2352 return nullify_end(ctx);
2353 #endif
2356 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2358 return do_rfi(ctx, false);
2361 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2363 return do_rfi(ctx, true);
2366 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2368 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2369 #ifndef CONFIG_USER_ONLY
2370 nullify_over(ctx);
2371 gen_helper_halt(tcg_env);
2372 ctx->base.is_jmp = DISAS_NORETURN;
2373 return nullify_end(ctx);
2374 #endif
2377 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2379 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2380 #ifndef CONFIG_USER_ONLY
2381 nullify_over(ctx);
2382 gen_helper_reset(tcg_env);
2383 ctx->base.is_jmp = DISAS_NORETURN;
2384 return nullify_end(ctx);
2385 #endif
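/* The seven shadow registers preserve GRs 1, 8, 9, 16, 17, 24 and 25
across interruptions. */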
2388 static bool do_getshadowregs(DisasContext *ctx)
2390 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2391 nullify_over(ctx);
2392 tcg_gen_ld_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
2393 tcg_gen_ld_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
2394 tcg_gen_ld_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
2395 tcg_gen_ld_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
2396 tcg_gen_ld_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
2397 tcg_gen_ld_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
2398 tcg_gen_ld_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
2399 return nullify_end(ctx);
2402 static bool do_putshadowregs(DisasContext *ctx)
2404 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2405 nullify_over(ctx);
2406 tcg_gen_st_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
2407 tcg_gen_st_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
2408 tcg_gen_st_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
2409 tcg_gen_st_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
2410 tcg_gen_st_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
2411 tcg_gen_st_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
2412 tcg_gen_st_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
2413 return nullify_end(ctx);
2416 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2418 return do_getshadowregs(ctx);
2421 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2423 if (a->m) {
2424 TCGv_i64 dest = dest_gpr(ctx, a->b);
2425 TCGv_i64 src1 = load_gpr(ctx, a->b);
2426 TCGv_i64 src2 = load_gpr(ctx, a->x);
2428 /* The only thing we need to do is the base register modification. */
2429 tcg_gen_add_i64(dest, src1, src2);
2430 save_gpr(ctx, a->b, dest);
2432 cond_free(&ctx->null_cond);
2433 return true;
2436 static bool trans_fic(DisasContext *ctx, arg_ldst *a)
2438 /* End TB for flush instruction cache, so we pick up new insns. */
2439 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2440 return trans_nop_addrx(ctx, a);
2443 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2445 TCGv_i64 dest, ofs;
2446 TCGv_i32 level, want;
2447 TCGv_i64 addr;
2449 nullify_over(ctx);
2451 dest = dest_gpr(ctx, a->t);
2452 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
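/* The privilege level to check comes either from an immediate or from
the low 2 bits of GR[ri]. */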
2454 if (a->imm) {
2455 level = tcg_constant_i32(a->ri & 3);
2456 } else {
2457 level = tcg_temp_new_i32();
2458 tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2459 tcg_gen_andi_i32(level, level, 3);
2461 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2463 gen_helper_probe(dest, tcg_env, addr, level, want);
2465 save_gpr(ctx, a->t, dest);
2466 return nullify_end(ctx);
2469 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2471 if (ctx->is_pa20) {
2472 return false;
2474 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2475 #ifndef CONFIG_USER_ONLY
2476 TCGv_i64 addr;
2477 TCGv_i64 ofs, reg;
2479 nullify_over(ctx);
2481 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2482 reg = load_gpr(ctx, a->r);
2483 if (a->addr) {
2484 gen_helper_itlba_pa11(tcg_env, addr, reg);
2485 } else {
2486 gen_helper_itlbp_pa11(tcg_env, addr, reg);
2489 /* Exit TB for TLB change if mmu is enabled. */
2490 if (ctx->tb_flags & PSW_C) {
2491 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2493 return nullify_end(ctx);
2494 #endif
2497 static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
2499 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2500 #ifndef CONFIG_USER_ONLY
2501 TCGv_i64 addr;
2502 TCGv_i64 ofs;
2504 nullify_over(ctx);
2506 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2509 * Page align now, rather than later, so that we can add in the
2510 * pa2.0 page_size field from the low 4 bits of GR[b].
2512 tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
2513 if (ctx->is_pa20) {
2514 tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
2517 if (local) {
2518 gen_helper_ptlb_l(tcg_env, addr);
2519 } else {
2520 gen_helper_ptlb(tcg_env, addr);
2523 if (a->m) {
2524 save_gpr(ctx, a->b, ofs);
2527 /* Exit TB for TLB change if mmu is enabled. */
2528 if (ctx->tb_flags & PSW_C) {
2529 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2531 return nullify_end(ctx);
2532 #endif
2535 static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
2537 return do_pxtlb(ctx, a, false);
2540 static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
2542 return ctx->is_pa20 && do_pxtlb(ctx, a, true);
2545 static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
2547 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2548 #ifndef CONFIG_USER_ONLY
2549 nullify_over(ctx);
2551 trans_nop_addrx(ctx, a);
2552 gen_helper_ptlbe(tcg_env);
2554 /* Exit TB for TLB change if mmu is enabled. */
2555 if (ctx->tb_flags & PSW_C) {
2556 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2558 return nullify_end(ctx);
2559 #endif
2563 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2564 * See
2565 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2566 * page 13-9 (195/206)
2568 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2570 if (ctx->is_pa20) {
2571 return false;
2573 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2574 #ifndef CONFIG_USER_ONLY
2575 TCGv_i64 addr, atl, stl;
2576 TCGv_i64 reg;
2578 nullify_over(ctx);
2581 * FIXME:
2582 * if (not (pcxl or pcxl2))
2583 * return gen_illegal(ctx);
2586 atl = tcg_temp_new_i64();
2587 stl = tcg_temp_new_i64();
2588 addr = tcg_temp_new_i64();
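/* Reassemble the {space, offset} pair from the interruption registers:
the space ID goes in the high 32 bits, the offset in the low 32. */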
2590 tcg_gen_ld32u_i64(stl, tcg_env,
2591 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2592 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2593 tcg_gen_ld32u_i64(atl, tcg_env,
2594 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2595 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2596 tcg_gen_shli_i64(stl, stl, 32);
2597 tcg_gen_or_i64(addr, atl, stl);
2599 reg = load_gpr(ctx, a->r);
2600 if (a->addr) {
2601 gen_helper_itlba_pa11(tcg_env, addr, reg);
2602 } else {
2603 gen_helper_itlbp_pa11(tcg_env, addr, reg);
2606 /* Exit TB for TLB change if mmu is enabled. */
2607 if (ctx->tb_flags & PSW_C) {
2608 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2610 return nullify_end(ctx);
2611 #endif
2614 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2616 if (!ctx->is_pa20) {
2617 return false;
2619 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2620 #ifndef CONFIG_USER_ONLY
2621 nullify_over(ctx);
2623 TCGv_i64 src1 = load_gpr(ctx, a->r1);
2624 TCGv_i64 src2 = load_gpr(ctx, a->r2);
2626 if (a->data) {
2627 gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2628 } else {
2629 gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2632 /* Exit TB for TLB change if mmu is enabled. */
2633 if (ctx->tb_flags & PSW_C) {
2634 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2636 return nullify_end(ctx);
2637 #endif
2640 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2642 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2643 #ifndef CONFIG_USER_ONLY
2644 TCGv_i64 vaddr;
2645 TCGv_i64 ofs, paddr;
2647 nullify_over(ctx);
2649 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2651 paddr = tcg_temp_new_i64();
2652 gen_helper_lpa(paddr, tcg_env, vaddr);
2654 /* Note that physical address result overrides base modification. */
2655 if (a->m) {
2656 save_gpr(ctx, a->b, ofs);
2658 save_gpr(ctx, a->t, paddr);
2660 return nullify_end(ctx);
2661 #endif
2664 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2666 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2668 /* The Coherence Index is an implementation-defined function of the
2669 physical address. Two addresses with the same CI have a coherent
2670 view of the cache. Our implementation returns 0 for all addresses,
2671 since the entire address space is coherent. */
2672 save_gpr(ctx, a->t, ctx->zero);
2674 cond_free(&ctx->null_cond);
2675 return true;
2678 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2680 return do_add_reg(ctx, a, false, false, false, false);
2683 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2685 return do_add_reg(ctx, a, true, false, false, false);
2688 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2690 return do_add_reg(ctx, a, false, true, false, false);
2693 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2695 return do_add_reg(ctx, a, false, false, false, true);
2698 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2700 return do_add_reg(ctx, a, false, true, false, true);
2703 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2705 return do_sub_reg(ctx, a, false, false, false);
2708 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2710 return do_sub_reg(ctx, a, true, false, false);
2713 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2715 return do_sub_reg(ctx, a, false, false, true);
2718 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2720 return do_sub_reg(ctx, a, true, false, true);
2723 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2725 return do_sub_reg(ctx, a, false, true, false);
2728 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2730 return do_sub_reg(ctx, a, true, true, false);
2733 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2735 return do_log_reg(ctx, a, tcg_gen_andc_i64);
2738 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2740 return do_log_reg(ctx, a, tcg_gen_and_i64);
2743 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2745 if (a->cf == 0) {
2746 unsigned r2 = a->r2;
2747 unsigned r1 = a->r1;
2748 unsigned rt = a->t;
2750 if (rt == 0) { /* NOP */
2751 cond_free(&ctx->null_cond);
2752 return true;
2754 if (r2 == 0) { /* COPY */
2755 if (r1 == 0) {
2756 TCGv_i64 dest = dest_gpr(ctx, rt);
2757 tcg_gen_movi_i64(dest, 0);
2758 save_gpr(ctx, rt, dest);
2759 } else {
2760 save_gpr(ctx, rt, cpu_gr[r1]);
2762 cond_free(&ctx->null_cond);
2763 return true;
2765 #ifndef CONFIG_USER_ONLY
2766 /* These are QEMU extensions and are nops in the real architecture:
2768 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2769 * or %r31,%r31,%r31 -- death loop; offline cpu
2770 * currently implemented as idle.
2772 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2773 /* No need to check for supervisor, as userland can only pause
2774 until the next timer interrupt. */
2775 nullify_over(ctx);
2777 /* Advance the instruction queue. */
2778 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2779 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2780 nullify_set(ctx, 0);
2782 /* Tell the qemu main loop to halt until this cpu has work. */
2783 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2784 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2785 gen_excp_1(EXCP_HALTED);
2786 ctx->base.is_jmp = DISAS_NORETURN;
2788 return nullify_end(ctx);
2790 #endif
2792 return do_log_reg(ctx, a, tcg_gen_or_i64);
2795 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2797 return do_log_reg(ctx, a, tcg_gen_xor_i64);
2800 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2802 TCGv_i64 tcg_r1, tcg_r2;
2804 if (a->cf) {
2805 nullify_over(ctx);
2807 tcg_r1 = load_gpr(ctx, a->r1);
2808 tcg_r2 = load_gpr(ctx, a->r2);
2809 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2810 return nullify_end(ctx);
2813 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2815 TCGv_i64 tcg_r1, tcg_r2, dest;
2817 if (a->cf) {
2818 nullify_over(ctx);
2821 tcg_r1 = load_gpr(ctx, a->r1);
2822 tcg_r2 = load_gpr(ctx, a->r2);
2823 dest = dest_gpr(ctx, a->t);
2825 tcg_gen_xor_i64(dest, tcg_r1, tcg_r2);
2826 save_gpr(ctx, a->t, dest);
2828 cond_free(&ctx->null_cond);
2829 if (a->cf) {
2830 ctx->null_cond = do_unit_zero_cond(a->cf, a->d, dest);
2833 return nullify_end(ctx);
2836 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2838 TCGv_i64 tcg_r1, tcg_r2, tmp;
2840 if (a->cf == 0) {
2841 tcg_r2 = load_gpr(ctx, a->r2);
2842 tmp = dest_gpr(ctx, a->t);
2844 if (a->r1 == 0) {
2845 /* UADDCM r0,src,dst is the common idiom for dst = ~src. */
2846 tcg_gen_not_i64(tmp, tcg_r2);
2847 } else {
2849 * Recall that r1 - r2 == r1 + ~r2 + 1.
2850 * Thus r1 + ~r2 == r1 - r2 - 1,
2851 * which does not require an extra temporary.
2853 tcg_r1 = load_gpr(ctx, a->r1);
2854 tcg_gen_sub_i64(tmp, tcg_r1, tcg_r2);
2855 tcg_gen_subi_i64(tmp, tmp, 1);
2857 save_gpr(ctx, a->t, tmp);
2858 cond_free(&ctx->null_cond);
2859 return true;
2862 nullify_over(ctx);
2863 tcg_r1 = load_gpr(ctx, a->r1);
2864 tcg_r2 = load_gpr(ctx, a->r2);
2865 tmp = tcg_temp_new_i64();
2866 tcg_gen_not_i64(tmp, tcg_r2);
2867 do_unit_addsub(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, true);
2868 return nullify_end(ctx);
2871 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2873 return do_uaddcm(ctx, a, false);
2876 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2878 return do_uaddcm(ctx, a, true);
2881 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2883 TCGv_i64 tmp;
2885 nullify_over(ctx);
2887 tmp = tcg_temp_new_i64();
2888 tcg_gen_extract2_i64(tmp, cpu_psw_cb, cpu_psw_cb_msb, 4);
2889 if (!is_i) {
2890 tcg_gen_not_i64(tmp, tmp);
2892 tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
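/* One marker bit per BCD digit; scaling by 6 (16 - 10) yields the
per-nibble decimal correction. */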
2893 tcg_gen_muli_i64(tmp, tmp, 6);
2894 do_unit_addsub(ctx, a->t, load_gpr(ctx, a->r), tmp,
2895 a->cf, a->d, false, is_i);
2896 return nullify_end(ctx);
2899 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2901 return do_dcor(ctx, a, false);
2904 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2906 return do_dcor(ctx, a, true);
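/*
 * Note that software open-codes a 32-bit division as a short
 * pre-adjustment followed by 32 DS steps (see e.g. the HP $$divU
 * millicode); each step conditionally adds or subtracts the divisor
 * according to PSW[V].
 */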
2909 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2911 TCGv_i64 dest, add1, add2, addc, in1, in2;
2913 nullify_over(ctx);
2915 in1 = load_gpr(ctx, a->r1);
2916 in2 = load_gpr(ctx, a->r2);
2918 add1 = tcg_temp_new_i64();
2919 add2 = tcg_temp_new_i64();
2920 addc = tcg_temp_new_i64();
2921 dest = tcg_temp_new_i64();
2923 /* Form R1 << 1 | PSW[CB]{8}. */
2924 tcg_gen_add_i64(add1, in1, in1);
2925 tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2928 * Add or subtract R2, depending on PSW[V]. Proper computation of
2929 * carry requires that we subtract via + ~R2 + 1, as described in
2930 * the manual. By extracting and masking V, we can produce the
2931 * proper inputs to the addition without movcond.
2933 tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
2934 tcg_gen_xor_i64(add2, in2, addc);
2935 tcg_gen_andi_i64(addc, addc, 1);
2937 tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
2938 tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
2939 addc, ctx->zero);
2941 /* Write back the result register. */
2942 save_gpr(ctx, a->t, dest);
2944 /* Write back PSW[CB]. */
2945 tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
2946 tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
2949 * Write back PSW[V] for the division step.
2950 * Shift cb{8} from where it lives in bit 32 to bit 31,
2951 * so that it overlaps r2{32} in bit 31.
2953 tcg_gen_shri_i64(cpu_psw_v, cpu_psw_cb, 1);
2954 tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
2956 /* Install the new nullification. */
2957 if (a->cf) {
2958 TCGv_i64 sv = NULL, uv = NULL;
2959 if (cond_need_sv(a->cf >> 1)) {
2960 sv = do_add_sv(ctx, dest, add1, add2, in1, 1, false);
2961 } else if (cond_need_cb(a->cf >> 1)) {
2962 uv = do_add_uv(ctx, cpu_psw_cb, NULL, in1, 1, false);
2964 ctx->null_cond = do_cond(ctx, a->cf, false, dest, uv, sv);
2967 return nullify_end(ctx);
2970 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2972 return do_add_imm(ctx, a, false, false);
2975 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2977 return do_add_imm(ctx, a, true, false);
2980 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2982 return do_add_imm(ctx, a, false, true);
2985 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2987 return do_add_imm(ctx, a, true, true);
2990 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2992 return do_sub_imm(ctx, a, false);
2995 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2997 return do_sub_imm(ctx, a, true);
3000 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
3002 TCGv_i64 tcg_im, tcg_r2;
3004 if (a->cf) {
3005 nullify_over(ctx);
3008 tcg_im = tcg_constant_i64(a->i);
3009 tcg_r2 = load_gpr(ctx, a->r);
3010 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
3012 return nullify_end(ctx);
3015 static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
3016 void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
3018 TCGv_i64 r1, r2, dest;
3020 if (!ctx->is_pa20) {
3021 return false;
3024 nullify_over(ctx);
3026 r1 = load_gpr(ctx, a->r1);
3027 r2 = load_gpr(ctx, a->r2);
3028 dest = dest_gpr(ctx, a->t);
3030 fn(dest, r1, r2);
3031 save_gpr(ctx, a->t, dest);
3033 return nullify_end(ctx);
3036 static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
3037 void (*fn)(TCGv_i64, TCGv_i64, int64_t))
3039 TCGv_i64 r, dest;
3041 if (!ctx->is_pa20) {
3042 return false;
3045 nullify_over(ctx);
3047 r = load_gpr(ctx, a->r);
3048 dest = dest_gpr(ctx, a->t);
3050 fn(dest, r, a->i);
3051 save_gpr(ctx, a->t, dest);
3053 return nullify_end(ctx);
3056 static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
3057 void (*fn)(TCGv_i64, TCGv_i64,
3058 TCGv_i64, TCGv_i32))
3060 TCGv_i64 r1, r2, dest;
3062 if (!ctx->is_pa20) {
3063 return false;
3066 nullify_over(ctx);
3068 r1 = load_gpr(ctx, a->r1);
3069 r2 = load_gpr(ctx, a->r2);
3070 dest = dest_gpr(ctx, a->t);
3072 fn(dest, r1, r2, tcg_constant_i32(a->sh));
3073 save_gpr(ctx, a->t, dest);
3075 return nullify_end(ctx);
3078 static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
3080 return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
3083 static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
3085 return do_multimedia(ctx, a, gen_helper_hadd_ss);
3088 static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
3090 return do_multimedia(ctx, a, gen_helper_hadd_us);
3093 static bool trans_havg(DisasContext *ctx, arg_rrr *a)
3095 return do_multimedia(ctx, a, gen_helper_havg);
3098 static bool trans_hshl(DisasContext *ctx, arg_rri *a)
3100 return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
3103 static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
3105 return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
3108 static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
3110 return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
3113 static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
3115 return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
3118 static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
3120 return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
3123 static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
3125 return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
3128 static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
3130 return do_multimedia(ctx, a, gen_helper_hsub_ss);
3133 static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
3135 return do_multimedia(ctx, a, gen_helper_hsub_us);
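/*
 * The MIX operations interleave halfwords/words of r1 and r2.
 * MIXH,L takes the left halfword of each 32-bit word:
 * dst = { r1.h0, r2.h0, r1.h2, r2.h2 }.
 */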
3138 static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3140 uint64_t mask = 0xffff0000ffff0000ull;
3141 TCGv_i64 tmp = tcg_temp_new_i64();
3143 tcg_gen_andi_i64(tmp, r2, mask);
3144 tcg_gen_andi_i64(dst, r1, mask);
3145 tcg_gen_shri_i64(tmp, tmp, 16);
3146 tcg_gen_or_i64(dst, dst, tmp);
3149 static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
3151 return do_multimedia(ctx, a, gen_mixh_l);
3154 static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3156 uint64_t mask = 0x0000ffff0000ffffull;
3157 TCGv_i64 tmp = tcg_temp_new_i64();
3159 tcg_gen_andi_i64(tmp, r1, mask);
3160 tcg_gen_andi_i64(dst, r2, mask);
3161 tcg_gen_shli_i64(tmp, tmp, 16);
3162 tcg_gen_or_i64(dst, dst, tmp);
3165 static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
3167 return do_multimedia(ctx, a, gen_mixh_r);
3170 static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3172 TCGv_i64 tmp = tcg_temp_new_i64();
3174 tcg_gen_shri_i64(tmp, r2, 32);
3175 tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
3178 static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
3180 return do_multimedia(ctx, a, gen_mixw_l);
3183 static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3185 tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
3188 static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
3190 return do_multimedia(ctx, a, gen_mixw_r);
3193 static bool trans_permh(DisasContext *ctx, arg_permh *a)
3195 TCGv_i64 r, t0, t1, t2, t3;
3197 if (!ctx->is_pa20) {
3198 return false;
3201 nullify_over(ctx);
3203 r = load_gpr(ctx, a->r1);
3204 t0 = tcg_temp_new_i64();
3205 t1 = tcg_temp_new_i64();
3206 t2 = tcg_temp_new_i64();
3207 t3 = tcg_temp_new_i64();
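/* Halfword indices in the insn are big-endian: index 0 names the most
significant halfword, hence the (3 - cN) * 16 extraction offsets. */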
3209 tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
3210 tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
3211 tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
3212 tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);
3214 tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
3215 tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
3216 tcg_gen_deposit_i64(t0, t2, t0, 32, 32);
3218 save_gpr(ctx, a->t, t0);
3219 return nullify_end(ctx);
3222 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
3224 if (ctx->is_pa20) {
3226 * With pa20, LDB, LDH, LDW, LDD to %r0 are prefetches.
3227 * Any base modification still occurs.
3229 if (a->t == 0) {
3230 return trans_nop_addrx(ctx, a);
3232 } else if (a->size > MO_32) {
3233 return gen_illegal(ctx);
3235 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
3236 a->disp, a->sp, a->m, a->size | MO_TE);
3239 static bool trans_st(DisasContext *ctx, arg_ldst *a)
3241 assert(a->x == 0 && a->scale == 0);
3242 if (!ctx->is_pa20 && a->size > MO_32) {
3243 return gen_illegal(ctx);
3245 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
3248 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
3250 MemOp mop = MO_TE | MO_ALIGN | a->size;
3251 TCGv_i64 dest, ofs;
3252 TCGv_i64 addr;
3254 if (!ctx->is_pa20 && a->size > MO_32) {
3255 return gen_illegal(ctx);
3258 nullify_over(ctx);
3260 if (a->m) {
3261 /* Base register modification. Make sure that if RT == RB,
3262 we see the result of the load. */
3263 dest = tcg_temp_new_i64();
3264 } else {
3265 dest = dest_gpr(ctx, a->t);
3268 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? 3 : 0,
3269 a->disp, a->sp, a->m, MMU_DISABLED(ctx));
3272 * For hppa1.1, LDCW is undefined unless aligned mod 16.
3273 * However, actual hardware succeeds when aligned mod 4.
3274 * Detect this case and log a GUEST_ERROR.
3276 * TODO: HPPA64 relaxes the over-alignment requirement
3277 * with the ,co completer.
3279 gen_helper_ldc_check(addr);
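/* LDCW atomically returns the old memory value and stores zero. */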
3281 tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);
3283 if (a->m) {
3284 save_gpr(ctx, a->b, ofs);
3286 save_gpr(ctx, a->t, dest);
3288 return nullify_end(ctx);
3291 static bool trans_stby(DisasContext *ctx, arg_stby *a)
3293 TCGv_i64 ofs, val;
3294 TCGv_i64 addr;
3296 nullify_over(ctx);
3298 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3299 MMU_DISABLED(ctx));
3300 val = load_gpr(ctx, a->r);
3301 if (a->a) {
3302 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3303 gen_helper_stby_e_parallel(tcg_env, addr, val);
3304 } else {
3305 gen_helper_stby_e(tcg_env, addr, val);
3307 } else {
3308 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3309 gen_helper_stby_b_parallel(tcg_env, addr, val);
3310 } else {
3311 gen_helper_stby_b(tcg_env, addr, val);
3314 if (a->m) {
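/* Base register modification for STBY is word-aligned. */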
3315 tcg_gen_andi_i64(ofs, ofs, ~3);
3316 save_gpr(ctx, a->b, ofs);
3319 return nullify_end(ctx);
3322 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
3324 TCGv_i64 ofs, val;
3325 TCGv_i64 addr;
3327 if (!ctx->is_pa20) {
3328 return false;
3330 nullify_over(ctx);
3332 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3333 MMU_DISABLED(ctx));
3334 val = load_gpr(ctx, a->r);
3335 if (a->a) {
3336 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3337 gen_helper_stdby_e_parallel(tcg_env, addr, val);
3338 } else {
3339 gen_helper_stdby_e(tcg_env, addr, val);
3341 } else {
3342 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3343 gen_helper_stdby_b_parallel(tcg_env, addr, val);
3344 } else {
3345 gen_helper_stdby_b(tcg_env, addr, val);
3348 if (a->m) {
3349 tcg_gen_andi_i64(ofs, ofs, ~7);
3350 save_gpr(ctx, a->b, ofs);
3353 return nullify_end(ctx);
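/*
 * The absolute-access forms (e.g. LDWA/LDDA, STWA/STDA) bypass virtual
 * translation, so run the normal load/store with the physical MMU index.
 */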
3356 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3358 int hold_mmu_idx = ctx->mmu_idx;
3360 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3361 ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
3362 trans_ld(ctx, a);
3363 ctx->mmu_idx = hold_mmu_idx;
3364 return true;
3367 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3369 int hold_mmu_idx = ctx->mmu_idx;
3371 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3372 ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
3373 trans_st(ctx, a);
3374 ctx->mmu_idx = hold_mmu_idx;
3375 return true;
3378 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3380 TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3382 tcg_gen_movi_i64(tcg_rt, a->i);
3383 save_gpr(ctx, a->t, tcg_rt);
3384 cond_free(&ctx->null_cond);
3385 return true;
3388 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3390 TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
3391 TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
3393 tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
3394 save_gpr(ctx, 1, tcg_r1);
3395 cond_free(&ctx->null_cond);
3396 return true;
3399 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3401 TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3403 /* Special case rb == 0, for the LDI pseudo-op.
3404 The COPY pseudo-op is handled for free within tcg_gen_addi_i64. */
3405 if (a->b == 0) {
3406 tcg_gen_movi_i64(tcg_rt, a->i);
3407 } else {
3408 tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
3410 save_gpr(ctx, a->t, tcg_rt);
3411 cond_free(&ctx->null_cond);
3412 return true;
3415 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3416 unsigned c, unsigned f, bool d, unsigned n, int disp)
3418 TCGv_i64 dest, in2, sv;
3419 DisasCond cond;
3421 in2 = load_gpr(ctx, r);
3422 dest = tcg_temp_new_i64();
3424 tcg_gen_sub_i64(dest, in1, in2);
3426 sv = NULL;
3427 if (cond_need_sv(c)) {
3428 sv = do_sub_sv(ctx, dest, in1, in2);
3431 cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3432 return do_cbranch(ctx, disp, n, &cond);
3435 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3437 if (!ctx->is_pa20 && a->d) {
3438 return false;
3440 nullify_over(ctx);
3441 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3442 a->c, a->f, a->d, a->n, a->disp);
3445 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3447 if (!ctx->is_pa20 && a->d) {
3448 return false;
3450 nullify_over(ctx);
3451 return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
3452 a->c, a->f, a->d, a->n, a->disp);
3455 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3456 unsigned c, unsigned f, unsigned n, int disp)
3458 TCGv_i64 dest, in2, sv, cb_cond;
3459 DisasCond cond;
3460 bool d = false;
3463 * For hppa64, the ADDB conditions change with PSW.W,
3464 * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3466 if (ctx->tb_flags & PSW_W) {
3467 d = c >= 5;
3468 if (d) {
3469 c &= 3;
3473 in2 = load_gpr(ctx, r);
3474 dest = tcg_temp_new_i64();
3475 sv = NULL;
3476 cb_cond = NULL;
3478 if (cond_need_cb(c)) {
3479 TCGv_i64 cb = tcg_temp_new_i64();
3480 TCGv_i64 cb_msb = tcg_temp_new_i64();
3482 tcg_gen_movi_i64(cb_msb, 0);
3483 tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3484 tcg_gen_xor_i64(cb, in1, in2);
3485 tcg_gen_xor_i64(cb, cb, dest);
3486 cb_cond = get_carry(ctx, d, cb, cb_msb);
3487 } else {
3488 tcg_gen_add_i64(dest, in1, in2);
3490 if (cond_need_sv(c)) {
3491 sv = do_add_sv(ctx, dest, in1, in2, in1, 0, d);
3494 cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3495 save_gpr(ctx, r, dest);
3496 return do_cbranch(ctx, disp, n, &cond);
3499 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3501 nullify_over(ctx);
3502 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3505 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3507 nullify_over(ctx);
3508 return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3511 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3513 TCGv_i64 tmp, tcg_r;
3514 DisasCond cond;
3516 nullify_over(ctx);
3518 tmp = tcg_temp_new_i64();
3519 tcg_r = load_gpr(ctx, a->r);
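/* Move the selected bit into the sign position; the LT/GE tests below
then observe whether that bit was set or clear. */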
3520 if (a->d) {
3521 tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3522 } else {
3523 /* Force shift into [32,63] */
3524 tcg_gen_ori_i64(tmp, cpu_sar, 32);
3525 tcg_gen_shl_i64(tmp, tcg_r, tmp);
3528 cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3529 return do_cbranch(ctx, a->disp, a->n, &cond);
3532 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3534 TCGv_i64 tmp, tcg_r;
3535 DisasCond cond;
3536 int p;
3538 nullify_over(ctx);
3540 tmp = tcg_temp_new_i64();
3541 tcg_r = load_gpr(ctx, a->r);
3542 p = a->p | (a->d ? 0 : 32);
3543 tcg_gen_shli_i64(tmp, tcg_r, p);
3545 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3546 return do_cbranch(ctx, a->disp, a->n, &cond);
3549 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3551 TCGv_i64 dest;
3552 DisasCond cond;
3554 nullify_over(ctx);
3556 dest = dest_gpr(ctx, a->r2);
3557 if (a->r1 == 0) {
3558 tcg_gen_movi_i64(dest, 0);
3559 } else {
3560 tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3563 /* All MOVB conditions are 32-bit. */
3564 cond = do_sed_cond(ctx, a->c, false, dest);
3565 return do_cbranch(ctx, a->disp, a->n, &cond);
3568 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3570 TCGv_i64 dest;
3571 DisasCond cond;
3573 nullify_over(ctx);
3575 dest = dest_gpr(ctx, a->r);
3576 tcg_gen_movi_i64(dest, a->i);
3578 /* All MOVBI conditions are 32-bit. */
3579 cond = do_sed_cond(ctx, a->c, false, dest);
3580 return do_cbranch(ctx, a->disp, a->n, &cond);
3583 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3585 TCGv_i64 dest, src2;
3587 if (!ctx->is_pa20 && a->d) {
3588 return false;
3590 if (a->c) {
3591 nullify_over(ctx);
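/* Shift-right-pair of r1:r2 by SAR. r1 == 0 degenerates into a plain
shift, and r1 == r2 into a rotate. */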
3594 dest = dest_gpr(ctx, a->t);
3595 src2 = load_gpr(ctx, a->r2);
3596 if (a->r1 == 0) {
3597 if (a->d) {
3598 tcg_gen_shr_i64(dest, src2, cpu_sar);
3599 } else {
3600 TCGv_i64 tmp = tcg_temp_new_i64();
3602 tcg_gen_ext32u_i64(dest, src2);
3603 tcg_gen_andi_i64(tmp, cpu_sar, 31);
3604 tcg_gen_shr_i64(dest, dest, tmp);
3606 } else if (a->r1 == a->r2) {
3607 if (a->d) {
3608 tcg_gen_rotr_i64(dest, src2, cpu_sar);
3609 } else {
3610 TCGv_i32 t32 = tcg_temp_new_i32();
3611 TCGv_i32 s32 = tcg_temp_new_i32();
3613 tcg_gen_extrl_i64_i32(t32, src2);
3614 tcg_gen_extrl_i64_i32(s32, cpu_sar);
3615 tcg_gen_andi_i32(s32, s32, 31);
3616 tcg_gen_rotr_i32(t32, t32, s32);
3617 tcg_gen_extu_i32_i64(dest, t32);
3619 } else {
3620 TCGv_i64 src1 = load_gpr(ctx, a->r1);
3622 if (a->d) {
3623 TCGv_i64 t = tcg_temp_new_i64();
3624 TCGv_i64 n = tcg_temp_new_i64();
3626 tcg_gen_xori_i64(n, cpu_sar, 63);
3627 tcg_gen_shl_i64(t, src1, n);
3628 tcg_gen_shli_i64(t, t, 1);
3629 tcg_gen_shr_i64(dest, src2, cpu_sar);
3630 tcg_gen_or_i64(dest, dest, t);
3631 } else {
3632 TCGv_i64 t = tcg_temp_new_i64();
3633 TCGv_i64 s = tcg_temp_new_i64();
3635 tcg_gen_concat32_i64(t, src2, src1);
3636 tcg_gen_andi_i64(s, cpu_sar, 31);
3637 tcg_gen_shr_i64(dest, t, s);
3640 save_gpr(ctx, a->t, dest);
3642 /* Install the new nullification. */
3643 cond_free(&ctx->null_cond);
3644 if (a->c) {
3645 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3647 return nullify_end(ctx);
3650 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3652 unsigned width, sa;
3653 TCGv_i64 dest, t2;
3655 if (!ctx->is_pa20 && a->d) {
3656 return false;
3658 if (a->c) {
3659 nullify_over(ctx);
3662 width = a->d ? 64 : 32;
3663 sa = width - 1 - a->cpos;
3665 dest = dest_gpr(ctx, a->t);
3666 t2 = load_gpr(ctx, a->r2);
3667 if (a->r1 == 0) {
3668 tcg_gen_extract_i64(dest, t2, sa, width - sa);
3669 } else if (width == TARGET_LONG_BITS) {
3670 tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3671 } else {
3672 assert(!a->d);
3673 if (a->r1 == a->r2) {
3674 TCGv_i32 t32 = tcg_temp_new_i32();
3675 tcg_gen_extrl_i64_i32(t32, t2);
3676 tcg_gen_rotri_i32(t32, t32, sa);
3677 tcg_gen_extu_i32_i64(dest, t32);
3678 } else {
3679 tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3680 tcg_gen_extract_i64(dest, dest, sa, 32);
3683 save_gpr(ctx, a->t, dest);
3685 /* Install the new nullification. */
3686 cond_free(&ctx->null_cond);
3687 if (a->c) {
3688 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3690 return nullify_end(ctx);
3693 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3695 unsigned widthm1 = a->d ? 63 : 31;
3696 TCGv_i64 dest, src, tmp;
3698 if (!ctx->is_pa20 && a->d) {
3699 return false;
3701 if (a->c) {
3702 nullify_over(ctx);
3705 dest = dest_gpr(ctx, a->t);
3706 src = load_gpr(ctx, a->r);
3707 tmp = tcg_temp_new_i64();
3709 /* Recall that SAR uses big-endian bit numbering. */
3710 tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3711 tcg_gen_xori_i64(tmp, tmp, widthm1);
3713 if (a->se) {
3714 if (!a->d) {
3715 tcg_gen_ext32s_i64(dest, src);
3716 src = dest;
3718 tcg_gen_sar_i64(dest, src, tmp);
3719 tcg_gen_sextract_i64(dest, dest, 0, a->len);
3720 } else {
3721 if (!a->d) {
3722 tcg_gen_ext32u_i64(dest, src);
3723 src = dest;
3725 tcg_gen_shr_i64(dest, src, tmp);
3726 tcg_gen_extract_i64(dest, dest, 0, a->len);
3728 save_gpr(ctx, a->t, dest);
3730 /* Install the new nullification. */
3731 cond_free(&ctx->null_cond);
3732 if (a->c) {
3733 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3735 return nullify_end(ctx);
3738 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3740 unsigned len, cpos, width;
3741 TCGv_i64 dest, src;
3743 if (!ctx->is_pa20 && a->d) {
3744 return false;
3746 if (a->c) {
3747 nullify_over(ctx);
3750 len = a->len;
3751 width = a->d ? 64 : 32;
3752 cpos = width - 1 - a->pos;
3753 if (cpos + len > width) {
3754 len = width - cpos;
3757 dest = dest_gpr(ctx, a->t);
3758 src = load_gpr(ctx, a->r);
3759 if (a->se) {
3760 tcg_gen_sextract_i64(dest, src, cpos, len);
3761 } else {
3762 tcg_gen_extract_i64(dest, src, cpos, len);
3764 save_gpr(ctx, a->t, dest);
3766 /* Install the new nullification. */
3767 cond_free(&ctx->null_cond);
3768 if (a->c) {
3769 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3771 return nullify_end(ctx);
3774 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3776 unsigned len, width;
3777 uint64_t mask0, mask1;
3778 TCGv_i64 dest;
3780 if (!ctx->is_pa20 && a->d) {
3781 return false;
3783 if (a->c) {
3784 nullify_over(ctx);
3787 len = a->len;
3788 width = a->d ? 64 : 32;
3789 if (a->cpos + len > width) {
3790 len = width - a->cpos;
3793 dest = dest_gpr(ctx, a->t);
3794 mask0 = deposit64(0, a->cpos, len, a->i);
3795 mask1 = deposit64(-1, a->cpos, len, a->i);
3797 if (a->nz) {
3798 TCGv_i64 src = load_gpr(ctx, a->t);
3799 tcg_gen_andi_i64(dest, src, mask1);
3800 tcg_gen_ori_i64(dest, dest, mask0);
3801 } else {
3802 tcg_gen_movi_i64(dest, mask0);
3804 save_gpr(ctx, a->t, dest);
3806 /* Install the new nullification. */
3807 cond_free(&ctx->null_cond);
3808 if (a->c) {
3809 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3811 return nullify_end(ctx);
3814 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3816 unsigned rs = a->nz ? a->t : 0;
3817 unsigned len, width;
3818 TCGv_i64 dest, val;
3820 if (!ctx->is_pa20 && a->d) {
3821 return false;
3823 if (a->c) {
3824 nullify_over(ctx);
3827 len = a->len;
3828 width = a->d ? 64 : 32;
3829 if (a->cpos + len > width) {
3830 len = width - a->cpos;
3833 dest = dest_gpr(ctx, a->t);
3834 val = load_gpr(ctx, a->r);
3835 if (rs == 0) {
3836 tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
3837 } else {
3838 tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
3840 save_gpr(ctx, a->t, dest);
3842 /* Install the new nullification. */
3843 cond_free(&ctx->null_cond);
3844 if (a->c) {
3845 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3847 return nullify_end(ctx);
3850 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3851 bool d, bool nz, unsigned len, TCGv_i64 val)
3853 unsigned rs = nz ? rt : 0;
3854 unsigned widthm1 = d ? 63 : 31;
3855 TCGv_i64 mask, tmp, shift, dest;
3856 uint64_t msb = 1ULL << (len - 1);
3858 dest = dest_gpr(ctx, rt);
3859 shift = tcg_temp_new_i64();
3860 tmp = tcg_temp_new_i64();
3862 /* Convert big-endian bit numbering in SAR to left-shift. */
3863 tcg_gen_andi_i64(shift, cpu_sar, widthm1);
3864 tcg_gen_xori_i64(shift, shift, widthm1);
3866 mask = tcg_temp_new_i64();
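/* msb + (msb - 1) == (1 << len) - 1, a mask of the low len bits. */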
3867 tcg_gen_movi_i64(mask, msb + (msb - 1));
3868 tcg_gen_and_i64(tmp, val, mask);
3869 if (rs) {
3870 tcg_gen_shl_i64(mask, mask, shift);
3871 tcg_gen_shl_i64(tmp, tmp, shift);
3872 tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
3873 tcg_gen_or_i64(dest, dest, tmp);
3874 } else {
3875 tcg_gen_shl_i64(dest, tmp, shift);
3877 save_gpr(ctx, rt, dest);
3879 /* Install the new nullification. */
3880 cond_free(&ctx->null_cond);
3881 if (c) {
3882 ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3884 return nullify_end(ctx);
3887 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3889 if (!ctx->is_pa20 && a->d) {
3890 return false;
3892 if (a->c) {
3893 nullify_over(ctx);
3895 return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3896 load_gpr(ctx, a->r));
3899 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3901 if (!ctx->is_pa20 && a->d) {
3902 return false;
3904 if (a->c) {
3905 nullify_over(ctx);
3907 return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3908 tcg_constant_i64(a->i));
3911 static bool trans_be(DisasContext *ctx, arg_be *a)
3913 TCGv_i64 tmp;
3915 #ifdef CONFIG_USER_ONLY
3916 /* ??? It seems like there should be a good way of using
3917 "be disp(sr2, r0)", the canonical gateway entry mechanism
3918 to our advantage. But that appears to be inconvenient to
3919 manage alongside branch delay slots. Therefore we handle
3920 entry into the gateway page via absolute address. */
3921 /* Since we don't implement spaces, just branch. Do notice the special
3922 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3923 goto_tb to the TB containing the syscall. */
3924 if (a->b == 0) {
3925 return do_dbranch(ctx, a->disp, a->l, a->n);
3927 #else
3928 nullify_over(ctx);
3929 #endif
3931 tmp = tcg_temp_new_i64();
3932 tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
3933 tmp = do_ibranch_priv(ctx, tmp);
3935 #ifdef CONFIG_USER_ONLY
3936 return do_ibranch(ctx, tmp, a->l, a->n);
3937 #else
3938 TCGv_i64 new_spc = tcg_temp_new_i64();
3940 load_spr(ctx, new_spc, a->sp);
3941 if (a->l) {
3942 copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3943 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_b);
3945 if (a->n && use_nullify_skip(ctx)) {
3946 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3947 tcg_gen_addi_i64(tmp, tmp, 4);
3948 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3949 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3950 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3951 nullify_set(ctx, 0);
3952 } else {
3953 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3954 if (ctx->iaoq_b == -1) {
3955 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3957 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3958 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3959 nullify_set(ctx, a->n);
3961 tcg_gen_lookup_and_goto_ptr();
3962 ctx->base.is_jmp = DISAS_NORETURN;
3963 return nullify_end(ctx);
3964 #endif
3967 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3969 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3972 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3974 uint64_t dest = iaoq_dest(ctx, a->disp);
3976 nullify_over(ctx);
3978 /* Make sure the caller hasn't done something weird with the queue.
3979 * ??? This is not quite the same as the PSW[B] bit, which would be
3980 * expensive to track. Real hardware will trap for
3981 * b gateway
3982 * b gateway+4 (in delay slot of first branch)
3983 * However, checking for a non-sequential instruction queue *will*
3984 * diagnose the security hole
3985 * b gateway
3986 * b evil
3987 * in which the instructions at evil would run with increased privileges.
3989 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3990 return gen_illegal(ctx);
3993 #ifndef CONFIG_USER_ONLY
3994 if (ctx->tb_flags & PSW_C) {
3995 int type = hppa_artype_for_page(cpu_env(ctx->cs), ctx->base.pc_next);
3996 /* If we could not find a TLB entry, then we need to generate an
3997 ITLB miss exception so the kernel will provide it.
3998 The resulting TLB fill operation will invalidate this TB and
3999 we will re-translate, at which point we *will* be able to find
4000 the TLB entry and determine if this is in fact a gateway page. */
4001 if (type < 0) {
4002 gen_excp(ctx, EXCP_ITLB_MISS);
4003 return true;
4005 /* No change for non-gateway pages or for priv decrease. */
4006 if (type >= 4 && type - 4 < ctx->privilege) {
4007 dest = deposit64(dest, 0, 2, type - 4);
4009 } else {
4010 dest &= -4; /* priv = 0 */
4012 #endif
4014 if (a->l) {
4015 TCGv_i64 tmp = dest_gpr(ctx, a->l);
4016 if (ctx->privilege < 3) {
4017 tcg_gen_andi_i64(tmp, tmp, -4);
4019 tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
4020 save_gpr(ctx, a->l, tmp);
4023 return do_dbranch(ctx, dest, 0, a->n);
4026 static bool trans_blr(DisasContext *ctx, arg_blr *a)
4028 if (a->x) {
4029 TCGv_i64 tmp = tcg_temp_new_i64();
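/* Scale the index by 8; each branch-table entry is two insns. */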
4030 tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
4031 tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
4032 /* The computation here never changes privilege level. */
4033 return do_ibranch(ctx, tmp, a->l, a->n);
4034 } else {
4035 /* BLR R0,RX is a good way to load PC+8 into RX. */
4036 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
4040 static bool trans_bv(DisasContext *ctx, arg_bv *a)
4042 TCGv_i64 dest;
4044 if (a->x == 0) {
4045 dest = load_gpr(ctx, a->b);
4046 } else {
4047 dest = tcg_temp_new_i64();
4048 tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
4049 tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
4051 dest = do_ibranch_priv(ctx, dest);
4052 return do_ibranch(ctx, dest, 0, a->n);
4055 static bool trans_bve(DisasContext *ctx, arg_bve *a)
4057 TCGv_i64 dest;
4059 #ifdef CONFIG_USER_ONLY
4060 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
4061 return do_ibranch(ctx, dest, a->l, a->n);
4062 #else
4063 nullify_over(ctx);
4064 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
4066 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
4067 if (ctx->iaoq_b == -1) {
4068 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4070 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
4071 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
4072 if (a->l) {
4073 copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
4075 nullify_set(ctx, a->n);
4076 tcg_gen_lookup_and_goto_ptr();
4077 ctx->base.is_jmp = DISAS_NORETURN;
4078 return nullify_end(ctx);
4079 #endif
4082 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
4084 /* All branch target stack instructions are implemented as nops. */
4085 return ctx->is_pa20;
4089 * Float class 0
4092 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4094 tcg_gen_mov_i32(dst, src);
4097 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
4099 uint64_t ret;
4101 if (ctx->is_pa20) {
4102 ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
4103 } else {
4104 ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
4107 nullify_over(ctx);
4108 save_frd(0, tcg_constant_i64(ret));
4109 return nullify_end(ctx);
4112 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
4114 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
4117 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4119 tcg_gen_mov_i64(dst, src);
4122 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
4124 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
4127 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4129 tcg_gen_andi_i32(dst, src, INT32_MAX);
4132 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
4134 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
4137 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4139 tcg_gen_andi_i64(dst, src, INT64_MAX);
4142 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
4144 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
4147 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
4149 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
4152 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
4154 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
4157 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
4159 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
4162 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
4164 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
4167 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4169 tcg_gen_xori_i32(dst, src, INT32_MIN);
4172 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
4174 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
4177 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4179 tcg_gen_xori_i64(dst, src, INT64_MIN);
4182 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
4184 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
4187 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4189 tcg_gen_ori_i32(dst, src, INT32_MIN);
4192 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
4194 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
4197 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4199 tcg_gen_ori_i64(dst, src, INT64_MIN);
4202 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
4204 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
4208 * Float class 1
4211 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
4213 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
4216 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
4218 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
4221 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
4223 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
4226 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
4228 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
4231 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
4233 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
4236 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
4238 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
4241 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
4243 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
4246 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
4248 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
4251 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
4253 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
4256 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
4258 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
4261 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
4263 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
4266 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
4268 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
4271 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
4273 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
4276 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
4278 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
4281 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
4283 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
4286 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
4288 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
4291 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
4293 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
4296 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
4298 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
4301 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
4303 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
4306 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
4308 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
4311 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
4313 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
4316 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
4318 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
4321 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
4323 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
4326 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
4328 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
4331 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
4333 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
4336 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
4338 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
4342 * Float class 2
4345 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4347 TCGv_i32 ta, tb, tc, ty;
4349 nullify_over(ctx);
4351 ta = load_frw0_i32(a->r1);
4352 tb = load_frw0_i32(a->r2);
4353 ty = tcg_constant_i32(a->y);
4354 tc = tcg_constant_i32(a->c);
4356 gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4358 return nullify_end(ctx);
4361 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4363 TCGv_i64 ta, tb;
4364 TCGv_i32 tc, ty;
4366 nullify_over(ctx);
4368 ta = load_frd0(a->r1);
4369 tb = load_frd0(a->r2);
4370 ty = tcg_constant_i32(a->y);
4371 tc = tcg_constant_i32(a->c);
4373 gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4375 return nullify_end(ctx);
4378 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
4380 TCGv_i64 t;
4382 nullify_over(ctx);
4384 t = tcg_temp_new_i64();
4385 tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
4387 if (a->y == 1) {
4388 int mask;
4389 bool inv = false;
4391 switch (a->c) {
4392 case 0: /* simple */
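/* The simple test examines only the FPSR C bit (bit 26 of the fr0 shadow). */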
4393 tcg_gen_andi_i64(t, t, 0x4000000);
4394 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4395 goto done;
4396 case 2: /* rej */
4397 inv = true;
4398 /* fallthru */
4399 case 1: /* acc */
4400 mask = 0x43ff800;
4401 break;
4402 case 6: /* rej8 */
4403 inv = true;
4404 /* fallthru */
4405 case 5: /* acc8 */
4406 mask = 0x43f8000;
4407 break;
4408 case 9: /* acc6 */
4409 mask = 0x43e0000;
4410 break;
4411 case 13: /* acc4 */
4412 mask = 0x4380000;
4413 break;
4414 case 17: /* acc2 */
4415 mask = 0x4200000;
4416 break;
4417 default:
4418 gen_illegal(ctx);
4419 return true;
4421 if (inv) {
4422 TCGv_i64 c = tcg_constant_i64(mask);
4423 tcg_gen_or_i64(t, t, c);
4424 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4425 } else {
4426 tcg_gen_andi_i64(t, t, mask);
4427 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4429 } else {
4430 unsigned cbit = (a->y ^ 1) - 1;
4432 tcg_gen_extract_i64(t, t, 21 - cbit, 1);
4433 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4436 done:
4437 return nullify_end(ctx);
4441 * Float class 3
4444 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4446 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4449 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4451 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4454 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4456 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4459 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4461 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4464 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4466 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4469 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4471 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4474 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4476 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4479 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4481 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4484 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4486 TCGv_i64 x, y;
4488 nullify_over(ctx);
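/* XMPYU: unsigned 32-bit x 32-bit -> 64-bit multiply within the FP regs. */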
4490 x = load_frw0_i64(a->r1);
4491 y = load_frw0_i64(a->r2);
4492 tcg_gen_mul_i64(x, x, y);
4493 save_frd(a->t, x);
4495 return nullify_end(ctx);
4498 /* Convert the fmpyadd single-precision register encodings to standard. */
4499 static inline int fmpyadd_s_reg(unsigned r)
4501 return (r & 16) * 2 + 16 + (r & 15);
static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

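/* The double-precision form can address all 32 FP registers, so the
   register fields are used unmodified. */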
static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}

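/*
 * FMPYFADD (PA2.0): fused multiply-add, t = rm1 * rm2 + ra3 with a
 * single rounding; the neg flag selects FMPYNFADD, which negates the
 * product before the addition.
 */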
static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
    }

    save_frw_i32(a->t, x);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
    }

    save_frd(a->t, x);
    return nullify_end(ctx);
}

/* Emulate PDC BTLB, called by SeaBIOS-hppa */
static bool trans_diag_btlb(DisasContext *ctx, arg_diag_btlb *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_diag_btlb(tcg_env);
    return nullify_end(ctx);
#endif
}

/* Print char in %r26 to first serial console, used by SeaBIOS-hppa */
static bool trans_diag_cout(DisasContext *ctx, arg_diag_cout *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_diag_console_output(tcg_env);
    return nullify_end(ctx);
#endif
}

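/* The shadow-register DIAG ops use is_pa20 to accept only the
   encoding that matches the current CPU; returning false rejects the
   insn so that decode can fall back to another pattern (these appear
   to sit in an overlap group with diag_unimp in hppa.decode). */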
static bool trans_diag_getshadowregs_pa1(DisasContext *ctx, arg_empty *a)
{
    return !ctx->is_pa20 && do_getshadowregs(ctx);
}

static bool trans_diag_getshadowregs_pa2(DisasContext *ctx, arg_empty *a)
{
    return ctx->is_pa20 && do_getshadowregs(ctx);
}

static bool trans_diag_putshadowregs_pa1(DisasContext *ctx, arg_empty *a)
{
    return !ctx->is_pa20 && do_putshadowregs(ctx);
}

static bool trans_diag_putshadowregs_pa2(DisasContext *ctx, arg_empty *a)
{
    return ctx->is_pa20 && do_putshadowregs(ctx);
}

static bool trans_diag_unimp(DisasContext *ctx, arg_diag_unimp *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
    return true;
}

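/*
 * Translator framework hooks.  hppa_tr_init_disas_context runs once
 * per translation block: it latches the TB flags, derives the MMU
 * index and privilege level, and recovers the front and back values
 * of the instruction address offset queue (IAOQ) that the translator
 * tracks symbolically.
 */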
static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;
    ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
    ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                    : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV. */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    ctx->zero = tcg_constant_i64(0);

    /* Bound the number of instructions by those left on the page. */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

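/* Record per-insn state for restore_state_to_opc: both IAOQ values,
   plus a third slot that a memory insn may later overwrite with its
   unwind base register (tracked by insn_start_updated). */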
static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b, 0);
    ctx->insn_start_updated = false;
}

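/*
 * Translate a single insn.  iaoq_b == -1 denotes a back queue value
 * that is only known at run time (e.g. after an indirect branch), in
 * which case the next queue entry must be computed dynamically from
 * cpu_iaoq_b.
 */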
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cpu_env(cs);
    DisasJumpType ret;

    /* Execute one insn. */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute. */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch. */
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = tcg_temp_new_i64();
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue. */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
            copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}

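/*
 * At the end of the TB, write any still-symbolic queue state back to
 * cpu_iaoq_f/cpu_iaoq_b, then either chain via lookup_and_goto_ptr or
 * exit to the main loop so that interrupts and PSW changes are noticed.
 */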
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

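/* In user-only mode, page zero holds magic kernel entry points
   handled by do_page_zero rather than real guest code, so label them
   instead of disassembling. */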
static void hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
    target_disas(logfile, cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start = hppa_tr_tb_start,
    .insn_start = hppa_tr_insn_start,
    .translate_insn = hppa_tr_translate_insn,
    .tb_stop = hppa_tr_tb_stop,
    .disas_log = hppa_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}