2 * HPPA emulation cpu translation for qemu.
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
34 typedef struct DisasCond
{
41 typedef struct DisasContext
{
42 struct TranslationBlock
*tb
;
56 bool singlestep_enabled
;
60 /* Return values from translate_one, indicating the state of the TB.
61 Note that zero indicates that we are not exiting the TB. */
66 /* We have emitted one or more goto_tb. No fixup required. */
69 /* We are not using a goto_tb (for whatever reason), but have updated
70 the iaq (for whatever reason), so don't do it again on exit. */
73 /* We are exiting the TB, but have neither emitted a goto_tb, nor
74 updated the iaq for the next instruction to be executed. */
77 /* We are ending the TB with a noreturn function call, e.g. longjmp.
78 No following code will be executed. */
82 typedef struct DisasInsn
{
84 ExitStatus (*trans
)(DisasContext
*ctx
, uint32_t insn
,
85 const struct DisasInsn
*f
);
87 void (*f_ttt
)(TCGv
, TCGv
, TCGv
);
88 void (*f_weww
)(TCGv_i32
, TCGv_env
, TCGv_i32
, TCGv_i32
);
89 void (*f_dedd
)(TCGv_i64
, TCGv_env
, TCGv_i64
, TCGv_i64
);
90 void (*f_wew
)(TCGv_i32
, TCGv_env
, TCGv_i32
);
91 void (*f_ded
)(TCGv_i64
, TCGv_env
, TCGv_i64
);
92 void (*f_wed
)(TCGv_i32
, TCGv_env
, TCGv_i64
);
93 void (*f_dew
)(TCGv_i64
, TCGv_env
, TCGv_i32
);
97 /* global register indexes */
98 static TCGv_env cpu_env
;
99 static TCGv cpu_gr
[32];
100 static TCGv cpu_iaoq_f
;
101 static TCGv cpu_iaoq_b
;
103 static TCGv cpu_psw_n
;
104 static TCGv cpu_psw_v
;
105 static TCGv cpu_psw_cb
;
106 static TCGv cpu_psw_cb_msb
;
107 static TCGv cpu_cr26
;
108 static TCGv cpu_cr27
;
110 #include "exec/gen-icount.h"
112 void hppa_translate_init(void)
114 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
116 typedef struct { TCGv
*var
; const char *name
; int ofs
; } GlobalVar
;
117 static const GlobalVar vars
[] = {
131 /* Use the symbolic register names that match the disassembler. */
132 static const char gr_names
[32][4] = {
133 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
134 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
135 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
136 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
139 static bool done_init
= 0;
147 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
148 tcg_ctx
.tcg_env
= cpu_env
;
150 TCGV_UNUSED(cpu_gr
[0]);
151 for (i
= 1; i
< 32; i
++) {
152 cpu_gr
[i
] = tcg_global_mem_new(cpu_env
,
153 offsetof(CPUHPPAState
, gr
[i
]),
157 for (i
= 0; i
< ARRAY_SIZE(vars
); ++i
) {
158 const GlobalVar
*v
= &vars
[i
];
159 *v
->var
= tcg_global_mem_new(cpu_env
, v
->ofs
, v
->name
);
/* Build a DisasCond that is never true: no nullification is pending.
   NOTE(review): extraction dropped the return/closing lines of this
   function; the visible code only constructs the initializer. */
163 static DisasCond
cond_make_f(void)
165 DisasCond r
= { .c
= TCG_COND_NEVER
};
/* Build the condition "PSW[N] is set": TCG_COND_NE against zero, with
   a0_is_n marking that a0 aliases the cpu_psw_n global and a1_is_0
   marking that a1 is the constant zero (so cond_free need not free them). */
171 static DisasCond
cond_make_n(void)
173 DisasCond r
= { .c
= TCG_COND_NE
, .a0_is_n
= true, .a1_is_0
= true };
/* Build the condition "A0 <c> 0".  A0 is copied into a fresh temporary so
   the condition stays valid even if the caller's value is overwritten
   later.  Degenerate conditions must use cond_make_f/cond_make_t instead,
   hence the assert. */
179 static DisasCond
cond_make_0(TCGCond c
, TCGv a0
)
181 DisasCond r
= { .c
= c
, .a1_is_0
= true };
183 assert (c
!= TCG_COND_NEVER
&& c
!= TCG_COND_ALWAYS
);
184 r
.a0
= tcg_temp_new();
185 tcg_gen_mov_tl(r
.a0
, a0
);
/* Build the condition "A0 <c> A1".  Both operands are snapshotted into
   fresh temporaries so later writes to the caller's registers cannot
   change the recorded condition.  ALWAYS/NEVER are rejected as above. */
191 static DisasCond
cond_make(TCGCond c
, TCGv a0
, TCGv a1
)
193 DisasCond r
= { .c
= c
};
195 assert (c
!= TCG_COND_NEVER
&& c
!= TCG_COND_ALWAYS
);
196 r
.a0
= tcg_temp_new();
197 tcg_gen_mov_tl(r
.a0
, a0
);
198 r
.a1
= tcg_temp_new();
199 tcg_gen_mov_tl(r
.a1
, a1
);
204 static void cond_prep(DisasCond
*cond
)
207 cond
->a1_is_0
= false;
208 cond
->a1
= tcg_const_tl(0);
212 static void cond_free(DisasCond
*cond
)
216 if (!cond
->a0_is_n
) {
217 tcg_temp_free(cond
->a0
);
219 if (!cond
->a1_is_0
) {
220 tcg_temp_free(cond
->a1
);
222 cond
->a0_is_n
= false;
223 cond
->a1_is_0
= false;
224 TCGV_UNUSED(cond
->a0
);
225 TCGV_UNUSED(cond
->a1
);
227 case TCG_COND_ALWAYS
:
228 cond
->c
= TCG_COND_NEVER
;
/* Allocate a translation-lifetime temporary, recording it in ctx->temps
   so it can be freed at the end of the instruction.  The g_assert guards
   against overflowing the fixed-size temps array. */
235 static TCGv
get_temp(DisasContext
*ctx
)
237 unsigned i
= ctx
->ntemps
++;
238 g_assert(i
< ARRAY_SIZE(ctx
->temps
));
239 return ctx
->temps
[i
] = tcg_temp_new();
/* Return a ctx-tracked temporary holding the constant V.
   NOTE(review): the return statement was dropped by extraction;
   presumably the function returns t. */
242 static TCGv
load_const(DisasContext
*ctx
, target_long v
)
244 TCGv t
= get_temp(ctx
);
245 tcg_gen_movi_tl(t
, v
);
/* Return a TCGv holding general register REG for reading.  The visible
   fragment is the REG == 0 path: PA-RISC GR0 always reads as zero, so a
   zeroed temporary is returned.  NOTE(review): the non-zero-register path
   (returning cpu_gr[reg]) was dropped by extraction — confirm against the
   full source. */
249 static TCGv
load_gpr(DisasContext
*ctx
, unsigned reg
)
252 TCGv t
= get_temp(ctx
);
253 tcg_gen_movi_tl(t
, 0);
/* Return a TCGv suitable as the destination for register REG.  When REG
   is GR0 (writes discarded) or a nullification condition is pending (the
   write may have to be suppressed by save_gpr later), a scratch temporary
   is returned instead of the global so the architectural register is not
   clobbered early. */
260 static TCGv
dest_gpr(DisasContext
*ctx
, unsigned reg
)
262 if (reg
== 0 || ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
263 return get_temp(ctx
);
/* Store T into DEST, honoring any pending nullification: if a null
   condition is active, emit a movcond that keeps DEST's old value when
   the condition holds (insn nullified) and writes T otherwise.  With no
   condition pending, this is a plain move. */
269 static void save_or_nullify(DisasContext
*ctx
, TCGv dest
, TCGv t
)
271 if (ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
272 cond_prep(&ctx
->null_cond
);
273 tcg_gen_movcond_tl(ctx
->null_cond
.c
, dest
, ctx
->null_cond
.a0
,
274 ctx
->null_cond
.a1
, dest
, t
);
276 tcg_gen_mov_tl(dest
, t
);
/* Write T to general register REG with nullification applied.
   NOTE(review): the original guards against REG == 0 before this call
   (GR0 is not writable); that guard line was dropped by extraction —
   confirm against the full source. */
280 static void save_gpr(DisasContext
*ctx
, unsigned reg
, TCGv t
)
283 save_or_nullify(ctx
, cpu_gr
[reg
], t
);
287 #ifdef HOST_WORDS_BIGENDIAN
295 static TCGv_i32
load_frw_i32(unsigned rt
)
297 TCGv_i32 ret
= tcg_temp_new_i32();
298 tcg_gen_ld_i32(ret
, cpu_env
,
299 offsetof(CPUHPPAState
, fr
[rt
& 31])
300 + (rt
& 32 ? LO_OFS
: HI_OFS
));
304 static TCGv_i32
load_frw0_i32(unsigned rt
)
307 return tcg_const_i32(0);
309 return load_frw_i32(rt
);
313 static TCGv_i64
load_frw0_i64(unsigned rt
)
316 return tcg_const_i64(0);
318 TCGv_i64 ret
= tcg_temp_new_i64();
319 tcg_gen_ld32u_i64(ret
, cpu_env
,
320 offsetof(CPUHPPAState
, fr
[rt
& 31])
321 + (rt
& 32 ? LO_OFS
: HI_OFS
));
326 static void save_frw_i32(unsigned rt
, TCGv_i32 val
)
328 tcg_gen_st_i32(val
, cpu_env
,
329 offsetof(CPUHPPAState
, fr
[rt
& 31])
330 + (rt
& 32 ? LO_OFS
: HI_OFS
));
/* Load the full 64-bit floating-point register RT from env into a new
   i64 temporary.  NOTE(review): the return statement was dropped by
   extraction; presumably the function returns ret. */
336 static TCGv_i64
load_frd(unsigned rt
)
338 TCGv_i64 ret
= tcg_temp_new_i64();
339 tcg_gen_ld_i64(ret
, cpu_env
, offsetof(CPUHPPAState
, fr
[rt
]));
343 static TCGv_i64
load_frd0(unsigned rt
)
346 return tcg_const_i64(0);
/* Store VAL into the full 64-bit floating-point register RT in env. */
352 static void save_frd(unsigned rt
, TCGv_i64 val
)
354 tcg_gen_st_i64(val
, cpu_env
, offsetof(CPUHPPAState
, fr
[rt
]));
357 /* Skip over the implementation of an insn that has been nullified.
358 Use this when the insn is too complex for a conditional move. */
359 static void nullify_over(DisasContext
*ctx
)
361 if (ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
362 /* The always condition should have been handled in the main loop. */
363 assert(ctx
->null_cond
.c
!= TCG_COND_ALWAYS
);
365 ctx
->null_lab
= gen_new_label();
366 cond_prep(&ctx
->null_cond
);
368 /* If we're using PSW[N], copy it to a temp because... */
369 if (ctx
->null_cond
.a0_is_n
) {
370 ctx
->null_cond
.a0_is_n
= false;
371 ctx
->null_cond
.a0
= tcg_temp_new();
372 tcg_gen_mov_tl(ctx
->null_cond
.a0
, cpu_psw_n
);
374 /* ... we clear it before branching over the implementation,
375 so that (1) it's clear after nullifying this insn and
376 (2) if this insn nullifies the next, PSW[N] is valid. */
377 if (ctx
->psw_n_nonzero
) {
378 ctx
->psw_n_nonzero
= false;
379 tcg_gen_movi_tl(cpu_psw_n
, 0);
382 tcg_gen_brcond_tl(ctx
->null_cond
.c
, ctx
->null_cond
.a0
,
383 ctx
->null_cond
.a1
, ctx
->null_lab
);
384 cond_free(&ctx
->null_cond
);
388 /* Save the current nullification state to PSW[N]. */
389 static void nullify_save(DisasContext
*ctx
)
391 if (ctx
->null_cond
.c
== TCG_COND_NEVER
) {
392 if (ctx
->psw_n_nonzero
) {
393 tcg_gen_movi_tl(cpu_psw_n
, 0);
397 if (!ctx
->null_cond
.a0_is_n
) {
398 cond_prep(&ctx
->null_cond
);
399 tcg_gen_setcond_tl(ctx
->null_cond
.c
, cpu_psw_n
,
400 ctx
->null_cond
.a0
, ctx
->null_cond
.a1
);
401 ctx
->psw_n_nonzero
= true;
403 cond_free(&ctx
->null_cond
);
406 /* Set a PSW[N] to X. The intention is that this is used immediately
407 before a goto_tb/exit_tb, so that there is no fallthru path to other
408 code within the TB. Therefore we do not update psw_n_nonzero. */
409 static void nullify_set(DisasContext
*ctx
, bool x
)
411 if (ctx
->psw_n_nonzero
|| x
) {
412 tcg_gen_movi_tl(cpu_psw_n
, x
);
416 /* Mark the end of an instruction that may have been nullified.
417 This is the pair to nullify_over. */
418 static ExitStatus
nullify_end(DisasContext
*ctx
, ExitStatus status
)
420 TCGLabel
*null_lab
= ctx
->null_lab
;
422 if (likely(null_lab
== NULL
)) {
423 /* The current insn wasn't conditional or handled the condition
424 applied to it without a branch, so the (new) setting of
425 NULL_COND can be applied directly to the next insn. */
428 ctx
->null_lab
= NULL
;
430 if (likely(ctx
->null_cond
.c
== TCG_COND_NEVER
)) {
431 /* The next instruction will be unconditional,
432 and NULL_COND already reflects that. */
433 gen_set_label(null_lab
);
435 /* The insn that we just executed is itself nullifying the next
436 instruction. Store the condition in the PSW[N] global.
437 We asserted PSW[N] = 0 in nullify_over, so that after the
438 label we have the proper value in place. */
440 gen_set_label(null_lab
);
441 ctx
->null_cond
= cond_make_n();
444 assert(status
!= EXIT_GOTO_TB
&& status
!= EXIT_IAQ_N_UPDATED
);
445 if (status
== EXIT_NORETURN
) {
/* Copy one instruction-address-queue entry into DEST.  IVAL == -1 is the
   sentinel for "address not known at translation time": in that case the
   runtime value VVAL is copied; otherwise the compile-time constant IVAL
   is used. */
451 static void copy_iaoq_entry(TCGv dest
, target_ulong ival
, TCGv vval
)
453 if (unlikely(ival
== -1)) {
454 tcg_gen_mov_tl(dest
, vval
);
456 tcg_gen_movi_tl(dest
, ival
);
/* Compute a branch destination: DISP relative to the current insn's
   front queue address plus 8 (PA branches are relative to the insn
   two slots ahead, due to the delay slot). */
460 static inline target_ulong
iaoq_dest(DisasContext
*ctx
, target_long disp
)
462 return ctx
->iaoq_f
+ disp
+ 8;
/* Emit a call to the excp helper raising EXCEPTION; the i32 constant
   temporary is freed immediately after the call. */
465 static void gen_excp_1(int exception
)
467 TCGv_i32 t
= tcg_const_i32(exception
);
468 gen_helper_excp(cpu_env
, t
);
469 tcg_temp_free_i32(t
);
/* Raise EXCEPTION from the current instruction.  Both IAQ entries are
   synced to the globals first so the exception handler sees the correct
   restart addresses.  The helper longjmps out, hence EXIT_NORETURN. */
472 static ExitStatus
gen_excp(DisasContext
*ctx
, int exception
)
474 copy_iaoq_entry(cpu_iaoq_f
, ctx
->iaoq_f
, cpu_iaoq_f
);
475 copy_iaoq_entry(cpu_iaoq_b
, ctx
->iaoq_b
, cpu_iaoq_b
);
477 gen_excp_1(exception
);
478 return EXIT_NORETURN
;
/* Raise SIGILL for an illegal/unimplemented instruction, wrapped in
   nullify_end so a nullified illegal insn is skipped rather than trapped. */
481 static ExitStatus
gen_illegal(DisasContext
*ctx
)
484 return nullify_end(ctx
, gen_excp(ctx
, EXCP_SIGILL
));
/* Decide whether a direct chained jump (goto_tb) may be used to reach
   DEST.  NOTE(review): the fragment shows only the suppression test; the
   dropped remainder presumably also checks that DEST stays on the same
   guest page — confirm against the full source. */
487 static bool use_goto_tb(DisasContext
*ctx
, target_ulong dest
)
489 /* Suppress goto_tb in the case of single-stepping and IO. */
490 if ((ctx
->tb
->cflags
& CF_LAST_IO
) || ctx
->singlestep_enabled
) {
496 /* If the next insn is to be nullified, and it's on the same page,
497 and we're not attempting to set a breakpoint on it, then we can
498 totally skip the nullified insn. This avoids creating and
499 executing a TB that merely branches to the next TB. */
500 static bool use_nullify_skip(DisasContext
*ctx
)
/* Same page: compare the back and front queue addresses under the page
   mask; also require that no breakpoint of any kind sits on iaoq_b. */
502 return (((ctx
->iaoq_b
^ ctx
->iaoq_f
) & TARGET_PAGE_MASK
) == 0
503 && !cpu_breakpoint_test(ctx
->cs
, ctx
->iaoq_b
, BP_ANY
));
506 static void gen_goto_tb(DisasContext
*ctx
, int which
,
507 target_ulong f
, target_ulong b
)
509 if (f
!= -1 && b
!= -1 && use_goto_tb(ctx
, f
)) {
510 tcg_gen_goto_tb(which
);
511 tcg_gen_movi_tl(cpu_iaoq_f
, f
);
512 tcg_gen_movi_tl(cpu_iaoq_b
, b
);
513 tcg_gen_exit_tb((uintptr_t)ctx
->tb
+ which
);
515 copy_iaoq_entry(cpu_iaoq_f
, f
, cpu_iaoq_b
);
516 copy_iaoq_entry(cpu_iaoq_b
, b
, ctx
->iaoq_n_var
);
517 if (ctx
->singlestep_enabled
) {
518 gen_excp_1(EXCP_DEBUG
);
525 /* PA has a habit of taking the LSB of a field and using that as the sign,
526 with the rest of the field becoming the least significant bits. */
527 static target_long
low_sextract(uint32_t val
, int pos
, int len
)
529 target_ulong x
= -(target_ulong
)extract32(val
, pos
, 1);
530 x
= (x
<< (len
- 1)) | extract32(val
, pos
+ 1, len
- 1);
534 static unsigned assemble_rt64(uint32_t insn
)
536 unsigned r1
= extract32(insn
, 6, 1);
537 unsigned r0
= extract32(insn
, 0, 5);
541 static unsigned assemble_ra64(uint32_t insn
)
543 unsigned r1
= extract32(insn
, 7, 1);
544 unsigned r0
= extract32(insn
, 21, 5);
548 static unsigned assemble_rb64(uint32_t insn
)
550 unsigned r1
= extract32(insn
, 12, 1);
551 unsigned r0
= extract32(insn
, 16, 5);
/* Reassemble a 6-bit "rc" register field that the encoding scatters
   across three sub-fields of the instruction word: bit 8 is the high bit,
   bits 13..15 the middle, bits 9..10 the low pair. */
555 static unsigned assemble_rc64(uint32_t insn
)
557 unsigned r2
= extract32(insn
, 8, 1);
558 unsigned r1
= extract32(insn
, 13, 3);
559 unsigned r0
= extract32(insn
, 9, 2);
560 return r2
* 32 + r1
* 4 + r0
;
/* Reassemble the scattered 12-bit signed branch displacement: bit 0 is
   the sign (per the PA low-sign-extension convention), then bit 2 and
   bits 3..12 supply the magnitude.  NOTE(review): the return statement
   was dropped by extraction; presumably the function returns x. */
563 static target_long
assemble_12(uint32_t insn
)
565 target_ulong x
= -(target_ulong
)(insn
& 1);
566 x
= (x
<< 1) | extract32(insn
, 2, 1);
567 x
= (x
<< 10) | extract32(insn
, 3, 10);
571 static target_long
assemble_16(uint32_t insn
)
573 /* Take the name from PA2.0, which produces a 16-bit number
574 only with wide mode; otherwise a 14-bit number. Since we don't
575 implement wide mode, this is always the 14-bit number. */
576 return low_sextract(insn
, 0, 14);
579 static target_long
assemble_16a(uint32_t insn
)
581 /* Take the name from PA2.0, which produces a 14-bit shifted number
582 only with wide mode; otherwise a 12-bit shifted number. Since we
583 don't implement wide mode, this is always the 12-bit number. */
584 target_ulong x
= -(target_ulong
)(insn
& 1);
585 x
= (x
<< 11) | extract32(insn
, 2, 11);
589 static target_long
assemble_17(uint32_t insn
)
591 target_ulong x
= -(target_ulong
)(insn
& 1);
592 x
= (x
<< 5) | extract32(insn
, 16, 5);
593 x
= (x
<< 1) | extract32(insn
, 2, 1);
594 x
= (x
<< 10) | extract32(insn
, 3, 10);
/* Reassemble the scattered 21-bit immediate (LDIL/ADDIL): bit 0 is the
   sign, then bits 1..11, 14..15, 16..20 and 12..13 are concatenated in
   that order.  NOTE(review): the return statement was dropped by
   extraction; presumably the function returns x (shifted into position
   by the caller or a dropped line — confirm against the full source). */
598 static target_long
assemble_21(uint32_t insn
)
600 target_ulong x
= -(target_ulong
)(insn
& 1);
601 x
= (x
<< 11) | extract32(insn
, 1, 11);
602 x
= (x
<< 2) | extract32(insn
, 14, 2);
603 x
= (x
<< 5) | extract32(insn
, 16, 5);
604 x
= (x
<< 2) | extract32(insn
, 12, 2);
608 static target_long
assemble_22(uint32_t insn
)
610 target_ulong x
= -(target_ulong
)(insn
& 1);
611 x
= (x
<< 10) | extract32(insn
, 16, 10);
612 x
= (x
<< 1) | extract32(insn
, 2, 1);
613 x
= (x
<< 10) | extract32(insn
, 3, 10);
617 /* The parisc documentation describes only the general interpretation of
618 the conditions, without describing their exact implementation. The
619 interpretations do not stand up well when considering ADD,C and SUB,B.
620 However, considering the Addition, Subtraction and Logical conditions
621 as a whole it would appear that these relations are similar to what
622 a traditional NZCV set of flags would produce. */
624 static DisasCond
do_cond(unsigned cf
, TCGv res
, TCGv cb_msb
, TCGv sv
)
630 case 0: /* Never / TR */
631 cond
= cond_make_f();
633 case 1: /* = / <> (Z / !Z) */
634 cond
= cond_make_0(TCG_COND_EQ
, res
);
636 case 2: /* < / >= (N / !N) */
637 cond
= cond_make_0(TCG_COND_LT
, res
);
639 case 3: /* <= / > (N | Z / !N & !Z) */
640 cond
= cond_make_0(TCG_COND_LE
, res
);
642 case 4: /* NUV / UV (!C / C) */
643 cond
= cond_make_0(TCG_COND_EQ
, cb_msb
);
645 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
646 tmp
= tcg_temp_new();
647 tcg_gen_neg_tl(tmp
, cb_msb
);
648 tcg_gen_and_tl(tmp
, tmp
, res
);
649 cond
= cond_make_0(TCG_COND_EQ
, tmp
);
652 case 6: /* SV / NSV (V / !V) */
653 cond
= cond_make_0(TCG_COND_LT
, sv
);
655 case 7: /* OD / EV */
656 tmp
= tcg_temp_new();
657 tcg_gen_andi_tl(tmp
, res
, 1);
658 cond
= cond_make_0(TCG_COND_NE
, tmp
);
662 g_assert_not_reached();
665 cond
.c
= tcg_invert_cond(cond
.c
);
671 /* Similar, but for the special case of subtraction without borrow, we
672 can use the inputs directly. This can allow other computation to be
673 deleted as unused. */
675 static DisasCond
do_sub_cond(unsigned cf
, TCGv res
, TCGv in1
, TCGv in2
, TCGv sv
)
681 cond
= cond_make(TCG_COND_EQ
, in1
, in2
);
684 cond
= cond_make(TCG_COND_LT
, in1
, in2
);
687 cond
= cond_make(TCG_COND_LE
, in1
, in2
);
689 case 4: /* << / >>= */
690 cond
= cond_make(TCG_COND_LTU
, in1
, in2
);
692 case 5: /* <<= / >> */
693 cond
= cond_make(TCG_COND_LEU
, in1
, in2
);
696 return do_cond(cf
, res
, sv
, sv
);
699 cond
.c
= tcg_invert_cond(cond
.c
);
705 /* Similar, but for logicals, where the carry and overflow bits are not
706 computed, and use of them is undefined. */
708 static DisasCond
do_log_cond(unsigned cf
, TCGv res
)
711 case 4: case 5: case 6:
715 return do_cond(cf
, res
, res
, res
);
718 /* Similar, but for shift/extract/deposit conditions. */
720 static DisasCond
do_sed_cond(unsigned orig
, TCGv res
)
724 /* Convert the compressed condition codes to standard.
725 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
726 4-7 are the reverse of 0-3. */
733 return do_log_cond(c
* 2 + f
, res
);
736 /* Similar, but for unit conditions. */
738 static DisasCond
do_unit_cond(unsigned cf
, TCGv res
, TCGv in1
, TCGv in2
)
745 /* Since we want to test lots of carry-out bits all at once, do not
746 * do our normal thing and compute carry-in of bit B+1 since that
747 * leaves us with carry bits spread across two words.
750 tmp
= tcg_temp_new();
751 tcg_gen_or_tl(cb
, in1
, in2
);
752 tcg_gen_and_tl(tmp
, in1
, in2
);
753 tcg_gen_andc_tl(cb
, cb
, res
);
754 tcg_gen_or_tl(cb
, cb
, tmp
);
759 case 0: /* never / TR */
760 case 1: /* undefined */
761 case 5: /* undefined */
762 cond
= cond_make_f();
765 case 2: /* SBZ / NBZ */
766 /* See hasless(v,1) from
767 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
769 tmp
= tcg_temp_new();
770 tcg_gen_subi_tl(tmp
, res
, 0x01010101u
);
771 tcg_gen_andc_tl(tmp
, tmp
, res
);
772 tcg_gen_andi_tl(tmp
, tmp
, 0x80808080u
);
773 cond
= cond_make_0(TCG_COND_NE
, tmp
);
777 case 3: /* SHZ / NHZ */
778 tmp
= tcg_temp_new();
779 tcg_gen_subi_tl(tmp
, res
, 0x00010001u
);
780 tcg_gen_andc_tl(tmp
, tmp
, res
);
781 tcg_gen_andi_tl(tmp
, tmp
, 0x80008000u
);
782 cond
= cond_make_0(TCG_COND_NE
, tmp
);
786 case 4: /* SDC / NDC */
787 tcg_gen_andi_tl(cb
, cb
, 0x88888888u
);
788 cond
= cond_make_0(TCG_COND_NE
, cb
);
791 case 6: /* SBC / NBC */
792 tcg_gen_andi_tl(cb
, cb
, 0x80808080u
);
793 cond
= cond_make_0(TCG_COND_NE
, cb
);
796 case 7: /* SHC / NHC */
797 tcg_gen_andi_tl(cb
, cb
, 0x80008000u
);
798 cond
= cond_make_0(TCG_COND_NE
, cb
);
802 g_assert_not_reached();
808 cond
.c
= tcg_invert_cond(cond
.c
);
814 /* Compute signed overflow for addition. */
/* SV = (res ^ in1) & ~(in1 ^ in2): overflow iff the operands have the
   same sign and the result's sign differs.  SV's sign bit is the
   overflow flag.  NOTE(review): the return of sv and the free of tmp
   were dropped by extraction. */
815 static TCGv
do_add_sv(DisasContext
*ctx
, TCGv res
, TCGv in1
, TCGv in2
)
817 TCGv sv
= get_temp(ctx
);
818 TCGv tmp
= tcg_temp_new();
820 tcg_gen_xor_tl(sv
, res
, in1
);
821 tcg_gen_xor_tl(tmp
, in1
, in2
);
822 tcg_gen_andc_tl(sv
, sv
, tmp
);
828 /* Compute signed overflow for subtraction. */
/* SV = (res ^ in1) & (in1 ^ in2): overflow iff the operands have
   different signs and the result's sign differs from in1's.  Note the
   only difference from do_add_sv is and vs andc.  NOTE(review): the
   return of sv and the free of tmp were dropped by extraction. */
829 static TCGv
do_sub_sv(DisasContext
*ctx
, TCGv res
, TCGv in1
, TCGv in2
)
831 TCGv sv
= get_temp(ctx
);
832 TCGv tmp
= tcg_temp_new();
834 tcg_gen_xor_tl(sv
, res
, in1
);
835 tcg_gen_xor_tl(tmp
, in1
, in2
);
836 tcg_gen_and_tl(sv
, sv
, tmp
);
842 static ExitStatus
do_add(DisasContext
*ctx
, unsigned rt
, TCGv in1
, TCGv in2
,
843 unsigned shift
, bool is_l
, bool is_tsv
, bool is_tc
,
844 bool is_c
, unsigned cf
)
846 TCGv dest
, cb
, cb_msb
, sv
, tmp
;
847 unsigned c
= cf
>> 1;
850 dest
= tcg_temp_new();
856 tcg_gen_shli_tl(tmp
, in1
, shift
);
860 if (!is_l
|| c
== 4 || c
== 5) {
861 TCGv zero
= tcg_const_tl(0);
862 cb_msb
= get_temp(ctx
);
863 tcg_gen_add2_tl(dest
, cb_msb
, in1
, zero
, in2
, zero
);
865 tcg_gen_add2_tl(dest
, cb_msb
, dest
, cb_msb
, cpu_psw_cb_msb
, zero
);
870 tcg_gen_xor_tl(cb
, in1
, in2
);
871 tcg_gen_xor_tl(cb
, cb
, dest
);
874 tcg_gen_add_tl(dest
, in1
, in2
);
876 tcg_gen_add_tl(dest
, dest
, cpu_psw_cb_msb
);
880 /* Compute signed overflow if required. */
882 if (is_tsv
|| c
== 6) {
883 sv
= do_add_sv(ctx
, dest
, in1
, in2
);
885 /* ??? Need to include overflow from shift. */
886 gen_helper_tsv(cpu_env
, sv
);
890 /* Emit any conditional trap before any writeback. */
891 cond
= do_cond(cf
, dest
, cb_msb
, sv
);
894 tmp
= tcg_temp_new();
895 tcg_gen_setcond_tl(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
896 gen_helper_tcond(cpu_env
, tmp
);
900 /* Write back the result. */
902 save_or_nullify(ctx
, cpu_psw_cb
, cb
);
903 save_or_nullify(ctx
, cpu_psw_cb_msb
, cb_msb
);
905 save_gpr(ctx
, rt
, dest
);
908 /* Install the new nullification. */
909 cond_free(&ctx
->null_cond
);
910 ctx
->null_cond
= cond
;
914 static ExitStatus
do_sub(DisasContext
*ctx
, unsigned rt
, TCGv in1
, TCGv in2
,
915 bool is_tsv
, bool is_b
, bool is_tc
, unsigned cf
)
917 TCGv dest
, sv
, cb
, cb_msb
, zero
, tmp
;
918 unsigned c
= cf
>> 1;
921 dest
= tcg_temp_new();
923 cb_msb
= tcg_temp_new();
925 zero
= tcg_const_tl(0);
927 /* DEST,C = IN1 + ~IN2 + C. */
928 tcg_gen_not_tl(cb
, in2
);
929 tcg_gen_add2_tl(dest
, cb_msb
, in1
, zero
, cpu_psw_cb_msb
, zero
);
930 tcg_gen_add2_tl(dest
, cb_msb
, dest
, cb_msb
, cb
, zero
);
931 tcg_gen_xor_tl(cb
, cb
, in1
);
932 tcg_gen_xor_tl(cb
, cb
, dest
);
934 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
935 operations by seeding the high word with 1 and subtracting. */
936 tcg_gen_movi_tl(cb_msb
, 1);
937 tcg_gen_sub2_tl(dest
, cb_msb
, in1
, cb_msb
, in2
, zero
);
938 tcg_gen_eqv_tl(cb
, in1
, in2
);
939 tcg_gen_xor_tl(cb
, cb
, dest
);
943 /* Compute signed overflow if required. */
945 if (is_tsv
|| c
== 6) {
946 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
948 gen_helper_tsv(cpu_env
, sv
);
952 /* Compute the condition. We cannot use the special case for borrow. */
954 cond
= do_sub_cond(cf
, dest
, in1
, in2
, sv
);
956 cond
= do_cond(cf
, dest
, cb_msb
, sv
);
959 /* Emit any conditional trap before any writeback. */
962 tmp
= tcg_temp_new();
963 tcg_gen_setcond_tl(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
964 gen_helper_tcond(cpu_env
, tmp
);
968 /* Write back the result. */
969 save_or_nullify(ctx
, cpu_psw_cb
, cb
);
970 save_or_nullify(ctx
, cpu_psw_cb_msb
, cb_msb
);
971 save_gpr(ctx
, rt
, dest
);
974 /* Install the new nullification. */
975 cond_free(&ctx
->null_cond
);
976 ctx
->null_cond
= cond
;
980 static ExitStatus
do_cmpclr(DisasContext
*ctx
, unsigned rt
, TCGv in1
,
981 TCGv in2
, unsigned cf
)
986 dest
= tcg_temp_new();
987 tcg_gen_sub_tl(dest
, in1
, in2
);
989 /* Compute signed overflow if required. */
991 if ((cf
>> 1) == 6) {
992 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
995 /* Form the condition for the compare. */
996 cond
= do_sub_cond(cf
, dest
, in1
, in2
, sv
);
999 tcg_gen_movi_tl(dest
, 0);
1000 save_gpr(ctx
, rt
, dest
);
1001 tcg_temp_free(dest
);
1003 /* Install the new nullification. */
1004 cond_free(&ctx
->null_cond
);
1005 ctx
->null_cond
= cond
;
1009 static ExitStatus
do_log(DisasContext
*ctx
, unsigned rt
, TCGv in1
, TCGv in2
,
1010 unsigned cf
, void (*fn
)(TCGv
, TCGv
, TCGv
))
1012 TCGv dest
= dest_gpr(ctx
, rt
);
1014 /* Perform the operation, and writeback. */
1016 save_gpr(ctx
, rt
, dest
);
1018 /* Install the new nullification. */
1019 cond_free(&ctx
->null_cond
);
1021 ctx
->null_cond
= do_log_cond(cf
, dest
);
1026 static ExitStatus
do_unit(DisasContext
*ctx
, unsigned rt
, TCGv in1
,
1027 TCGv in2
, unsigned cf
, bool is_tc
,
1028 void (*fn
)(TCGv
, TCGv
, TCGv
))
1034 dest
= dest_gpr(ctx
, rt
);
1036 save_gpr(ctx
, rt
, dest
);
1037 cond_free(&ctx
->null_cond
);
1039 dest
= tcg_temp_new();
1042 cond
= do_unit_cond(cf
, dest
, in1
, in2
);
1045 TCGv tmp
= tcg_temp_new();
1047 tcg_gen_setcond_tl(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
1048 gen_helper_tcond(cpu_env
, tmp
);
1051 save_gpr(ctx
, rt
, dest
);
1053 cond_free(&ctx
->null_cond
);
1054 ctx
->null_cond
= cond
;
1059 /* Emit a memory load. The modify parameter should be
1060 * < 0 for pre-modify,
1061 * > 0 for post-modify,
1062 * = 0 for no base register update.
1064 static void do_load_32(DisasContext
*ctx
, TCGv_i32 dest
, unsigned rb
,
1065 unsigned rx
, int scale
, target_long disp
,
1066 int modify
, TCGMemOp mop
)
1070 /* Caller uses nullify_over/nullify_end. */
1071 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1073 addr
= tcg_temp_new();
1074 base
= load_gpr(ctx
, rb
);
1076 /* Note that RX is mutually exclusive with DISP. */
1078 tcg_gen_shli_tl(addr
, cpu_gr
[rx
], scale
);
1079 tcg_gen_add_tl(addr
, addr
, base
);
1081 tcg_gen_addi_tl(addr
, base
, disp
);
1085 tcg_gen_qemu_ld_i32(dest
, addr
, MMU_USER_IDX
, mop
);
1087 tcg_gen_qemu_ld_i32(dest
, (modify
< 0 ? addr
: base
),
1089 save_gpr(ctx
, rb
, addr
);
1091 tcg_temp_free(addr
);
1094 static void do_load_64(DisasContext
*ctx
, TCGv_i64 dest
, unsigned rb
,
1095 unsigned rx
, int scale
, target_long disp
,
1096 int modify
, TCGMemOp mop
)
1100 /* Caller uses nullify_over/nullify_end. */
1101 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1103 addr
= tcg_temp_new();
1104 base
= load_gpr(ctx
, rb
);
1106 /* Note that RX is mutually exclusive with DISP. */
1108 tcg_gen_shli_tl(addr
, cpu_gr
[rx
], scale
);
1109 tcg_gen_add_tl(addr
, addr
, base
);
1111 tcg_gen_addi_tl(addr
, base
, disp
);
1115 tcg_gen_qemu_ld_i64(dest
, addr
, MMU_USER_IDX
, mop
);
1117 tcg_gen_qemu_ld_i64(dest
, (modify
< 0 ? addr
: base
),
1119 save_gpr(ctx
, rb
, addr
);
1121 tcg_temp_free(addr
);
1124 static void do_store_32(DisasContext
*ctx
, TCGv_i32 src
, unsigned rb
,
1125 unsigned rx
, int scale
, target_long disp
,
1126 int modify
, TCGMemOp mop
)
1130 /* Caller uses nullify_over/nullify_end. */
1131 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1133 addr
= tcg_temp_new();
1134 base
= load_gpr(ctx
, rb
);
1136 /* Note that RX is mutually exclusive with DISP. */
1138 tcg_gen_shli_tl(addr
, cpu_gr
[rx
], scale
);
1139 tcg_gen_add_tl(addr
, addr
, base
);
1141 tcg_gen_addi_tl(addr
, base
, disp
);
1144 tcg_gen_qemu_st_i32(src
, (modify
<= 0 ? addr
: base
), MMU_USER_IDX
, mop
);
1147 save_gpr(ctx
, rb
, addr
);
1149 tcg_temp_free(addr
);
1152 static void do_store_64(DisasContext
*ctx
, TCGv_i64 src
, unsigned rb
,
1153 unsigned rx
, int scale
, target_long disp
,
1154 int modify
, TCGMemOp mop
)
1158 /* Caller uses nullify_over/nullify_end. */
1159 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1161 addr
= tcg_temp_new();
1162 base
= load_gpr(ctx
, rb
);
1164 /* Note that RX is mutually exclusive with DISP. */
1166 tcg_gen_shli_tl(addr
, cpu_gr
[rx
], scale
);
1167 tcg_gen_add_tl(addr
, addr
, base
);
1169 tcg_gen_addi_tl(addr
, base
, disp
);
1172 tcg_gen_qemu_st_i64(src
, (modify
<= 0 ? addr
: base
), MMU_USER_IDX
, mop
);
1175 save_gpr(ctx
, rb
, addr
);
1177 tcg_temp_free(addr
);
1180 #if TARGET_LONG_BITS == 64
1181 #define do_load_tl do_load_64
1182 #define do_store_tl do_store_64
1184 #define do_load_tl do_load_32
1185 #define do_store_tl do_store_32
1188 static ExitStatus
do_load(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1189 unsigned rx
, int scale
, target_long disp
,
1190 int modify
, TCGMemOp mop
)
1197 /* No base register update. */
1198 dest
= dest_gpr(ctx
, rt
);
1200 /* Make sure if RT == RB, we see the result of the load. */
1201 dest
= get_temp(ctx
);
1203 do_load_tl(ctx
, dest
, rb
, rx
, scale
, disp
, modify
, mop
);
1204 save_gpr(ctx
, rt
, dest
);
1206 return nullify_end(ctx
, NO_EXIT
);
1209 static ExitStatus
do_floadw(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1210 unsigned rx
, int scale
, target_long disp
,
1217 tmp
= tcg_temp_new_i32();
1218 do_load_32(ctx
, tmp
, rb
, rx
, scale
, disp
, modify
, MO_TEUL
);
1219 save_frw_i32(rt
, tmp
);
1220 tcg_temp_free_i32(tmp
);
1223 gen_helper_loaded_fr0(cpu_env
);
1226 return nullify_end(ctx
, NO_EXIT
);
1229 static ExitStatus
do_floadd(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1230 unsigned rx
, int scale
, target_long disp
,
1237 tmp
= tcg_temp_new_i64();
1238 do_load_64(ctx
, tmp
, rb
, rx
, scale
, disp
, modify
, MO_TEQ
);
1240 tcg_temp_free_i64(tmp
);
1243 gen_helper_loaded_fr0(cpu_env
);
1246 return nullify_end(ctx
, NO_EXIT
);
1249 static ExitStatus
do_store(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1250 target_long disp
, int modify
, TCGMemOp mop
)
1253 do_store_tl(ctx
, load_gpr(ctx
, rt
), rb
, 0, 0, disp
, modify
, mop
);
1254 return nullify_end(ctx
, NO_EXIT
);
1257 static ExitStatus
do_fstorew(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1258 unsigned rx
, int scale
, target_long disp
,
1265 tmp
= load_frw_i32(rt
);
1266 do_store_32(ctx
, tmp
, rb
, rx
, scale
, disp
, modify
, MO_TEUL
);
1267 tcg_temp_free_i32(tmp
);
1269 return nullify_end(ctx
, NO_EXIT
);
1272 static ExitStatus
do_fstored(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1273 unsigned rx
, int scale
, target_long disp
,
1281 do_store_64(ctx
, tmp
, rb
, rx
, scale
, disp
, modify
, MO_TEQ
);
1282 tcg_temp_free_i64(tmp
);
1284 return nullify_end(ctx
, NO_EXIT
);
1287 static ExitStatus
do_fop_wew(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1288 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
))
1293 tmp
= load_frw0_i32(ra
);
1295 func(tmp
, cpu_env
, tmp
);
1297 save_frw_i32(rt
, tmp
);
1298 tcg_temp_free_i32(tmp
);
1299 return nullify_end(ctx
, NO_EXIT
);
1302 static ExitStatus
do_fop_wed(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1303 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i64
))
1310 dst
= tcg_temp_new_i32();
1312 func(dst
, cpu_env
, src
);
1314 tcg_temp_free_i64(src
);
1315 save_frw_i32(rt
, dst
);
1316 tcg_temp_free_i32(dst
);
1317 return nullify_end(ctx
, NO_EXIT
);
1320 static ExitStatus
do_fop_ded(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1321 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
))
1326 tmp
= load_frd0(ra
);
1328 func(tmp
, cpu_env
, tmp
);
1331 tcg_temp_free_i64(tmp
);
1332 return nullify_end(ctx
, NO_EXIT
);
1335 static ExitStatus
do_fop_dew(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1336 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i32
))
1342 src
= load_frw0_i32(ra
);
1343 dst
= tcg_temp_new_i64();
1345 func(dst
, cpu_env
, src
);
1347 tcg_temp_free_i32(src
);
1349 tcg_temp_free_i64(dst
);
1350 return nullify_end(ctx
, NO_EXIT
);
1353 static ExitStatus
do_fop_weww(DisasContext
*ctx
, unsigned rt
,
1354 unsigned ra
, unsigned rb
,
1355 void (*func
)(TCGv_i32
, TCGv_env
,
1356 TCGv_i32
, TCGv_i32
))
1361 a
= load_frw0_i32(ra
);
1362 b
= load_frw0_i32(rb
);
1364 func(a
, cpu_env
, a
, b
);
1366 tcg_temp_free_i32(b
);
1367 save_frw_i32(rt
, a
);
1368 tcg_temp_free_i32(a
);
1369 return nullify_end(ctx
, NO_EXIT
);
1372 static ExitStatus
do_fop_dedd(DisasContext
*ctx
, unsigned rt
,
1373 unsigned ra
, unsigned rb
,
1374 void (*func
)(TCGv_i64
, TCGv_env
,
1375 TCGv_i64
, TCGv_i64
))
1383 func(a
, cpu_env
, a
, b
);
1385 tcg_temp_free_i64(b
);
1387 tcg_temp_free_i64(a
);
1388 return nullify_end(ctx
, NO_EXIT
);
1391 /* Emit an unconditional branch to a direct target, which may or may not
1392 have already had nullification handled. */
1393 static ExitStatus
do_dbranch(DisasContext
*ctx
, target_ulong dest
,
1394 unsigned link
, bool is_n
)
1396 if (ctx
->null_cond
.c
== TCG_COND_NEVER
&& ctx
->null_lab
== NULL
) {
1398 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1402 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
1409 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1412 if (is_n
&& use_nullify_skip(ctx
)) {
1413 nullify_set(ctx
, 0);
1414 gen_goto_tb(ctx
, 0, dest
, dest
+ 4);
1416 nullify_set(ctx
, is_n
);
1417 gen_goto_tb(ctx
, 0, ctx
->iaoq_b
, dest
);
1420 nullify_end(ctx
, NO_EXIT
);
1422 nullify_set(ctx
, 0);
1423 gen_goto_tb(ctx
, 1, ctx
->iaoq_b
, ctx
->iaoq_n
);
1424 return EXIT_GOTO_TB
;
1428 /* Emit a conditional branch to a direct target. If the branch itself
1429 is nullified, we should have already used nullify_over. */
1430 static ExitStatus
do_cbranch(DisasContext
*ctx
, target_long disp
, bool is_n
,
1433 target_ulong dest
= iaoq_dest(ctx
, disp
);
1434 TCGLabel
*taken
= NULL
;
1435 TCGCond c
= cond
->c
;
1439 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1441 /* Handle TRUE and NEVER as direct branches. */
1442 if (c
== TCG_COND_ALWAYS
) {
1443 return do_dbranch(ctx
, dest
, 0, is_n
&& disp
>= 0);
1445 if (c
== TCG_COND_NEVER
) {
1446 return do_dbranch(ctx
, ctx
->iaoq_n
, 0, is_n
&& disp
< 0);
1449 taken
= gen_new_label();
1451 tcg_gen_brcond_tl(c
, cond
->a0
, cond
->a1
, taken
);
1454 /* Not taken: Condition not satisfied; nullify on backward branches. */
1455 n
= is_n
&& disp
< 0;
1456 if (n
&& use_nullify_skip(ctx
)) {
1457 nullify_set(ctx
, 0);
1458 gen_goto_tb(ctx
, which
++, ctx
->iaoq_n
, ctx
->iaoq_n
+ 4);
1460 if (!n
&& ctx
->null_lab
) {
1461 gen_set_label(ctx
->null_lab
);
1462 ctx
->null_lab
= NULL
;
1464 nullify_set(ctx
, n
);
1465 gen_goto_tb(ctx
, which
++, ctx
->iaoq_b
, ctx
->iaoq_n
);
1468 gen_set_label(taken
);
1470 /* Taken: Condition satisfied; nullify on forward branches. */
1471 n
= is_n
&& disp
>= 0;
1472 if (n
&& use_nullify_skip(ctx
)) {
1473 nullify_set(ctx
, 0);
1474 gen_goto_tb(ctx
, which
++, dest
, dest
+ 4);
1476 nullify_set(ctx
, n
);
1477 gen_goto_tb(ctx
, which
++, ctx
->iaoq_b
, dest
);
1480 /* Not taken: the branch itself was nullified. */
1481 if (ctx
->null_lab
) {
1482 gen_set_label(ctx
->null_lab
);
1483 ctx
->null_lab
= NULL
;
1485 nullify_set(ctx
, 0);
1486 gen_goto_tb(ctx
, which
, ctx
->iaoq_b
, ctx
->iaoq_n
);
1487 return EXIT_GOTO_TB
;
1489 return EXIT_IAQ_N_STALE
;
1492 return EXIT_GOTO_TB
;
1496 /* Emit an unconditional branch to an indirect target. This handles
1497 nullification of the branch itself. */
1498 static ExitStatus
do_ibranch(DisasContext
*ctx
, TCGv dest
,
1499 unsigned link
, bool is_n
)
1501 TCGv a0
, a1
, next
, tmp
;
1504 assert(ctx
->null_lab
== NULL
);
1506 if (ctx
->null_cond
.c
== TCG_COND_NEVER
) {
1508 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1510 next
= get_temp(ctx
);
1511 tcg_gen_mov_tl(next
, dest
);
1513 ctx
->iaoq_n_var
= next
;
1515 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
1517 } else if (is_n
&& use_nullify_skip(ctx
)) {
1518 /* The (conditional) branch, B, nullifies the next insn, N,
1519 and we're allowed to skip execution N (no single-step or
1520 tracepoint in effect). Since the exit_tb that we must use
1521 for the indirect branch consumes no special resources, we
1522 can (conditionally) skip B and continue execution. */
1523 /* The use_nullify_skip test implies we have a known control path. */
1524 tcg_debug_assert(ctx
->iaoq_b
!= -1);
1525 tcg_debug_assert(ctx
->iaoq_n
!= -1);
1527 /* We do have to handle the non-local temporary, DEST, before
1528 branching. Since IOAQ_F is not really live at this point, we
1529 can simply store DEST optimistically. Similarly with IAOQ_B. */
1530 tcg_gen_mov_tl(cpu_iaoq_f
, dest
);
1531 tcg_gen_addi_tl(cpu_iaoq_b
, dest
, 4);
1535 tcg_gen_movi_tl(cpu_gr
[link
], ctx
->iaoq_n
);
1538 return nullify_end(ctx
, NO_EXIT
);
1540 cond_prep(&ctx
->null_cond
);
1541 c
= ctx
->null_cond
.c
;
1542 a0
= ctx
->null_cond
.a0
;
1543 a1
= ctx
->null_cond
.a1
;
1545 tmp
= tcg_temp_new();
1546 next
= get_temp(ctx
);
1548 copy_iaoq_entry(tmp
, ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1549 tcg_gen_movcond_tl(c
, next
, a0
, a1
, tmp
, dest
);
1551 ctx
->iaoq_n_var
= next
;
1554 tcg_gen_movcond_tl(c
, cpu_gr
[link
], a0
, a1
, cpu_gr
[link
], tmp
);
1558 /* The branch nullifies the next insn, which means the state of N
1559 after the branch is the inverse of the state of N that applied
1561 tcg_gen_setcond_tl(tcg_invert_cond(c
), cpu_psw_n
, a0
, a1
);
1562 cond_free(&ctx
->null_cond
);
1563 ctx
->null_cond
= cond_make_n();
1564 ctx
->psw_n_nonzero
= true;
1566 cond_free(&ctx
->null_cond
);
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused into the text, and some structural lines (braces,
   break;, fall-through case labels, the final default handling) elided by
   extraction — TODO confirm against upstream. Code preserved byte-for-byte. */
/* do_page_zero: emulate the Linux user-space "gateway" page at address 0.
   Dispatches on ctx->iaoq_f: offset 0x00 -> SIGSEGV (null call),
   0xb0 -> light-weight-syscall trap, 0xe0 -> set thread pointer (cr27 from
   GR26, return to GR31), 0x100 -> syscall trap; anything else -> SIGILL.
   Also sanity-checks PSW[N] state and sequential arrival (iaoq_b == iaoq_f+4). */
1573 /* On Linux, page zero is normally marked execute only + gateway.
1574    Therefore normal read or write is supposed to fail, but specific
1575    offsets have kernel code mapped to raise permissions to implement
1576    system calls.  Handling this via an explicit check here, rather
1577    in than the "be disp(sr2,r0)" instruction that probably sent us
1578    here, is the easiest way to handle the branch delay slot on the
1579    aforementioned BE.  */
1580 static ExitStatus
do_page_zero(DisasContext
*ctx
)
1582     /* If by some means we get here with PSW[N]=1, that implies that
1583        the B,GATE instruction would be skipped, and we'd fault on the
1584        next insn within the privilaged page.  */
1585     switch (ctx
->null_cond
.c
) {
1586     case TCG_COND_NEVER
:
1588     case TCG_COND_ALWAYS
:
/* PSW[N] was set; clear it since the gateway insn executes. */
1589         tcg_gen_movi_tl(cpu_psw_n
, 0);
1592         /* Since this is always the first (and only) insn within the
1593            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1594         g_assert_not_reached();
1597     /* Check that we didn't arrive here via some means that allowed
1598        non-sequential instruction execution.  Normally the PSW[B] bit
1599        detects this by disallowing the B,GATE instruction to execute
1600        under such conditions.  */
1601     if (ctx
->iaoq_b
!= ctx
->iaoq_f
+ 4) {
1605     switch (ctx
->iaoq_f
) {
1606     case 0x00: /* Null pointer call */
1607         gen_excp_1(EXCP_SIGSEGV
);
1608         return EXIT_NORETURN
;
1610     case 0xb0: /* LWS */
1611         gen_excp_1(EXCP_SYSCALL_LWS
);
1612         return EXIT_NORETURN
;
1614     case 0xe0: /* SET_THREAD_POINTER */
/* cr27 is the user thread pointer register; GR26 carries the new value. */
1615         tcg_gen_mov_tl(cpu_cr27
, cpu_gr
[26]);
/* Return address convention: GR31 holds the gateway return target. */
1616         tcg_gen_mov_tl(cpu_iaoq_f
, cpu_gr
[31]);
1617         tcg_gen_addi_tl(cpu_iaoq_b
, cpu_iaoq_f
, 4);
1618         return EXIT_IAQ_N_UPDATED
;
1620     case 0x100: /* SYSCALL */
1621         gen_excp_1(EXCP_SYSCALL
);
1622         return EXIT_NORETURN
;
/* Presumably the default / error path: unknown offset -> SIGILL. */
1626         gen_excp_1(EXCP_SIGILL
);
1627         return EXIT_NORETURN
;
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces and the trailing "return NO_EXIT;" appear
   elided — TODO confirm against upstream. Code preserved byte-for-byte. */
/* trans_nop: translate an instruction with no architectural effect; only
   releases any live nullification condition. */
1631 static ExitStatus
trans_nop(DisasContext
*ctx
, uint32_t insn
,
1632                           const DisasInsn
*di
)
1634     cond_free(&ctx
->null_cond
);
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces and a likely nullify_over() preamble appear
   elided — TODO confirm against upstream. Code preserved byte-for-byte. */
/* trans_break: translate BREAK as an EXCP_DEBUG exception, wrapped in the
   nullification epilogue. */
1638 static ExitStatus
trans_break(DisasContext
*ctx
, uint32_t insn
,
1639                             const DisasInsn
*di
)
1642     return nullify_end(ctx
, gen_excp(ctx
, EXCP_DEBUG
));
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces/return elided — TODO confirm upstream.
   Code preserved byte-for-byte. */
/* trans_sync: translate SYNC as a full TCG memory barrier (sequentially
   consistent, all memory operation kinds). */
1645 static ExitStatus
trans_sync(DisasContext
*ctx
, uint32_t insn
,
1646                            const DisasInsn
*di
)
1648     /* No point in nullifying the memory barrier.  */
1649     tcg_gen_mb(TCG_BAR_SC
| TCG_MO_ALL
);
1651     cond_free(&ctx
->null_cond
);
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces/return elided — TODO confirm upstream.
   Code preserved byte-for-byte. */
/* trans_mfia: move-from-instruction-address — store the current front
   instruction offset (ctx->iaoq_f) into GR[rt] (rt from insn bits 0..4). */
1655 static ExitStatus
trans_mfia(DisasContext
*ctx
, uint32_t insn
,
1656                            const DisasInsn
*di
)
1658     unsigned rt
= extract32(insn
, 0, 5);
1659     TCGv tmp
= dest_gpr(ctx
, rt
);
1660     tcg_gen_movi_tl(tmp
, ctx
->iaoq_f
);
1661     save_gpr(ctx
, rt
, tmp
);
1663     cond_free(&ctx
->null_cond
);
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces/return elided — TODO confirm upstream.
   Code preserved byte-for-byte. */
/* trans_mfsp: move-from-space-register. Space registers are not modeled
   (user-mode emulation), so GR[rt] always receives zero. */
1667 static ExitStatus
trans_mfsp(DisasContext
*ctx
, uint32_t insn
,
1668                            const DisasInsn
*di
)
1670     unsigned rt
= extract32(insn
, 0, 5);
1671     TCGv tmp
= dest_gpr(ctx
, rt
);
1673     /* ??? We don't implement space registers.  */
1674     tcg_gen_movi_tl(tmp
, 0);
1675     save_gpr(ctx
, rt
, tmp
);
1677     cond_free(&ctx
->null_cond
);
1681 static ExitStatus
trans_mfctl(DisasContext
*ctx
, uint32_t insn
,
1682 const DisasInsn
*di
)
1684 unsigned rt
= extract32(insn
, 0, 5);
1685 unsigned ctl
= extract32(insn
, 21, 5);
1690 #ifdef TARGET_HPPA64
1691 if (extract32(insn
, 14, 1) == 0) {
1692 /* MFSAR without ,W masks low 5 bits. */
1693 tmp
= dest_gpr(ctx
, rt
);
1694 tcg_gen_andi_tl(tmp
, cpu_sar
, 31);
1695 save_gpr(ctx
, rt
, tmp
);
1699 save_gpr(ctx
, rt
, cpu_sar
);
1701 case 16: /* Interval Timer */
1702 tmp
= dest_gpr(ctx
, rt
);
1703 tcg_gen_movi_tl(tmp
, 0); /* FIXME */
1704 save_gpr(ctx
, rt
, tmp
);
1707 save_gpr(ctx
, rt
, cpu_cr26
);
1710 save_gpr(ctx
, rt
, cpu_cr27
);
1713 /* All other control registers are privileged. */
1714 return gen_illegal(ctx
);
1717 cond_free(&ctx
->null_cond
);
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces, the else keyword, a tcg_temp_free and the
   final return appear elided — TODO confirm upstream. Code byte-for-byte. */
/* trans_mtctl: move-to-control-register. Only CR11 (SAR, the shift amount
   register) is writable from user mode; the value is masked to the valid
   shift range (TARGET_LONG_BITS - 1). All other CRs raise illegal insn. */
1721 static ExitStatus
trans_mtctl(DisasContext
*ctx
, uint32_t insn
,
1722                             const DisasInsn
*di
)
1724     unsigned rin
= extract32(insn
, 16, 5);
1725     unsigned ctl
= extract32(insn
, 21, 5);
1728     if (ctl
== 11) { /* SAR */
1729         tmp
= tcg_temp_new();
/* Mask to a legal shift amount before committing to SAR. */
1730         tcg_gen_andi_tl(tmp
, load_gpr(ctx
, rin
), TARGET_LONG_BITS
- 1);
/* save_or_nullify respects a pending nullification condition. */
1731         save_or_nullify(ctx
, cpu_sar
, tmp
);
1734         /* All other control registers are privileged or read-only.  */
1735         return gen_illegal(ctx
);
1738     cond_free(&ctx
->null_cond
);
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces, a tcg_temp_free and the final return appear
   elided — TODO confirm upstream. Code preserved byte-for-byte. */
/* trans_mtsarcm: MTSARCM — write the bitwise complement of GR[rin], masked
   to a legal shift amount, into SAR (used for shift-complement idioms). */
1742 static ExitStatus
trans_mtsarcm(DisasContext
*ctx
, uint32_t insn
,
1743                               const DisasInsn
*di
)
1745     unsigned rin
= extract32(insn
, 16, 5);
1746     TCGv tmp
= tcg_temp_new();
1748     tcg_gen_not_tl(tmp
, load_gpr(ctx
, rin
));
1749     tcg_gen_andi_tl(tmp
, tmp
, TARGET_LONG_BITS
- 1);
1750     save_or_nullify(ctx
, cpu_sar
, tmp
);
1753     cond_free(&ctx
->null_cond
);
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces/return elided — TODO confirm upstream.
   Code preserved byte-for-byte. */
/* trans_ldsid: LDSID — load space identifier. Space registers are not
   modeled, so GR[rt] always receives zero. */
1757 static ExitStatus
trans_ldsid(DisasContext
*ctx
, uint32_t insn
,
1758                             const DisasInsn
*di
)
1760     unsigned rt
= extract32(insn
, 0, 5);
1761     TCGv dest
= dest_gpr(ctx
, rt
);
1763     /* Since we don't implement space registers, this returns zero.  */
1764     tcg_gen_movi_tl(dest
, 0);
1765     save_gpr(ctx
, rt
, dest
);
1767     cond_free(&ctx
->null_cond
);
1771 static const DisasInsn table_system
[] = {
1772 { 0x00000000u
, 0xfc001fe0u
, trans_break
},
1773 /* We don't implement space register, so MTSP is a nop. */
1774 { 0x00001820u
, 0xffe01fffu
, trans_nop
},
1775 { 0x00001840u
, 0xfc00ffffu
, trans_mtctl
},
1776 { 0x016018c0u
, 0xffe0ffffu
, trans_mtsarcm
},
1777 { 0x000014a0u
, 0xffffffe0u
, trans_mfia
},
1778 { 0x000004a0u
, 0xffff1fe0u
, trans_mfsp
},
1779 { 0x000008a0u
, 0xfc1fffe0u
, trans_mfctl
},
1780 { 0x00000400u
, 0xffffffffu
, trans_sync
},
1781 { 0x000010a0u
, 0xfc1f3fe0u
, trans_ldsid
},
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces/return elided — TODO confirm upstream.
   Code preserved byte-for-byte. */
/* trans_base_idx_mod: used for cache-control insns (fdc/fic/pdc with base
   modification) that are otherwise no-ops in this emulation; performs only
   the architectural side effect GR[rb] += GR[rx]. */
1784 static ExitStatus
trans_base_idx_mod(DisasContext
*ctx
, uint32_t insn
,
1785                                    const DisasInsn
*di
)
1787     unsigned rb
= extract32(insn
, 21, 5);
1788     unsigned rx
= extract32(insn
, 16, 5);
1789     TCGv dest
= dest_gpr(ctx
, rb
);
1790     TCGv src1
= load_gpr(ctx
, rb
);
1791     TCGv src2
= load_gpr(ctx
, rx
);
1793     /* The only thing we need to do is the base register modification.  */
1794     tcg_gen_add_tl(dest
, src1
, src2
);
1795     save_gpr(ctx
, rb
, dest
);
1797     cond_free(&ctx
->null_cond
);
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces, a nullify_over() preamble and the if/else
   scaffolding around the two helper calls appear elided — TODO confirm
   upstream. Code preserved byte-for-byte. */
/* trans_probe: PROBE/PROBEI — test read or write access (is_write from
   insn bit 6) to the address in GR[rb] via the probe helpers, and put the
   result into GR[rt]. The privilege-level operand is not used. */
1801 static ExitStatus
trans_probe(DisasContext
*ctx
, uint32_t insn
,
1802                             const DisasInsn
*di
)
1804     unsigned rt
= extract32(insn
, 0, 5);
1805     unsigned rb
= extract32(insn
, 21, 5);
1806     unsigned is_write
= extract32(insn
, 6, 1);
1811     /* ??? Do something with priv level operand.  */
1812     dest
= dest_gpr(ctx
, rt
);
/* Presumably guarded by "if (is_write)" — scaffolding elided by extraction. */
1814         gen_helper_probe_w(dest
, load_gpr(ctx
, rb
));
1816         gen_helper_probe_r(dest
, load_gpr(ctx
, rb
));
1818     save_gpr(ctx
, rt
, dest
);
1819     return nullify_end(ctx
, NO_EXIT
);
1822 static const DisasInsn table_mem_mgmt
[] = {
1823 { 0x04003280u
, 0xfc003fffu
, trans_nop
}, /* fdc, disp */
1824 { 0x04001280u
, 0xfc003fffu
, trans_nop
}, /* fdc, index */
1825 { 0x040012a0u
, 0xfc003fffu
, trans_base_idx_mod
}, /* fdc, index, base mod */
1826 { 0x040012c0u
, 0xfc003fffu
, trans_nop
}, /* fdce */
1827 { 0x040012e0u
, 0xfc003fffu
, trans_base_idx_mod
}, /* fdce, base mod */
1828 { 0x04000280u
, 0xfc001fffu
, trans_nop
}, /* fic 0a */
1829 { 0x040002a0u
, 0xfc001fffu
, trans_base_idx_mod
}, /* fic 0a, base mod */
1830 { 0x040013c0u
, 0xfc003fffu
, trans_nop
}, /* fic 4f */
1831 { 0x040013e0u
, 0xfc003fffu
, trans_base_idx_mod
}, /* fic 4f, base mod */
1832 { 0x040002c0u
, 0xfc001fffu
, trans_nop
}, /* fice */
1833 { 0x040002e0u
, 0xfc001fffu
, trans_base_idx_mod
}, /* fice, base mod */
1834 { 0x04002700u
, 0xfc003fffu
, trans_nop
}, /* pdc */
1835 { 0x04002720u
, 0xfc003fffu
, trans_base_idx_mod
}, /* pdc, base mod */
1836 { 0x04001180u
, 0xfc003fa0u
, trans_probe
}, /* probe */
1837 { 0x04003180u
, 0xfc003fa0u
, trans_probe
}, /* probei */
1840 static ExitStatus
trans_add(DisasContext
*ctx
, uint32_t insn
,
1841 const DisasInsn
*di
)
1843 unsigned r2
= extract32(insn
, 21, 5);
1844 unsigned r1
= extract32(insn
, 16, 5);
1845 unsigned cf
= extract32(insn
, 12, 4);
1846 unsigned ext
= extract32(insn
, 8, 4);
1847 unsigned shift
= extract32(insn
, 6, 2);
1848 unsigned rt
= extract32(insn
, 0, 5);
1849 TCGv tcg_r1
, tcg_r2
;
1853 bool is_tsv
= false;
1857 case 0x6: /* ADD, SHLADD */
1859 case 0xa: /* ADD,L, SHLADD,L */
1862 case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
1865 case 0x7: /* ADD,C */
1868 case 0xf: /* ADD,C,TSV */
1869 is_c
= is_tsv
= true;
1872 return gen_illegal(ctx
);
1878 tcg_r1
= load_gpr(ctx
, r1
);
1879 tcg_r2
= load_gpr(ctx
, r2
);
1880 ret
= do_add(ctx
, rt
, tcg_r1
, tcg_r2
, shift
, is_l
, is_tsv
, is_tc
, is_c
, cf
);
1881 return nullify_end(ctx
, ret
);
1884 static ExitStatus
trans_sub(DisasContext
*ctx
, uint32_t insn
,
1885 const DisasInsn
*di
)
1887 unsigned r2
= extract32(insn
, 21, 5);
1888 unsigned r1
= extract32(insn
, 16, 5);
1889 unsigned cf
= extract32(insn
, 12, 4);
1890 unsigned ext
= extract32(insn
, 6, 6);
1891 unsigned rt
= extract32(insn
, 0, 5);
1892 TCGv tcg_r1
, tcg_r2
;
1895 bool is_tsv
= false;
1899 case 0x10: /* SUB */
1901 case 0x30: /* SUB,TSV */
1904 case 0x14: /* SUB,B */
1907 case 0x34: /* SUB,B,TSV */
1908 is_b
= is_tsv
= true;
1910 case 0x13: /* SUB,TC */
1913 case 0x33: /* SUB,TSV,TC */
1914 is_tc
= is_tsv
= true;
1917 return gen_illegal(ctx
);
1923 tcg_r1
= load_gpr(ctx
, r1
);
1924 tcg_r2
= load_gpr(ctx
, r2
);
1925 ret
= do_sub(ctx
, rt
, tcg_r1
, tcg_r2
, is_tsv
, is_b
, is_tc
, cf
);
1926 return nullify_end(ctx
, ret
);
1929 static ExitStatus
trans_log(DisasContext
*ctx
, uint32_t insn
,
1930 const DisasInsn
*di
)
1932 unsigned r2
= extract32(insn
, 21, 5);
1933 unsigned r1
= extract32(insn
, 16, 5);
1934 unsigned cf
= extract32(insn
, 12, 4);
1935 unsigned rt
= extract32(insn
, 0, 5);
1936 TCGv tcg_r1
, tcg_r2
;
1942 tcg_r1
= load_gpr(ctx
, r1
);
1943 tcg_r2
= load_gpr(ctx
, r2
);
1944 ret
= do_log(ctx
, rt
, tcg_r1
, tcg_r2
, cf
, di
->f_ttt
);
1945 return nullify_end(ctx
, ret
);
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces, the if/else scaffolding (likely on r1 == 0)
   and the final return appear elided — TODO confirm upstream.
   Code preserved byte-for-byte. */
/* trans_copy: the gas COPY pseudo-op, "OR r,0,t" — move GR[r1] to GR[rt];
   the visible zero path presumably handles r1 == 0 (load immediate 0). */
1948 /* OR r,0,t -> COPY (according to gas) */
1949 static ExitStatus
trans_copy(DisasContext
*ctx
, uint32_t insn
,
1950                            const DisasInsn
*di
)
1952     unsigned r1
= extract32(insn
, 16, 5);
1953     unsigned rt
= extract32(insn
, 0, 5);
1956         TCGv dest
= dest_gpr(ctx
, rt
);
1957         tcg_gen_movi_tl(dest
, 0);
1958         save_gpr(ctx
, rt
, dest
);
1960         save_gpr(ctx
, rt
, cpu_gr
[r1
]);
1962     cond_free(&ctx
->null_cond
);
1966 static ExitStatus
trans_cmpclr(DisasContext
*ctx
, uint32_t insn
,
1967 const DisasInsn
*di
)
1969 unsigned r2
= extract32(insn
, 21, 5);
1970 unsigned r1
= extract32(insn
, 16, 5);
1971 unsigned cf
= extract32(insn
, 12, 4);
1972 unsigned rt
= extract32(insn
, 0, 5);
1973 TCGv tcg_r1
, tcg_r2
;
1979 tcg_r1
= load_gpr(ctx
, r1
);
1980 tcg_r2
= load_gpr(ctx
, r2
);
1981 ret
= do_cmpclr(ctx
, rt
, tcg_r1
, tcg_r2
, cf
);
1982 return nullify_end(ctx
, ret
);
1985 static ExitStatus
trans_uxor(DisasContext
*ctx
, uint32_t insn
,
1986 const DisasInsn
*di
)
1988 unsigned r2
= extract32(insn
, 21, 5);
1989 unsigned r1
= extract32(insn
, 16, 5);
1990 unsigned cf
= extract32(insn
, 12, 4);
1991 unsigned rt
= extract32(insn
, 0, 5);
1992 TCGv tcg_r1
, tcg_r2
;
1998 tcg_r1
= load_gpr(ctx
, r1
);
1999 tcg_r2
= load_gpr(ctx
, r2
);
2000 ret
= do_unit(ctx
, rt
, tcg_r1
, tcg_r2
, cf
, false, tcg_gen_xor_tl
);
2001 return nullify_end(ctx
, ret
);
2004 static ExitStatus
trans_uaddcm(DisasContext
*ctx
, uint32_t insn
,
2005 const DisasInsn
*di
)
2007 unsigned r2
= extract32(insn
, 21, 5);
2008 unsigned r1
= extract32(insn
, 16, 5);
2009 unsigned cf
= extract32(insn
, 12, 4);
2010 unsigned is_tc
= extract32(insn
, 6, 1);
2011 unsigned rt
= extract32(insn
, 0, 5);
2012 TCGv tcg_r1
, tcg_r2
, tmp
;
2018 tcg_r1
= load_gpr(ctx
, r1
);
2019 tcg_r2
= load_gpr(ctx
, r2
);
2020 tmp
= get_temp(ctx
);
2021 tcg_gen_not_tl(tmp
, tcg_r2
);
2022 ret
= do_unit(ctx
, rt
, tcg_r1
, tmp
, cf
, is_tc
, tcg_gen_add_tl
);
2023 return nullify_end(ctx
, ret
);
2026 static ExitStatus
trans_dcor(DisasContext
*ctx
, uint32_t insn
,
2027 const DisasInsn
*di
)
2029 unsigned r2
= extract32(insn
, 21, 5);
2030 unsigned cf
= extract32(insn
, 12, 4);
2031 unsigned is_i
= extract32(insn
, 6, 1);
2032 unsigned rt
= extract32(insn
, 0, 5);
2038 tmp
= get_temp(ctx
);
2039 tcg_gen_shri_tl(tmp
, cpu_psw_cb
, 3);
2041 tcg_gen_not_tl(tmp
, tmp
);
2043 tcg_gen_andi_tl(tmp
, tmp
, 0x11111111);
2044 tcg_gen_muli_tl(tmp
, tmp
, 6);
2045 ret
= do_unit(ctx
, rt
, tmp
, load_gpr(ctx
, r2
), cf
, false,
2046 is_i
? tcg_gen_add_tl
: tcg_gen_sub_tl
);
2048 return nullify_end(ctx
, ret
);
2051 static ExitStatus
trans_ds(DisasContext
*ctx
, uint32_t insn
,
2052 const DisasInsn
*di
)
2054 unsigned r2
= extract32(insn
, 21, 5);
2055 unsigned r1
= extract32(insn
, 16, 5);
2056 unsigned cf
= extract32(insn
, 12, 4);
2057 unsigned rt
= extract32(insn
, 0, 5);
2058 TCGv dest
, add1
, add2
, addc
, zero
, in1
, in2
;
2062 in1
= load_gpr(ctx
, r1
);
2063 in2
= load_gpr(ctx
, r2
);
2065 add1
= tcg_temp_new();
2066 add2
= tcg_temp_new();
2067 addc
= tcg_temp_new();
2068 dest
= tcg_temp_new();
2069 zero
= tcg_const_tl(0);
2071 /* Form R1 << 1 | PSW[CB]{8}. */
2072 tcg_gen_add_tl(add1
, in1
, in1
);
2073 tcg_gen_add_tl(add1
, add1
, cpu_psw_cb_msb
);
2075 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2076 carry{8} requires that we subtract via + ~R2 + 1, as described in
2077 the manual. By extracting and masking V, we can produce the
2078 proper inputs to the addition without movcond. */
2079 tcg_gen_sari_tl(addc
, cpu_psw_v
, TARGET_LONG_BITS
- 1);
2080 tcg_gen_xor_tl(add2
, in2
, addc
);
2081 tcg_gen_andi_tl(addc
, addc
, 1);
2082 /* ??? This is only correct for 32-bit. */
2083 tcg_gen_add2_i32(dest
, cpu_psw_cb_msb
, add1
, zero
, add2
, zero
);
2084 tcg_gen_add2_i32(dest
, cpu_psw_cb_msb
, dest
, cpu_psw_cb_msb
, addc
, zero
);
2086 tcg_temp_free(addc
);
2087 tcg_temp_free(zero
);
2089 /* Write back the result register. */
2090 save_gpr(ctx
, rt
, dest
);
2092 /* Write back PSW[CB]. */
2093 tcg_gen_xor_tl(cpu_psw_cb
, add1
, add2
);
2094 tcg_gen_xor_tl(cpu_psw_cb
, cpu_psw_cb
, dest
);
2096 /* Write back PSW[V] for the division step. */
2097 tcg_gen_neg_tl(cpu_psw_v
, cpu_psw_cb_msb
);
2098 tcg_gen_xor_tl(cpu_psw_v
, cpu_psw_v
, in2
);
2100 /* Install the new nullification. */
2105 /* ??? The lshift is supposed to contribute to overflow. */
2106 sv
= do_add_sv(ctx
, dest
, add1
, add2
);
2108 ctx
->null_cond
= do_cond(cf
, dest
, cpu_psw_cb_msb
, sv
);
2111 tcg_temp_free(add1
);
2112 tcg_temp_free(add2
);
2113 tcg_temp_free(dest
);
2115 return nullify_end(ctx
, NO_EXIT
);
2118 static const DisasInsn table_arith_log
[] = {
2119 { 0x08000240u
, 0xfc00ffffu
, trans_nop
}, /* or x,y,0 */
2120 { 0x08000240u
, 0xffe0ffe0u
, trans_copy
}, /* or x,0,t */
2121 { 0x08000000u
, 0xfc000fe0u
, trans_log
, .f_ttt
= tcg_gen_andc_tl
},
2122 { 0x08000200u
, 0xfc000fe0u
, trans_log
, .f_ttt
= tcg_gen_and_tl
},
2123 { 0x08000240u
, 0xfc000fe0u
, trans_log
, .f_ttt
= tcg_gen_or_tl
},
2124 { 0x08000280u
, 0xfc000fe0u
, trans_log
, .f_ttt
= tcg_gen_xor_tl
},
2125 { 0x08000880u
, 0xfc000fe0u
, trans_cmpclr
},
2126 { 0x08000380u
, 0xfc000fe0u
, trans_uxor
},
2127 { 0x08000980u
, 0xfc000fa0u
, trans_uaddcm
},
2128 { 0x08000b80u
, 0xfc1f0fa0u
, trans_dcor
},
2129 { 0x08000440u
, 0xfc000fe0u
, trans_ds
},
2130 { 0x08000700u
, 0xfc0007e0u
, trans_add
}, /* add */
2131 { 0x08000400u
, 0xfc0006e0u
, trans_sub
}, /* sub; sub,b; sub,tsv */
2132 { 0x080004c0u
, 0xfc0007e0u
, trans_sub
}, /* sub,tc; sub,tsv,tc */
2133 { 0x08000200u
, 0xfc000320u
, trans_add
}, /* shladd */
2136 static ExitStatus
trans_addi(DisasContext
*ctx
, uint32_t insn
)
2138 target_long im
= low_sextract(insn
, 0, 11);
2139 unsigned e1
= extract32(insn
, 11, 1);
2140 unsigned cf
= extract32(insn
, 12, 4);
2141 unsigned rt
= extract32(insn
, 16, 5);
2142 unsigned r2
= extract32(insn
, 21, 5);
2143 unsigned o1
= extract32(insn
, 26, 1);
2144 TCGv tcg_im
, tcg_r2
;
2151 tcg_im
= load_const(ctx
, im
);
2152 tcg_r2
= load_gpr(ctx
, r2
);
2153 ret
= do_add(ctx
, rt
, tcg_im
, tcg_r2
, 0, false, e1
, !o1
, false, cf
);
2155 return nullify_end(ctx
, ret
);
2158 static ExitStatus
trans_subi(DisasContext
*ctx
, uint32_t insn
)
2160 target_long im
= low_sextract(insn
, 0, 11);
2161 unsigned e1
= extract32(insn
, 11, 1);
2162 unsigned cf
= extract32(insn
, 12, 4);
2163 unsigned rt
= extract32(insn
, 16, 5);
2164 unsigned r2
= extract32(insn
, 21, 5);
2165 TCGv tcg_im
, tcg_r2
;
2172 tcg_im
= load_const(ctx
, im
);
2173 tcg_r2
= load_gpr(ctx
, r2
);
2174 ret
= do_sub(ctx
, rt
, tcg_im
, tcg_r2
, e1
, false, false, cf
);
2176 return nullify_end(ctx
, ret
);
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces, the ExitStatus ret declaration and a
   conditional nullify_over() appear elided — TODO confirm upstream.
   Code preserved byte-for-byte. */
/* trans_cmpiclr: CMPICLR — compare 11-bit sign-extended immediate against
   GR[r2] under condition cf and clear GR[rt]; delegates to do_cmpclr. */
2179 static ExitStatus
trans_cmpiclr(DisasContext
*ctx
, uint32_t insn
)
2181     target_long im
= low_sextract(insn
, 0, 11);
2182     unsigned cf
= extract32(insn
, 12, 4);
2183     unsigned rt
= extract32(insn
, 16, 5);
2184     unsigned r2
= extract32(insn
, 21, 5);
2185     TCGv tcg_im
, tcg_r2
;
2192     tcg_im
= load_const(ctx
, im
);
2193     tcg_r2
= load_gpr(ctx
, r2
);
2194     ret
= do_cmpclr(ctx
, rt
, tcg_im
, tcg_r2
, cf
);
2196     return nullify_end(ctx
, ret
);
2199 static ExitStatus
trans_ld_idx_i(DisasContext
*ctx
, uint32_t insn
,
2200 const DisasInsn
*di
)
2202 unsigned rt
= extract32(insn
, 0, 5);
2203 unsigned m
= extract32(insn
, 5, 1);
2204 unsigned sz
= extract32(insn
, 6, 2);
2205 unsigned a
= extract32(insn
, 13, 1);
2206 int disp
= low_sextract(insn
, 16, 5);
2207 unsigned rb
= extract32(insn
, 21, 5);
2208 int modify
= (m
? (a
? -1 : 1) : 0);
2209 TCGMemOp mop
= MO_TE
| sz
;
2211 return do_load(ctx
, rt
, rb
, 0, 0, disp
, modify
, mop
);
2214 static ExitStatus
trans_ld_idx_x(DisasContext
*ctx
, uint32_t insn
,
2215 const DisasInsn
*di
)
2217 unsigned rt
= extract32(insn
, 0, 5);
2218 unsigned m
= extract32(insn
, 5, 1);
2219 unsigned sz
= extract32(insn
, 6, 2);
2220 unsigned u
= extract32(insn
, 13, 1);
2221 unsigned rx
= extract32(insn
, 16, 5);
2222 unsigned rb
= extract32(insn
, 21, 5);
2223 TCGMemOp mop
= MO_TE
| sz
;
2225 return do_load(ctx
, rt
, rb
, rx
, u
? sz
: 0, 0, m
, mop
);
2228 static ExitStatus
trans_st_idx_i(DisasContext
*ctx
, uint32_t insn
,
2229 const DisasInsn
*di
)
2231 int disp
= low_sextract(insn
, 0, 5);
2232 unsigned m
= extract32(insn
, 5, 1);
2233 unsigned sz
= extract32(insn
, 6, 2);
2234 unsigned a
= extract32(insn
, 13, 1);
2235 unsigned rr
= extract32(insn
, 16, 5);
2236 unsigned rb
= extract32(insn
, 21, 5);
2237 int modify
= (m
? (a
? -1 : 1) : 0);
2238 TCGMemOp mop
= MO_TE
| sz
;
2240 return do_store(ctx
, rr
, rb
, disp
, modify
, mop
);
2243 static ExitStatus
trans_ldcw(DisasContext
*ctx
, uint32_t insn
,
2244 const DisasInsn
*di
)
2246 unsigned rt
= extract32(insn
, 0, 5);
2247 unsigned m
= extract32(insn
, 5, 1);
2248 unsigned i
= extract32(insn
, 12, 1);
2249 unsigned au
= extract32(insn
, 13, 1);
2250 unsigned rx
= extract32(insn
, 16, 5);
2251 unsigned rb
= extract32(insn
, 21, 5);
2252 TCGMemOp mop
= MO_TEUL
| MO_ALIGN_16
;
2253 TCGv zero
, addr
, base
, dest
;
2254 int modify
, disp
= 0, scale
= 0;
2258 /* ??? Share more code with do_load and do_load_{32,64}. */
2261 modify
= (m
? (au
? -1 : 1) : 0);
2262 disp
= low_sextract(rx
, 0, 5);
2267 scale
= mop
& MO_SIZE
;
2271 /* Base register modification. Make sure if RT == RB, we see
2272 the result of the load. */
2273 dest
= get_temp(ctx
);
2275 dest
= dest_gpr(ctx
, rt
);
2278 addr
= tcg_temp_new();
2279 base
= load_gpr(ctx
, rb
);
2281 tcg_gen_shli_tl(addr
, cpu_gr
[rx
], scale
);
2282 tcg_gen_add_tl(addr
, addr
, base
);
2284 tcg_gen_addi_tl(addr
, base
, disp
);
2287 zero
= tcg_const_tl(0);
2288 tcg_gen_atomic_xchg_tl(dest
, (modify
<= 0 ? addr
: base
),
2289 zero
, MMU_USER_IDX
, mop
);
2291 save_gpr(ctx
, rb
, addr
);
2293 save_gpr(ctx
, rt
, dest
);
2295 return nullify_end(ctx
, NO_EXIT
);
2298 static ExitStatus
trans_stby(DisasContext
*ctx
, uint32_t insn
,
2299 const DisasInsn
*di
)
2301 target_long disp
= low_sextract(insn
, 0, 5);
2302 unsigned m
= extract32(insn
, 5, 1);
2303 unsigned a
= extract32(insn
, 13, 1);
2304 unsigned rt
= extract32(insn
, 16, 5);
2305 unsigned rb
= extract32(insn
, 21, 5);
2310 addr
= tcg_temp_new();
2311 if (m
|| disp
== 0) {
2312 tcg_gen_mov_tl(addr
, load_gpr(ctx
, rb
));
2314 tcg_gen_addi_tl(addr
, load_gpr(ctx
, rb
), disp
);
2316 val
= load_gpr(ctx
, rt
);
2319 gen_helper_stby_e(cpu_env
, addr
, val
);
2321 gen_helper_stby_b(cpu_env
, addr
, val
);
2325 tcg_gen_addi_tl(addr
, addr
, disp
);
2326 tcg_gen_andi_tl(addr
, addr
, ~3);
2327 save_gpr(ctx
, rb
, addr
);
2329 tcg_temp_free(addr
);
2331 return nullify_end(ctx
, NO_EXIT
);
2334 static const DisasInsn table_index_mem
[] = {
2335 { 0x0c001000u
, 0xfc001300, trans_ld_idx_i
}, /* LD[BHWD], im */
2336 { 0x0c000000u
, 0xfc001300, trans_ld_idx_x
}, /* LD[BHWD], rx */
2337 { 0x0c001200u
, 0xfc001300, trans_st_idx_i
}, /* ST[BHWD] */
2338 { 0x0c0001c0u
, 0xfc0003c0, trans_ldcw
},
2339 { 0x0c001300u
, 0xfc0013c0, trans_stby
},
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces/return elided — TODO confirm upstream.
   Code preserved byte-for-byte. */
/* trans_ldil: LDIL — load the assembled 21-bit left immediate into GR[rt]. */
2342 static ExitStatus
trans_ldil(DisasContext
*ctx
, uint32_t insn
)
2344     unsigned rt
= extract32(insn
, 21, 5);
2345     target_long i
= assemble_21(insn
);
2346     TCGv tcg_rt
= dest_gpr(ctx
, rt
);
2348     tcg_gen_movi_tl(tcg_rt
, i
);
2349     save_gpr(ctx
, rt
, tcg_rt
);
2350     cond_free(&ctx
->null_cond
);
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces/return elided — TODO confirm upstream.
   Code preserved byte-for-byte. */
/* trans_addil: ADDIL — GR[1] = GR[rt] + assembled 21-bit immediate.
   Note the architectural fixed destination is GR1, not rt. */
2355 static ExitStatus
trans_addil(DisasContext
*ctx
, uint32_t insn
)
2357     unsigned rt
= extract32(insn
, 21, 5);
2358     target_long i
= assemble_21(insn
);
2359     TCGv tcg_rt
= load_gpr(ctx
, rt
);
2360     TCGv tcg_r1
= dest_gpr(ctx
, 1);
2362     tcg_gen_addi_tl(tcg_r1
, tcg_rt
, i
);
2363     save_gpr(ctx
, 1, tcg_r1
);
2364     cond_free(&ctx
->null_cond
);
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces, the if/else scaffolding on rb == 0 and the
   final return appear elided — TODO confirm upstream. Code byte-for-byte. */
/* trans_ldo: LDO — GR[rt] = GR[rb] + 16-bit assembled displacement.
   rb == 0 is the LDI pseudo-op (load immediate). */
2369 static ExitStatus
trans_ldo(DisasContext
*ctx
, uint32_t insn
)
2371     unsigned rb
= extract32(insn
, 21, 5);
2372     unsigned rt
= extract32(insn
, 16, 5);
2373     target_long i
= assemble_16(insn
);
2374     TCGv tcg_rt
= dest_gpr(ctx
, rt
);
2376     /* Special case rb == 0, for the LDI pseudo-op.
2377        The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
2379         tcg_gen_movi_tl(tcg_rt
, i
);
2381         tcg_gen_addi_tl(tcg_rt
, cpu_gr
[rb
], i
);
2383     save_gpr(ctx
, rt
, tcg_rt
);
2384     cond_free(&ctx
->null_cond
);
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces elided — TODO confirm upstream.
   Code preserved byte-for-byte. */
/* trans_load: generic displacement-form load, LD[BHW] with memop MOP.
   When is_mod is set, the sign of the displacement selects pre/post base
   modification (+1 / -1) per PA-RISC short-displacement semantics. */
2389 static ExitStatus
trans_load(DisasContext
*ctx
, uint32_t insn
,
2390                            bool is_mod
, TCGMemOp mop
)
2392     unsigned rb
= extract32(insn
, 21, 5);
2393     unsigned rt
= extract32(insn
, 16, 5);
2394     target_long i
= assemble_16(insn
);
2396     return do_load(ctx
, rt
, rb
, 0, 0, i
, is_mod
? (i
< 0 ? -1 : 1) : 0, mop
);
2399 static ExitStatus
trans_load_w(DisasContext
*ctx
, uint32_t insn
)
2401 unsigned rb
= extract32(insn
, 21, 5);
2402 unsigned rt
= extract32(insn
, 16, 5);
2403 target_long i
= assemble_16a(insn
);
2404 unsigned ext2
= extract32(insn
, 1, 2);
2409 /* FLDW without modification. */
2410 return do_floadw(ctx
, ext2
* 32 + rt
, rb
, 0, 0, i
, 0);
2412 /* LDW with modification. Note that the sign of I selects
2413 post-dec vs pre-inc. */
2414 return do_load(ctx
, rt
, rb
, 0, 0, i
, (i
< 0 ? 1 : -1), MO_TEUL
);
2416 return gen_illegal(ctx
);
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces elided — TODO confirm upstream.
   Code preserved byte-for-byte. */
/* trans_fload_mod: FLDW with base modification. Target FP register index is
   t1*32 + t0 (the half-register encoding); bit "a" selects pre-decrement
   (-1) vs post-increment (+1) of the base. */
2420 static ExitStatus
trans_fload_mod(DisasContext
*ctx
, uint32_t insn
)
2422     target_long i
= assemble_16a(insn
);
2423     unsigned t1
= extract32(insn
, 1, 1);
2424     unsigned a
= extract32(insn
, 2, 1);
2425     unsigned t0
= extract32(insn
, 16, 5);
2426     unsigned rb
= extract32(insn
, 21, 5);
2428     /* FLDW with modification.  */
2429     return do_floadw(ctx
, t1
* 32 + t0
, rb
, 0, 0, i
, (a
? -1 : 1));
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces elided — TODO confirm upstream.
   Code preserved byte-for-byte. */
/* trans_store: generic displacement-form store, ST[BHW] with memop MOP.
   Mirrors trans_load: with is_mod, displacement sign selects the base
   modification direction. */
2432 static ExitStatus
trans_store(DisasContext
*ctx
, uint32_t insn
,
2433                             bool is_mod
, TCGMemOp mop
)
2435     unsigned rb
= extract32(insn
, 21, 5);
2436     unsigned rt
= extract32(insn
, 16, 5);
2437     target_long i
= assemble_16(insn
);
2439     return do_store(ctx
, rt
, rb
, i
, is_mod
? (i
< 0 ? -1 : 1) : 0, mop
);
2442 static ExitStatus
trans_store_w(DisasContext
*ctx
, uint32_t insn
)
2444 unsigned rb
= extract32(insn
, 21, 5);
2445 unsigned rt
= extract32(insn
, 16, 5);
2446 target_long i
= assemble_16a(insn
);
2447 unsigned ext2
= extract32(insn
, 1, 2);
2452 /* FSTW without modification. */
2453 return do_fstorew(ctx
, ext2
* 32 + rt
, rb
, 0, 0, i
, 0);
2455 /* LDW with modification. */
2456 return do_store(ctx
, rt
, rb
, i
, (i
< 0 ? 1 : -1), MO_TEUL
);
2458 return gen_illegal(ctx
);
/* NOTE(review): extraction artifact — tokens split across lines, original
   line numbers fused in; braces elided — TODO confirm upstream.
   Code preserved byte-for-byte. */
/* trans_fstore_mod: FSTW with base modification; exact store-side mirror of
   trans_fload_mod (FP reg t1*32 + t0, bit "a" selects -1/+1 base update). */
2462 static ExitStatus
trans_fstore_mod(DisasContext
*ctx
, uint32_t insn
)
2464     target_long i
= assemble_16a(insn
);
2465     unsigned t1
= extract32(insn
, 1, 1);
2466     unsigned a
= extract32(insn
, 2, 1);
2467     unsigned t0
= extract32(insn
, 16, 5);
2468     unsigned rb
= extract32(insn
, 21, 5);
2470     /* FSTW with modification.  */
2471     return do_fstorew(ctx
, t1
* 32 + t0
, rb
, 0, 0, i
, (a
? -1 : 1));
2474 static ExitStatus
trans_copr_w(DisasContext
*ctx
, uint32_t insn
)
2476 unsigned t0
= extract32(insn
, 0, 5);
2477 unsigned m
= extract32(insn
, 5, 1);
2478 unsigned t1
= extract32(insn
, 6, 1);
2479 unsigned ext3
= extract32(insn
, 7, 3);
2480 /* unsigned cc = extract32(insn, 10, 2); */
2481 unsigned i
= extract32(insn
, 12, 1);
2482 unsigned ua
= extract32(insn
, 13, 1);
2483 unsigned rx
= extract32(insn
, 16, 5);
2484 unsigned rb
= extract32(insn
, 21, 5);
2485 unsigned rt
= t1
* 32 + t0
;
2486 int modify
= (m
? (ua
? -1 : 1) : 0);
2490 scale
= (ua
? 2 : 0);
2494 disp
= low_sextract(rx
, 0, 5);
2497 modify
= (m
? (ua
? -1 : 1) : 0);
2502 return do_floadw(ctx
, rt
, rb
, rx
, scale
, disp
, modify
);
2504 return do_fstorew(ctx
, rt
, rb
, rx
, scale
, disp
, modify
);
2506 return gen_illegal(ctx
);
2509 static ExitStatus
trans_copr_dw(DisasContext
*ctx
, uint32_t insn
)
2511 unsigned rt
= extract32(insn
, 0, 5);
2512 unsigned m
= extract32(insn
, 5, 1);
2513 unsigned ext4
= extract32(insn
, 6, 4);
2514 /* unsigned cc = extract32(insn, 10, 2); */
2515 unsigned i
= extract32(insn
, 12, 1);
2516 unsigned ua
= extract32(insn
, 13, 1);
2517 unsigned rx
= extract32(insn
, 16, 5);
2518 unsigned rb
= extract32(insn
, 21, 5);
2519 int modify
= (m
? (ua
? -1 : 1) : 0);
2523 scale
= (ua
? 3 : 0);
2527 disp
= low_sextract(rx
, 0, 5);
2530 modify
= (m
? (ua
? -1 : 1) : 0);
2535 return do_floadd(ctx
, rt
, rb
, rx
, scale
, disp
, modify
);
2537 return do_fstored(ctx
, rt
, rb
, rx
, scale
, disp
, modify
);
2539 return gen_illegal(ctx
);
2543 static ExitStatus
trans_cmpb(DisasContext
*ctx
, uint32_t insn
,
2544 bool is_true
, bool is_imm
, bool is_dw
)
2546 target_long disp
= assemble_12(insn
) * 4;
2547 unsigned n
= extract32(insn
, 1, 1);
2548 unsigned c
= extract32(insn
, 13, 3);
2549 unsigned r
= extract32(insn
, 21, 5);
2550 unsigned cf
= c
* 2 + !is_true
;
2551 TCGv dest
, in1
, in2
, sv
;
2557 in1
= load_const(ctx
, low_sextract(insn
, 16, 5));
2559 in1
= load_gpr(ctx
, extract32(insn
, 16, 5));
2561 in2
= load_gpr(ctx
, r
);
2562 dest
= get_temp(ctx
);
2564 tcg_gen_sub_tl(dest
, in1
, in2
);
2568 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
2571 cond
= do_sub_cond(cf
, dest
, in1
, in2
, sv
);
2572 return do_cbranch(ctx
, disp
, n
, &cond
);
2575 static ExitStatus
trans_addb(DisasContext
*ctx
, uint32_t insn
,
2576 bool is_true
, bool is_imm
)
2578 target_long disp
= assemble_12(insn
) * 4;
2579 unsigned n
= extract32(insn
, 1, 1);
2580 unsigned c
= extract32(insn
, 13, 3);
2581 unsigned r
= extract32(insn
, 21, 5);
2582 unsigned cf
= c
* 2 + !is_true
;
2583 TCGv dest
, in1
, in2
, sv
, cb_msb
;
2589 in1
= load_const(ctx
, low_sextract(insn
, 16, 5));
2591 in1
= load_gpr(ctx
, extract32(insn
, 16, 5));
2593 in2
= load_gpr(ctx
, r
);
2594 dest
= dest_gpr(ctx
, r
);
2596 TCGV_UNUSED(cb_msb
);
2600 tcg_gen_add_tl(dest
, in1
, in2
);
2603 cb_msb
= get_temp(ctx
);
2604 tcg_gen_movi_tl(cb_msb
, 0);
2605 tcg_gen_add2_tl(dest
, cb_msb
, in1
, cb_msb
, in2
, cb_msb
);
2608 tcg_gen_add_tl(dest
, in1
, in2
);
2609 sv
= do_add_sv(ctx
, dest
, in1
, in2
);
2613 cond
= do_cond(cf
, dest
, cb_msb
, sv
);
2614 return do_cbranch(ctx
, disp
, n
, &cond
);
2617 static ExitStatus
trans_bb(DisasContext
*ctx
, uint32_t insn
)
2619 target_long disp
= assemble_12(insn
) * 4;
2620 unsigned n
= extract32(insn
, 1, 1);
2621 unsigned c
= extract32(insn
, 15, 1);
2622 unsigned r
= extract32(insn
, 16, 5);
2623 unsigned p
= extract32(insn
, 21, 5);
2624 unsigned i
= extract32(insn
, 26, 1);
2630 tmp
= tcg_temp_new();
2631 tcg_r
= load_gpr(ctx
, r
);
2633 tcg_gen_shli_tl(tmp
, tcg_r
, p
);
2635 tcg_gen_shl_tl(tmp
, tcg_r
, cpu_sar
);
2638 cond
= cond_make_0(c
? TCG_COND_GE
: TCG_COND_LT
, tmp
);
2640 return do_cbranch(ctx
, disp
, n
, &cond
);
2643 static ExitStatus
trans_movb(DisasContext
*ctx
, uint32_t insn
, bool is_imm
)
2645 target_long disp
= assemble_12(insn
) * 4;
2646 unsigned n
= extract32(insn
, 1, 1);
2647 unsigned c
= extract32(insn
, 13, 3);
2648 unsigned t
= extract32(insn
, 16, 5);
2649 unsigned r
= extract32(insn
, 21, 5);
2655 dest
= dest_gpr(ctx
, r
);
2657 tcg_gen_movi_tl(dest
, low_sextract(t
, 0, 5));
2658 } else if (t
== 0) {
2659 tcg_gen_movi_tl(dest
, 0);
2661 tcg_gen_mov_tl(dest
, cpu_gr
[t
]);
2664 cond
= do_sed_cond(c
, dest
);
2665 return do_cbranch(ctx
, disp
, n
, &cond
);
2668 static ExitStatus
trans_shrpw_sar(DisasContext
*ctx
, uint32_t insn
,
2669 const DisasInsn
*di
)
2671 unsigned rt
= extract32(insn
, 0, 5);
2672 unsigned c
= extract32(insn
, 13, 3);
2673 unsigned r1
= extract32(insn
, 16, 5);
2674 unsigned r2
= extract32(insn
, 21, 5);
2681 dest
= dest_gpr(ctx
, rt
);
2683 tcg_gen_ext32u_tl(dest
, load_gpr(ctx
, r2
));
2684 tcg_gen_shr_tl(dest
, dest
, cpu_sar
);
2685 } else if (r1
== r2
) {
2686 TCGv_i32 t32
= tcg_temp_new_i32();
2687 tcg_gen_trunc_tl_i32(t32
, load_gpr(ctx
, r2
));
2688 tcg_gen_rotr_i32(t32
, t32
, cpu_sar
);
2689 tcg_gen_extu_i32_tl(dest
, t32
);
2690 tcg_temp_free_i32(t32
);
2692 TCGv_i64 t
= tcg_temp_new_i64();
2693 TCGv_i64 s
= tcg_temp_new_i64();
2695 tcg_gen_concat_tl_i64(t
, load_gpr(ctx
, r2
), load_gpr(ctx
, r1
));
2696 tcg_gen_extu_tl_i64(s
, cpu_sar
);
2697 tcg_gen_shr_i64(t
, t
, s
);
2698 tcg_gen_trunc_i64_tl(dest
, t
);
2700 tcg_temp_free_i64(t
);
2701 tcg_temp_free_i64(s
);
2703 save_gpr(ctx
, rt
, dest
);
2705 /* Install the new nullification. */
2706 cond_free(&ctx
->null_cond
);
2708 ctx
->null_cond
= do_sed_cond(c
, dest
);
2710 return nullify_end(ctx
, NO_EXIT
);
2713 static ExitStatus
trans_shrpw_imm(DisasContext
*ctx
, uint32_t insn
,
2714 const DisasInsn
*di
)
2716 unsigned rt
= extract32(insn
, 0, 5);
2717 unsigned cpos
= extract32(insn
, 5, 5);
2718 unsigned c
= extract32(insn
, 13, 3);
2719 unsigned r1
= extract32(insn
, 16, 5);
2720 unsigned r2
= extract32(insn
, 21, 5);
2721 unsigned sa
= 31 - cpos
;
2728 dest
= dest_gpr(ctx
, rt
);
2729 t2
= load_gpr(ctx
, r2
);
2731 TCGv_i32 t32
= tcg_temp_new_i32();
2732 tcg_gen_trunc_tl_i32(t32
, t2
);
2733 tcg_gen_rotri_i32(t32
, t32
, sa
);
2734 tcg_gen_extu_i32_tl(dest
, t32
);
2735 tcg_temp_free_i32(t32
);
2736 } else if (r1
== 0) {
2737 tcg_gen_extract_tl(dest
, t2
, sa
, 32 - sa
);
2739 TCGv t0
= tcg_temp_new();
2740 tcg_gen_extract_tl(t0
, t2
, sa
, 32 - sa
);
2741 tcg_gen_deposit_tl(dest
, t0
, cpu_gr
[r1
], 32 - sa
, sa
);
2744 save_gpr(ctx
, rt
, dest
);
2746 /* Install the new nullification. */
2747 cond_free(&ctx
->null_cond
);
2749 ctx
->null_cond
= do_sed_cond(c
, dest
);
2751 return nullify_end(ctx
, NO_EXIT
);
2754 static ExitStatus
trans_extrw_sar(DisasContext
*ctx
, uint32_t insn
,
2755 const DisasInsn
*di
)
2757 unsigned clen
= extract32(insn
, 0, 5);
2758 unsigned is_se
= extract32(insn
, 10, 1);
2759 unsigned c
= extract32(insn
, 13, 3);
2760 unsigned rt
= extract32(insn
, 16, 5);
2761 unsigned rr
= extract32(insn
, 21, 5);
2762 unsigned len
= 32 - clen
;
2763 TCGv dest
, src
, tmp
;
2769 dest
= dest_gpr(ctx
, rt
);
2770 src
= load_gpr(ctx
, rr
);
2771 tmp
= tcg_temp_new();
2773 /* Recall that SAR is using big-endian bit numbering. */
2774 tcg_gen_xori_tl(tmp
, cpu_sar
, TARGET_LONG_BITS
- 1);
2776 tcg_gen_sar_tl(dest
, src
, tmp
);
2777 tcg_gen_sextract_tl(dest
, dest
, 0, len
);
2779 tcg_gen_shr_tl(dest
, src
, tmp
);
2780 tcg_gen_extract_tl(dest
, dest
, 0, len
);
2783 save_gpr(ctx
, rt
, dest
);
2785 /* Install the new nullification. */
2786 cond_free(&ctx
->null_cond
);
2788 ctx
->null_cond
= do_sed_cond(c
, dest
);
2790 return nullify_end(ctx
, NO_EXIT
);
2793 static ExitStatus
trans_extrw_imm(DisasContext
*ctx
, uint32_t insn
,
2794 const DisasInsn
*di
)
2796 unsigned clen
= extract32(insn
, 0, 5);
2797 unsigned pos
= extract32(insn
, 5, 5);
2798 unsigned is_se
= extract32(insn
, 10, 1);
2799 unsigned c
= extract32(insn
, 13, 3);
2800 unsigned rt
= extract32(insn
, 16, 5);
2801 unsigned rr
= extract32(insn
, 21, 5);
2802 unsigned len
= 32 - clen
;
2803 unsigned cpos
= 31 - pos
;
2810 dest
= dest_gpr(ctx
, rt
);
2811 src
= load_gpr(ctx
, rr
);
2813 tcg_gen_sextract_tl(dest
, src
, cpos
, len
);
2815 tcg_gen_extract_tl(dest
, src
, cpos
, len
);
2817 save_gpr(ctx
, rt
, dest
);
2819 /* Install the new nullification. */
2820 cond_free(&ctx
->null_cond
);
2822 ctx
->null_cond
= do_sed_cond(c
, dest
);
2824 return nullify_end(ctx
, NO_EXIT
);
2827 static const DisasInsn table_sh_ex
[] = {
2828 { 0xd0000000u
, 0xfc001fe0u
, trans_shrpw_sar
},
2829 { 0xd0000800u
, 0xfc001c00u
, trans_shrpw_imm
},
2830 { 0xd0001000u
, 0xfc001be0u
, trans_extrw_sar
},
2831 { 0xd0001800u
, 0xfc001800u
, trans_extrw_imm
},
2834 static ExitStatus
trans_depw_imm_c(DisasContext
*ctx
, uint32_t insn
,
2835 const DisasInsn
*di
)
2837 unsigned clen
= extract32(insn
, 0, 5);
2838 unsigned cpos
= extract32(insn
, 5, 5);
2839 unsigned nz
= extract32(insn
, 10, 1);
2840 unsigned c
= extract32(insn
, 13, 3);
2841 target_long val
= low_sextract(insn
, 16, 5);
2842 unsigned rt
= extract32(insn
, 21, 5);
2843 unsigned len
= 32 - clen
;
2844 target_long mask0
, mask1
;
2850 if (cpos
+ len
> 32) {
2854 dest
= dest_gpr(ctx
, rt
);
2855 mask0
= deposit64(0, cpos
, len
, val
);
2856 mask1
= deposit64(-1, cpos
, len
, val
);
2859 TCGv src
= load_gpr(ctx
, rt
);
2861 tcg_gen_andi_tl(dest
, src
, mask1
);
2864 tcg_gen_ori_tl(dest
, src
, mask0
);
2866 tcg_gen_movi_tl(dest
, mask0
);
2868 save_gpr(ctx
, rt
, dest
);
2870 /* Install the new nullification. */
2871 cond_free(&ctx
->null_cond
);
2873 ctx
->null_cond
= do_sed_cond(c
, dest
);
2875 return nullify_end(ctx
, NO_EXIT
);
2878 static ExitStatus
trans_depw_imm(DisasContext
*ctx
, uint32_t insn
,
2879 const DisasInsn
*di
)
2881 unsigned clen
= extract32(insn
, 0, 5);
2882 unsigned cpos
= extract32(insn
, 5, 5);
2883 unsigned nz
= extract32(insn
, 10, 1);
2884 unsigned c
= extract32(insn
, 13, 3);
2885 unsigned rr
= extract32(insn
, 16, 5);
2886 unsigned rt
= extract32(insn
, 21, 5);
2887 unsigned rs
= nz
? rt
: 0;
2888 unsigned len
= 32 - clen
;
2894 if (cpos
+ len
> 32) {
2898 dest
= dest_gpr(ctx
, rt
);
2899 val
= load_gpr(ctx
, rr
);
2901 tcg_gen_deposit_z_tl(dest
, val
, cpos
, len
);
2903 tcg_gen_deposit_tl(dest
, cpu_gr
[rs
], val
, cpos
, len
);
2905 save_gpr(ctx
, rt
, dest
);
2907 /* Install the new nullification. */
2908 cond_free(&ctx
->null_cond
);
2910 ctx
->null_cond
= do_sed_cond(c
, dest
);
2912 return nullify_end(ctx
, NO_EXIT
);
2915 static ExitStatus
trans_depw_sar(DisasContext
*ctx
, uint32_t insn
,
2916 const DisasInsn
*di
)
2918 unsigned clen
= extract32(insn
, 0, 5);
2919 unsigned nz
= extract32(insn
, 10, 1);
2920 unsigned i
= extract32(insn
, 12, 1);
2921 unsigned c
= extract32(insn
, 13, 3);
2922 unsigned rt
= extract32(insn
, 21, 5);
2923 unsigned rs
= nz
? rt
: 0;
2924 unsigned len
= 32 - clen
;
2925 TCGv val
, mask
, tmp
, shift
, dest
;
2926 unsigned msb
= 1U << (len
- 1);
2933 val
= load_const(ctx
, low_sextract(insn
, 16, 5));
2935 val
= load_gpr(ctx
, extract32(insn
, 16, 5));
2937 dest
= dest_gpr(ctx
, rt
);
2938 shift
= tcg_temp_new();
2939 tmp
= tcg_temp_new();
2941 /* Convert big-endian bit numbering in SAR to left-shift. */
2942 tcg_gen_xori_tl(shift
, cpu_sar
, TARGET_LONG_BITS
- 1);
2944 mask
= tcg_const_tl(msb
+ (msb
- 1));
2945 tcg_gen_and_tl(tmp
, val
, mask
);
2947 tcg_gen_shl_tl(mask
, mask
, shift
);
2948 tcg_gen_shl_tl(tmp
, tmp
, shift
);
2949 tcg_gen_andc_tl(dest
, cpu_gr
[rs
], mask
);
2950 tcg_gen_or_tl(dest
, dest
, tmp
);
2952 tcg_gen_shl_tl(dest
, tmp
, shift
);
2954 tcg_temp_free(shift
);
2955 tcg_temp_free(mask
);
2957 save_gpr(ctx
, rt
, dest
);
2959 /* Install the new nullification. */
2960 cond_free(&ctx
->null_cond
);
2962 ctx
->null_cond
= do_sed_cond(c
, dest
);
2964 return nullify_end(ctx
, NO_EXIT
);
2967 static const DisasInsn table_depw
[] = {
2968 { 0xd4000000u
, 0xfc000be0u
, trans_depw_sar
},
2969 { 0xd4000800u
, 0xfc001800u
, trans_depw_imm
},
2970 { 0xd4001800u
, 0xfc001800u
, trans_depw_imm_c
},
2973 static ExitStatus
trans_be(DisasContext
*ctx
, uint32_t insn
, bool is_l
)
2975 unsigned n
= extract32(insn
, 1, 1);
2976 unsigned b
= extract32(insn
, 21, 5);
2977 target_long disp
= assemble_17(insn
);
2979 /* unsigned s = low_uextract(insn, 13, 3); */
2980 /* ??? It seems like there should be a good way of using
2981 "be disp(sr2, r0)", the canonical gateway entry mechanism
2982 to our advantage. But that appears to be inconvenient to
2983 manage along side branch delay slots. Therefore we handle
2984 entry into the gateway page via absolute address. */
2986 /* Since we don't implement spaces, just branch. Do notice the special
2987 case of "be disp(*,r0)" using a direct branch to disp, so that we can
2988 goto_tb to the TB containing the syscall. */
2990 return do_dbranch(ctx
, disp
, is_l
? 31 : 0, n
);
2992 TCGv tmp
= get_temp(ctx
);
2993 tcg_gen_addi_tl(tmp
, load_gpr(ctx
, b
), disp
);
2994 return do_ibranch(ctx
, tmp
, is_l
? 31 : 0, n
);
2998 static ExitStatus
trans_bl(DisasContext
*ctx
, uint32_t insn
,
2999 const DisasInsn
*di
)
3001 unsigned n
= extract32(insn
, 1, 1);
3002 unsigned link
= extract32(insn
, 21, 5);
3003 target_long disp
= assemble_17(insn
);
3005 return do_dbranch(ctx
, iaoq_dest(ctx
, disp
), link
, n
);
3008 static ExitStatus
trans_bl_long(DisasContext
*ctx
, uint32_t insn
,
3009 const DisasInsn
*di
)
3011 unsigned n
= extract32(insn
, 1, 1);
3012 target_long disp
= assemble_22(insn
);
3014 return do_dbranch(ctx
, iaoq_dest(ctx
, disp
), 2, n
);
3017 static ExitStatus
trans_blr(DisasContext
*ctx
, uint32_t insn
,
3018 const DisasInsn
*di
)
3020 unsigned n
= extract32(insn
, 1, 1);
3021 unsigned rx
= extract32(insn
, 16, 5);
3022 unsigned link
= extract32(insn
, 21, 5);
3023 TCGv tmp
= get_temp(ctx
);
3025 tcg_gen_shli_tl(tmp
, load_gpr(ctx
, rx
), 3);
3026 tcg_gen_addi_tl(tmp
, tmp
, ctx
->iaoq_f
+ 8);
3027 return do_ibranch(ctx
, tmp
, link
, n
);
3030 static ExitStatus
trans_bv(DisasContext
*ctx
, uint32_t insn
,
3031 const DisasInsn
*di
)
3033 unsigned n
= extract32(insn
, 1, 1);
3034 unsigned rx
= extract32(insn
, 16, 5);
3035 unsigned rb
= extract32(insn
, 21, 5);
3039 dest
= load_gpr(ctx
, rb
);
3041 dest
= get_temp(ctx
);
3042 tcg_gen_shli_tl(dest
, load_gpr(ctx
, rx
), 3);
3043 tcg_gen_add_tl(dest
, dest
, load_gpr(ctx
, rb
));
3045 return do_ibranch(ctx
, dest
, 0, n
);
3048 static ExitStatus
trans_bve(DisasContext
*ctx
, uint32_t insn
,
3049 const DisasInsn
*di
)
3051 unsigned n
= extract32(insn
, 1, 1);
3052 unsigned rb
= extract32(insn
, 21, 5);
3053 unsigned link
= extract32(insn
, 13, 1) ? 2 : 0;
3055 return do_ibranch(ctx
, load_gpr(ctx
, rb
), link
, n
);
3058 static const DisasInsn table_branch
[] = {
3059 { 0xe8000000u
, 0xfc006000u
, trans_bl
}, /* B,L and B,L,PUSH */
3060 { 0xe800a000u
, 0xfc00e000u
, trans_bl_long
},
3061 { 0xe8004000u
, 0xfc00fffdu
, trans_blr
},
3062 { 0xe800c000u
, 0xfc00fffdu
, trans_bv
},
3063 { 0xe800d000u
, 0xfc00dffcu
, trans_bve
},
3066 static ExitStatus
trans_fop_wew_0c(DisasContext
*ctx
, uint32_t insn
,
3067 const DisasInsn
*di
)
3069 unsigned rt
= extract32(insn
, 0, 5);
3070 unsigned ra
= extract32(insn
, 21, 5);
3071 return do_fop_wew(ctx
, rt
, ra
, di
->f_wew
);
3074 static ExitStatus
trans_fop_wew_0e(DisasContext
*ctx
, uint32_t insn
,
3075 const DisasInsn
*di
)
3077 unsigned rt
= assemble_rt64(insn
);
3078 unsigned ra
= assemble_ra64(insn
);
3079 return do_fop_wew(ctx
, rt
, ra
, di
->f_wew
);
3082 static ExitStatus
trans_fop_ded(DisasContext
*ctx
, uint32_t insn
,
3083 const DisasInsn
*di
)
3085 unsigned rt
= extract32(insn
, 0, 5);
3086 unsigned ra
= extract32(insn
, 21, 5);
3087 return do_fop_ded(ctx
, rt
, ra
, di
->f_ded
);
3090 static ExitStatus
trans_fop_wed_0c(DisasContext
*ctx
, uint32_t insn
,
3091 const DisasInsn
*di
)
3093 unsigned rt
= extract32(insn
, 0, 5);
3094 unsigned ra
= extract32(insn
, 21, 5);
3095 return do_fop_wed(ctx
, rt
, ra
, di
->f_wed
);
3098 static ExitStatus
trans_fop_wed_0e(DisasContext
*ctx
, uint32_t insn
,
3099 const DisasInsn
*di
)
3101 unsigned rt
= assemble_rt64(insn
);
3102 unsigned ra
= extract32(insn
, 21, 5);
3103 return do_fop_wed(ctx
, rt
, ra
, di
->f_wed
);
3106 static ExitStatus
trans_fop_dew_0c(DisasContext
*ctx
, uint32_t insn
,
3107 const DisasInsn
*di
)
3109 unsigned rt
= extract32(insn
, 0, 5);
3110 unsigned ra
= extract32(insn
, 21, 5);
3111 return do_fop_dew(ctx
, rt
, ra
, di
->f_dew
);
3114 static ExitStatus
trans_fop_dew_0e(DisasContext
*ctx
, uint32_t insn
,
3115 const DisasInsn
*di
)
3117 unsigned rt
= extract32(insn
, 0, 5);
3118 unsigned ra
= assemble_ra64(insn
);
3119 return do_fop_dew(ctx
, rt
, ra
, di
->f_dew
);
3122 static ExitStatus
trans_fop_weww_0c(DisasContext
*ctx
, uint32_t insn
,
3123 const DisasInsn
*di
)
3125 unsigned rt
= extract32(insn
, 0, 5);
3126 unsigned rb
= extract32(insn
, 16, 5);
3127 unsigned ra
= extract32(insn
, 21, 5);
3128 return do_fop_weww(ctx
, rt
, ra
, rb
, di
->f_weww
);
3131 static ExitStatus
trans_fop_weww_0e(DisasContext
*ctx
, uint32_t insn
,
3132 const DisasInsn
*di
)
3134 unsigned rt
= assemble_rt64(insn
);
3135 unsigned rb
= assemble_rb64(insn
);
3136 unsigned ra
= assemble_ra64(insn
);
3137 return do_fop_weww(ctx
, rt
, ra
, rb
, di
->f_weww
);
3140 static ExitStatus
trans_fop_dedd(DisasContext
*ctx
, uint32_t insn
,
3141 const DisasInsn
*di
)
3143 unsigned rt
= extract32(insn
, 0, 5);
3144 unsigned rb
= extract32(insn
, 16, 5);
3145 unsigned ra
= extract32(insn
, 21, 5);
3146 return do_fop_dedd(ctx
, rt
, ra
, rb
, di
->f_dedd
);
3149 static void gen_fcpy_s(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3151 tcg_gen_mov_i32(dst
, src
);
3154 static void gen_fcpy_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
3156 tcg_gen_mov_i64(dst
, src
);
3159 static void gen_fabs_s(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3161 tcg_gen_andi_i32(dst
, src
, INT32_MAX
);
3164 static void gen_fabs_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
3166 tcg_gen_andi_i64(dst
, src
, INT64_MAX
);
3169 static void gen_fneg_s(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3171 tcg_gen_xori_i32(dst
, src
, INT32_MIN
);
3174 static void gen_fneg_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
3176 tcg_gen_xori_i64(dst
, src
, INT64_MIN
);
3179 static void gen_fnegabs_s(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3181 tcg_gen_ori_i32(dst
, src
, INT32_MIN
);
3184 static void gen_fnegabs_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
3186 tcg_gen_ori_i64(dst
, src
, INT64_MIN
);
3189 static ExitStatus
do_fcmp_s(DisasContext
*ctx
, unsigned ra
, unsigned rb
,
3190 unsigned y
, unsigned c
)
3192 TCGv_i32 ta
, tb
, tc
, ty
;
3196 ta
= load_frw0_i32(ra
);
3197 tb
= load_frw0_i32(rb
);
3198 ty
= tcg_const_i32(y
);
3199 tc
= tcg_const_i32(c
);
3201 gen_helper_fcmp_s(cpu_env
, ta
, tb
, ty
, tc
);
3203 tcg_temp_free_i32(ta
);
3204 tcg_temp_free_i32(tb
);
3205 tcg_temp_free_i32(ty
);
3206 tcg_temp_free_i32(tc
);
3208 return nullify_end(ctx
, NO_EXIT
);
3211 static ExitStatus
trans_fcmp_s_0c(DisasContext
*ctx
, uint32_t insn
,
3212 const DisasInsn
*di
)
3214 unsigned c
= extract32(insn
, 0, 5);
3215 unsigned y
= extract32(insn
, 13, 3);
3216 unsigned rb
= extract32(insn
, 16, 5);
3217 unsigned ra
= extract32(insn
, 21, 5);
3218 return do_fcmp_s(ctx
, ra
, rb
, y
, c
);
3221 static ExitStatus
trans_fcmp_s_0e(DisasContext
*ctx
, uint32_t insn
,
3222 const DisasInsn
*di
)
3224 unsigned c
= extract32(insn
, 0, 5);
3225 unsigned y
= extract32(insn
, 13, 3);
3226 unsigned rb
= assemble_rb64(insn
);
3227 unsigned ra
= assemble_ra64(insn
);
3228 return do_fcmp_s(ctx
, ra
, rb
, y
, c
);
3231 static ExitStatus
trans_fcmp_d(DisasContext
*ctx
, uint32_t insn
,
3232 const DisasInsn
*di
)
3234 unsigned c
= extract32(insn
, 0, 5);
3235 unsigned y
= extract32(insn
, 13, 3);
3236 unsigned rb
= extract32(insn
, 16, 5);
3237 unsigned ra
= extract32(insn
, 21, 5);
3245 ty
= tcg_const_i32(y
);
3246 tc
= tcg_const_i32(c
);
3248 gen_helper_fcmp_d(cpu_env
, ta
, tb
, ty
, tc
);
3250 tcg_temp_free_i64(ta
);
3251 tcg_temp_free_i64(tb
);
3252 tcg_temp_free_i32(ty
);
3253 tcg_temp_free_i32(tc
);
3255 return nullify_end(ctx
, NO_EXIT
);
3258 static ExitStatus
trans_ftest_t(DisasContext
*ctx
, uint32_t insn
,
3259 const DisasInsn
*di
)
3261 unsigned y
= extract32(insn
, 13, 3);
3262 unsigned cbit
= (y
^ 1) - 1;
3268 tcg_gen_ld32u_tl(t
, cpu_env
, offsetof(CPUHPPAState
, fr0_shadow
));
3269 tcg_gen_extract_tl(t
, t
, 21 - cbit
, 1);
3270 ctx
->null_cond
= cond_make_0(TCG_COND_NE
, t
);
3273 return nullify_end(ctx
, NO_EXIT
);
3276 static ExitStatus
trans_ftest_q(DisasContext
*ctx
, uint32_t insn
,
3277 const DisasInsn
*di
)
3279 unsigned c
= extract32(insn
, 0, 5);
3287 tcg_gen_ld32u_tl(t
, cpu_env
, offsetof(CPUHPPAState
, fr0_shadow
));
3290 case 0: /* simple */
3291 tcg_gen_andi_tl(t
, t
, 0x4000000);
3292 ctx
->null_cond
= cond_make_0(TCG_COND_NE
, t
);
3316 return gen_illegal(ctx
);
3319 TCGv c
= load_const(ctx
, mask
);
3320 tcg_gen_or_tl(t
, t
, c
);
3321 ctx
->null_cond
= cond_make(TCG_COND_EQ
, t
, c
);
3323 tcg_gen_andi_tl(t
, t
, mask
);
3324 ctx
->null_cond
= cond_make_0(TCG_COND_EQ
, t
);
3327 return nullify_end(ctx
, NO_EXIT
);
3330 static ExitStatus
trans_xmpyu(DisasContext
*ctx
, uint32_t insn
,
3331 const DisasInsn
*di
)
3333 unsigned rt
= extract32(insn
, 0, 5);
3334 unsigned rb
= assemble_rb64(insn
);
3335 unsigned ra
= assemble_ra64(insn
);
3340 a
= load_frw0_i64(ra
);
3341 b
= load_frw0_i64(rb
);
3342 tcg_gen_mul_i64(a
, a
, b
);
3344 tcg_temp_free_i64(a
);
3345 tcg_temp_free_i64(b
);
3347 return nullify_end(ctx
, NO_EXIT
);
3350 #define FOP_DED trans_fop_ded, .f_ded
3351 #define FOP_DEDD trans_fop_dedd, .f_dedd
3353 #define FOP_WEW trans_fop_wew_0c, .f_wew
3354 #define FOP_DEW trans_fop_dew_0c, .f_dew
3355 #define FOP_WED trans_fop_wed_0c, .f_wed
3356 #define FOP_WEWW trans_fop_weww_0c, .f_weww
3358 static const DisasInsn table_float_0c
[] = {
3359 /* floating point class zero */
3360 { 0x30004000, 0xfc1fffe0, FOP_WEW
= gen_fcpy_s
},
3361 { 0x30006000, 0xfc1fffe0, FOP_WEW
= gen_fabs_s
},
3362 { 0x30008000, 0xfc1fffe0, FOP_WEW
= gen_helper_fsqrt_s
},
3363 { 0x3000a000, 0xfc1fffe0, FOP_WEW
= gen_helper_frnd_s
},
3364 { 0x3000c000, 0xfc1fffe0, FOP_WEW
= gen_fneg_s
},
3365 { 0x3000e000, 0xfc1fffe0, FOP_WEW
= gen_fnegabs_s
},
3367 { 0x30004800, 0xfc1fffe0, FOP_DED
= gen_fcpy_d
},
3368 { 0x30006800, 0xfc1fffe0, FOP_DED
= gen_fabs_d
},
3369 { 0x30008800, 0xfc1fffe0, FOP_DED
= gen_helper_fsqrt_d
},
3370 { 0x3000a800, 0xfc1fffe0, FOP_DED
= gen_helper_frnd_d
},
3371 { 0x3000c800, 0xfc1fffe0, FOP_DED
= gen_fneg_d
},
3372 { 0x3000e800, 0xfc1fffe0, FOP_DED
= gen_fnegabs_d
},
3374 /* floating point class three */
3375 { 0x30000600, 0xfc00ffe0, FOP_WEWW
= gen_helper_fadd_s
},
3376 { 0x30002600, 0xfc00ffe0, FOP_WEWW
= gen_helper_fsub_s
},
3377 { 0x30004600, 0xfc00ffe0, FOP_WEWW
= gen_helper_fmpy_s
},
3378 { 0x30006600, 0xfc00ffe0, FOP_WEWW
= gen_helper_fdiv_s
},
3380 { 0x30000e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fadd_d
},
3381 { 0x30002e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fsub_d
},
3382 { 0x30004e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fmpy_d
},
3383 { 0x30006e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fdiv_d
},
3385 /* floating point class one */
3387 { 0x30000a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_d_s
},
3388 { 0x30002200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_s_d
},
3390 { 0x30008200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_w_s
},
3391 { 0x30008a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_dw_s
},
3392 { 0x3000a200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_w_d
},
3393 { 0x3000aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_dw_d
},
3395 { 0x30010200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_s_w
},
3396 { 0x30010a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_d_w
},
3397 { 0x30012200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_s_dw
},
3398 { 0x30012a00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_d_dw
},
3399 /* float/int truncate */
3400 { 0x30018200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_t_s_w
},
3401 { 0x30018a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_t_d_w
},
3402 { 0x3001a200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_t_s_dw
},
3403 { 0x3001aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_t_d_dw
},
3405 { 0x30028200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_uw_s
},
3406 { 0x30028a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_udw_s
},
3407 { 0x3002a200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_uw_d
},
3408 { 0x3002aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_udw_d
},
3410 { 0x30030200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_s_uw
},
3411 { 0x30030a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_d_uw
},
3412 { 0x30032200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_s_udw
},
3413 { 0x30032a00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_d_udw
},
3414 /* float/uint truncate */
3415 { 0x30038200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_t_s_uw
},
3416 { 0x30038a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_t_d_uw
},
3417 { 0x3003a200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_t_s_udw
},
3418 { 0x3003aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_t_d_udw
},
3420 /* floating point class two */
3421 { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c
},
3422 { 0x30000c00, 0xfc001fe0, trans_fcmp_d
},
3423 { 0x30002420, 0xffffffe0, trans_ftest_q
},
3424 { 0x30000420, 0xffff1fff, trans_ftest_t
},
3426 /* FID. Note that ra == rt == 0, which via fcpy puts 0 into fr0.
3427 This is machine/revision == 0, which is reserved for simulator. */
3428 { 0x30000000, 0xffffffff, FOP_WEW
= gen_fcpy_s
},
3435 #define FOP_WEW trans_fop_wew_0e, .f_wew
3436 #define FOP_DEW trans_fop_dew_0e, .f_dew
3437 #define FOP_WED trans_fop_wed_0e, .f_wed
3438 #define FOP_WEWW trans_fop_weww_0e, .f_weww
3440 static const DisasInsn table_float_0e
[] = {
3441 /* floating point class zero */
3442 { 0x38004000, 0xfc1fff20, FOP_WEW
= gen_fcpy_s
},
3443 { 0x38006000, 0xfc1fff20, FOP_WEW
= gen_fabs_s
},
3444 { 0x38008000, 0xfc1fff20, FOP_WEW
= gen_helper_fsqrt_s
},
3445 { 0x3800a000, 0xfc1fff20, FOP_WEW
= gen_helper_frnd_s
},
3446 { 0x3800c000, 0xfc1fff20, FOP_WEW
= gen_fneg_s
},
3447 { 0x3800e000, 0xfc1fff20, FOP_WEW
= gen_fnegabs_s
},
3449 { 0x38004800, 0xfc1fffe0, FOP_DED
= gen_fcpy_d
},
3450 { 0x38006800, 0xfc1fffe0, FOP_DED
= gen_fabs_d
},
3451 { 0x38008800, 0xfc1fffe0, FOP_DED
= gen_helper_fsqrt_d
},
3452 { 0x3800a800, 0xfc1fffe0, FOP_DED
= gen_helper_frnd_d
},
3453 { 0x3800c800, 0xfc1fffe0, FOP_DED
= gen_fneg_d
},
3454 { 0x3800e800, 0xfc1fffe0, FOP_DED
= gen_fnegabs_d
},
3456 /* floating point class three */
3457 { 0x38000600, 0xfc00ef20, FOP_WEWW
= gen_helper_fadd_s
},
3458 { 0x38002600, 0xfc00ef20, FOP_WEWW
= gen_helper_fsub_s
},
3459 { 0x38004600, 0xfc00ef20, FOP_WEWW
= gen_helper_fmpy_s
},
3460 { 0x38006600, 0xfc00ef20, FOP_WEWW
= gen_helper_fdiv_s
},
3462 { 0x38000e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fadd_d
},
3463 { 0x38002e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fsub_d
},
3464 { 0x38004e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fmpy_d
},
3465 { 0x38006e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fdiv_d
},
3467 { 0x38004700, 0xfc00ef60, trans_xmpyu
},
3469 /* floating point class one */
3471 { 0x38000a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_d_s
},
3472 { 0x38002200, 0xfc1fffc0, FOP_DEW
= gen_helper_fcnv_s_d
},
3474 { 0x38008200, 0xfc1ffe60, FOP_WEW
= gen_helper_fcnv_w_s
},
3475 { 0x38008a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_dw_s
},
3476 { 0x3800a200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_w_d
},
3477 { 0x3800aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_dw_d
},
3479 { 0x38010200, 0xfc1ffe60, FOP_WEW
= gen_helper_fcnv_s_w
},
3480 { 0x38010a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_d_w
},
3481 { 0x38012200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_s_dw
},
3482 { 0x38012a00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_d_dw
},
3483 /* float/int truncate */
3484 { 0x38018200, 0xfc1ffe60, FOP_WEW
= gen_helper_fcnv_t_s_w
},
3485 { 0x38018a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_t_d_w
},
3486 { 0x3801a200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_t_s_dw
},
3487 { 0x3801aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_t_d_dw
},
3489 { 0x38028200, 0xfc1ffe60, FOP_WEW
= gen_helper_fcnv_uw_s
},
3490 { 0x38028a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_udw_s
},
3491 { 0x3802a200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_uw_d
},
3492 { 0x3802aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_udw_d
},
3494 { 0x38030200, 0xfc1ffe60, FOP_WEW
= gen_helper_fcnv_s_uw
},
3495 { 0x38030a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_d_uw
},
3496 { 0x38032200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_s_udw
},
3497 { 0x38032a00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_d_udw
},
3498 /* float/uint truncate */
3499 { 0x38038200, 0xfc1ffe60, FOP_WEW
= gen_helper_fcnv_t_s_uw
},
3500 { 0x38038a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_t_d_uw
},
3501 { 0x3803a200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_t_s_udw
},
3502 { 0x3803aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_t_d_udw
},
3504 /* floating point class two */
3505 { 0x38000400, 0xfc000f60, trans_fcmp_s_0e
},
3506 { 0x38000c00, 0xfc001fe0, trans_fcmp_d
},
3516 /* Convert the fmpyadd single-precision register encodings to standard. */
/* Convert the fmpyadd single-precision register encodings to standard.
   Bit 4 of the 5-bit field selects the upper register bank; the result
   indexes the flat register file starting at 16.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    unsigned bank = r & 16;
    return 16 + bank * 2 + (r & 15);
}
3522 static ExitStatus
trans_fmpyadd(DisasContext
*ctx
, uint32_t insn
, bool is_sub
)
3524 unsigned tm
= extract32(insn
, 0, 5);
3525 unsigned f
= extract32(insn
, 5, 1);
3526 unsigned ra
= extract32(insn
, 6, 5);
3527 unsigned ta
= extract32(insn
, 11, 5);
3528 unsigned rm2
= extract32(insn
, 16, 5);
3529 unsigned rm1
= extract32(insn
, 21, 5);
3533 /* Independent multiply & add/sub, with undefined behaviour
3534 if outputs overlap inputs. */
3536 tm
= fmpyadd_s_reg(tm
);
3537 ra
= fmpyadd_s_reg(ra
);
3538 ta
= fmpyadd_s_reg(ta
);
3539 rm2
= fmpyadd_s_reg(rm2
);
3540 rm1
= fmpyadd_s_reg(rm1
);
3541 do_fop_weww(ctx
, tm
, rm1
, rm2
, gen_helper_fmpy_s
);
3542 do_fop_weww(ctx
, ta
, ta
, ra
,
3543 is_sub
? gen_helper_fsub_s
: gen_helper_fadd_s
);
3545 do_fop_dedd(ctx
, tm
, rm1
, rm2
, gen_helper_fmpy_d
);
3546 do_fop_dedd(ctx
, ta
, ta
, ra
,
3547 is_sub
? gen_helper_fsub_d
: gen_helper_fadd_d
);
3550 return nullify_end(ctx
, NO_EXIT
);
3553 static ExitStatus
trans_fmpyfadd_s(DisasContext
*ctx
, uint32_t insn
,
3554 const DisasInsn
*di
)
3556 unsigned rt
= assemble_rt64(insn
);
3557 unsigned neg
= extract32(insn
, 5, 1);
3558 unsigned rm1
= assemble_ra64(insn
);
3559 unsigned rm2
= assemble_rb64(insn
);
3560 unsigned ra3
= assemble_rc64(insn
);
3564 a
= load_frw0_i32(rm1
);
3565 b
= load_frw0_i32(rm2
);
3566 c
= load_frw0_i32(ra3
);
3569 gen_helper_fmpynfadd_s(a
, cpu_env
, a
, b
, c
);
3571 gen_helper_fmpyfadd_s(a
, cpu_env
, a
, b
, c
);
3574 tcg_temp_free_i32(b
);
3575 tcg_temp_free_i32(c
);
3576 save_frw_i32(rt
, a
);
3577 tcg_temp_free_i32(a
);
3578 return nullify_end(ctx
, NO_EXIT
);
3581 static ExitStatus
trans_fmpyfadd_d(DisasContext
*ctx
, uint32_t insn
,
3582 const DisasInsn
*di
)
3584 unsigned rt
= extract32(insn
, 0, 5);
3585 unsigned neg
= extract32(insn
, 5, 1);
3586 unsigned rm1
= extract32(insn
, 21, 5);
3587 unsigned rm2
= extract32(insn
, 16, 5);
3588 unsigned ra3
= assemble_rc64(insn
);
3597 gen_helper_fmpynfadd_d(a
, cpu_env
, a
, b
, c
);
3599 gen_helper_fmpyfadd_d(a
, cpu_env
, a
, b
, c
);
3602 tcg_temp_free_i64(b
);
3603 tcg_temp_free_i64(c
);
3605 tcg_temp_free_i64(a
);
3606 return nullify_end(ctx
, NO_EXIT
);
3609 static const DisasInsn table_fp_fused
[] = {
3610 { 0xb8000000u
, 0xfc000800u
, trans_fmpyfadd_s
},
3611 { 0xb8000800u
, 0xfc0019c0u
, trans_fmpyfadd_d
}
3614 static ExitStatus
translate_table_int(DisasContext
*ctx
, uint32_t insn
,
3615 const DisasInsn table
[], size_t n
)
3618 for (i
= 0; i
< n
; ++i
) {
3619 if ((insn
& table
[i
].mask
) == table
[i
].insn
) {
3620 return table
[i
].trans(ctx
, insn
, &table
[i
]);
3623 return gen_illegal(ctx
);
3626 #define translate_table(ctx, insn, table) \
3627 translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
3629 static ExitStatus
translate_one(DisasContext
*ctx
, uint32_t insn
)
3631 uint32_t opc
= extract32(insn
, 26, 6);
3634 case 0x00: /* system op */
3635 return translate_table(ctx
, insn
, table_system
);
3637 return translate_table(ctx
, insn
, table_mem_mgmt
);
3639 return translate_table(ctx
, insn
, table_arith_log
);
3641 return translate_table(ctx
, insn
, table_index_mem
);
3643 return trans_fmpyadd(ctx
, insn
, false);
3645 return trans_ldil(ctx
, insn
);
3647 return trans_copr_w(ctx
, insn
);
3649 return trans_addil(ctx
, insn
);
3651 return trans_copr_dw(ctx
, insn
);
3653 return translate_table(ctx
, insn
, table_float_0c
);
3655 return trans_ldo(ctx
, insn
);
3657 return translate_table(ctx
, insn
, table_float_0e
);
3660 return trans_load(ctx
, insn
, false, MO_UB
);
3662 return trans_load(ctx
, insn
, false, MO_TEUW
);
3664 return trans_load(ctx
, insn
, false, MO_TEUL
);
3666 return trans_load(ctx
, insn
, true, MO_TEUL
);
3668 return trans_fload_mod(ctx
, insn
);
3670 return trans_load_w(ctx
, insn
);
3672 return trans_store(ctx
, insn
, false, MO_UB
);
3674 return trans_store(ctx
, insn
, false, MO_TEUW
);
3676 return trans_store(ctx
, insn
, false, MO_TEUL
);
3678 return trans_store(ctx
, insn
, true, MO_TEUL
);
3680 return trans_fstore_mod(ctx
, insn
);
3682 return trans_store_w(ctx
, insn
);
3685 return trans_cmpb(ctx
, insn
, true, false, false);
3687 return trans_cmpb(ctx
, insn
, true, true, false);
3689 return trans_cmpb(ctx
, insn
, false, false, false);
3691 return trans_cmpb(ctx
, insn
, false, true, false);
3693 return trans_cmpiclr(ctx
, insn
);
3695 return trans_subi(ctx
, insn
);
3697 return trans_fmpyadd(ctx
, insn
, true);
3699 return trans_cmpb(ctx
, insn
, true, false, true);
3701 return trans_addb(ctx
, insn
, true, false);
3703 return trans_addb(ctx
, insn
, true, true);
3705 return trans_addb(ctx
, insn
, false, false);
3707 return trans_addb(ctx
, insn
, false, true);
3710 return trans_addi(ctx
, insn
);
3712 return translate_table(ctx
, insn
, table_fp_fused
);
3714 return trans_cmpb(ctx
, insn
, false, false, true);
3718 return trans_bb(ctx
, insn
);
3720 return trans_movb(ctx
, insn
, false);
3722 return trans_movb(ctx
, insn
, true);
3724 return translate_table(ctx
, insn
, table_sh_ex
);
3726 return translate_table(ctx
, insn
, table_depw
);
3728 return trans_be(ctx
, insn
, false);
3730 return trans_be(ctx
, insn
, true);
3732 return translate_table(ctx
, insn
, table_branch
);
3734 case 0x04: /* spopn */
3735 case 0x05: /* diag */
3736 case 0x0F: /* product specific */
3739 case 0x07: /* unassigned */
3740 case 0x15: /* unassigned */
3741 case 0x1D: /* unassigned */
3742 case 0x37: /* unassigned */
3743 case 0x3F: /* unassigned */
3747 return gen_illegal(ctx
);
/*
 * gen_intermediate_code: translate one HPPA guest translation block (TB)
 * into TCG intermediate ops.
 *
 * NOTE(review): this copy of the function is a mangled extraction --
 * statements are split across lines, and the embedded numbers (3750,
 * 3752, ...) are the original source line numbers.  Gaps in those
 * numbers (e.g. 3774 -> 3776) indicate original lines missing from this
 * copy (mostly braces, the DisasContext declaration, the main
 * `do { ... }` header, and a few statements).  Only comments have been
 * added here; every surviving token is untouched.  Comments that depend
 * on the missing lines are marked as assumptions to confirm against the
 * full file.
 */
3750 void gen_intermediate_code(CPUHPPAState
*env
, struct TranslationBlock
*tb
)
/* Recover the CPU/CPUState objects that wrap this env. */
3752 HPPACPU
*cpu
= hppa_env_get_cpu(env
);
3753 CPUState
*cs
= CPU(cpu
);
3756 int num_insns
, max_insns
, i
;
/*
 * Seed the two-entry instruction-address queue from the TB description:
 * front of queue from tb->pc, back of queue (the delay-slot successor)
 * from tb->cs_base.  Presumably this mirrors the PA-RISC IAQ -- confirm
 * against the DisasContext definition in the full file.
 */
3760 ctx
.iaoq_f
= tb
->pc
;
3761 ctx
.iaoq_b
= tb
->cs_base
;
3762 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
/* Mark every slot of the per-insn temporary pool as unused. */
3765 for (i
= 0; i
< ARRAY_SIZE(ctx
.temps
); ++i
) {
3766 TCGV_UNUSED(ctx
.temps
[i
]);
3769 /* Compute the maximum number of insns to execute, as bounded by
3770 (1) icount, (2) single-stepping, (3) branch delay slots, or
3771 (4) the number of insns remaining on the current page. */
3772 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
3773 if (max_insns
== 0) {
/* 0 in CF_COUNT_MASK means "no icount bound"; use the mask maximum. */
3774 max_insns
= CF_COUNT_MASK
;
/*
 * Single-stepping: the clamp to 1 insn presumably sat on the missing
 * line 3777 -- confirm in the full file.
 */
3776 if (ctx
.singlestep_enabled
|| singlestep
) {
3778 } else if (max_insns
> TCG_MAX_INSNS
) {
3779 max_insns
= TCG_MAX_INSNS
;
3785 /* Seed the nullification status from PSW[N], as shown in TB->FLAGS. */
3786 ctx
.null_cond
= cond_make_f();
3787 ctx
.psw_n_nonzero
= false;
/* tb->flags bit 0 carries PSW[N]: the first insn enters nullified. */
3788 if (tb
->flags
& 1) {
3789 ctx
.null_cond
.c
= TCG_COND_ALWAYS
;
3790 ctx
.psw_n_nonzero
= true;
3792 ctx
.null_lab
= NULL
;
/*
 * Main translation loop -- the `do {` header is among the missing
 * lines.  Record both queue entries per insn so that
 * restore_state_to_opc() can rebuild iaoq_f/iaoq_b from insn-start data.
 */
3795 tcg_gen_insn_start(ctx
.iaoq_f
, ctx
.iaoq_b
);
/* A debugger breakpoint at the current insn raises EXCP_DEBUG instead
   of translating it. */
3798 if (unlikely(cpu_breakpoint_test(cs
, ctx
.iaoq_f
, BP_ANY
))) {
3799 ret
= gen_excp(&ctx
, EXCP_DEBUG
);
/* icount: the last insn of an I/O TB presumably emitted gen_io_start()
   in the missing body lines -- confirm. */
3802 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
/* Addresses inside page zero are magic (syscall gateway etc.) and are
   handled specially rather than fetched and decoded. */
3806 if (ctx
.iaoq_f
< TARGET_PAGE_SIZE
) {
3807 ret
= do_page_zero(&ctx
);
3808 assert(ret
!= NO_EXIT
);
3810 /* Always fetch the insn, even if nullified, so that we check
3811 the page permissions for execute. */
3812 uint32_t insn
= cpu_ldl_code(env
, ctx
.iaoq_f
);
3814 /* Set up the IA queue for the next insn.
3815 This will be overwritten by a branch. */
/* iaoq_b == -1 is the sentinel for "back of queue only known at
   runtime": compute successor as cpu_iaoq_b + 4 into a temp.
   Otherwise the successor is the compile-time constant iaoq_b + 4. */
3816 if (ctx
.iaoq_b
== -1) {
3818 ctx
.iaoq_n_var
= get_temp(&ctx
);
3819 tcg_gen_addi_tl(ctx
.iaoq_n_var
, cpu_iaoq_b
, 4);
3821 ctx
.iaoq_n
= ctx
.iaoq_b
+ 4;
3822 TCGV_UNUSED(ctx
.iaoq_n_var
);
/* If the insn is statically known to be nullified, skip decoding it
   entirely and clear the nullification for the next insn; otherwise
   decode it (the else arm sits on missing lines -- confirm). */
3825 if (unlikely(ctx
.null_cond
.c
== TCG_COND_ALWAYS
)) {
3826 ctx
.null_cond
.c
= TCG_COND_NEVER
;
3829 ret
= translate_one(&ctx
, insn
);
/* Any nullification label opened during decode must have been closed. */
3830 assert(ctx
.null_lab
== NULL
);
/* Release all TCG temporaries allocated for this insn and reset the
   pool slots. */
3834 for (i
= 0; i
< ctx
.ntemps
; ++i
) {
3835 tcg_temp_free(ctx
.temps
[i
]);
3836 TCGV_UNUSED(ctx
.temps
[i
]);
3840 /* If we see non-linear instructions, exhaust instruction count,
3841 or run out of buffer space, stop generation. */
3842 /* ??? The non-linear instruction restriction is purely due to
3843 the debugging dump. Otherwise we *could* follow unconditional
3844 branches within the same page. */
/* The leading `if (ret == NO_EXIT` of this condition is on a missing
   line; only the continuation survives below. */
3846 && (ctx
.iaoq_b
!= ctx
.iaoq_f
+ 4
3847 || num_insns
>= max_insns
3848 || tcg_op_buf_full())) {
/* If the nullification state is statically known, we can chain to the
   next TB with goto_tb; the EXIT_GOTO_TB assignment presumably sat on
   the missing line 3853.  Otherwise fall back to a stale-IAQ exit. */
3849 if (ctx
.null_cond
.c
== TCG_COND_NEVER
3850 || ctx
.null_cond
.c
== TCG_COND_ALWAYS
) {
3851 nullify_set(&ctx
, ctx
.null_cond
.c
== TCG_COND_ALWAYS
);
3852 gen_goto_tb(&ctx
, 0, ctx
.iaoq_b
, ctx
.iaoq_n
);
3855 ret
= EXIT_IAQ_N_STALE
;
/* Advance the instruction-address queue: front <- back, back <- next. */
3859 ctx
.iaoq_f
= ctx
.iaoq_b
;
3860 ctx
.iaoq_b
= ctx
.iaoq_n
;
/* These three exit states need no further IAQ bookkeeping; the `break`
   presumably followed on the missing line 3864. */
3861 if (ret
== EXIT_NORETURN
3862 || ret
== EXIT_GOTO_TB
3863 || ret
== EXIT_IAQ_N_UPDATED
) {
/* Front of queue now variable: shift the hardware queue registers and
   install the computed successor; the IAQ is then fully up to date. */
3866 if (ctx
.iaoq_f
== -1) {
3867 tcg_gen_mov_tl(cpu_iaoq_f
, cpu_iaoq_b
);
3868 copy_iaoq_entry(cpu_iaoq_b
, ctx
.iaoq_n
, ctx
.iaoq_n_var
);
3870 ret
= EXIT_IAQ_N_UPDATED
;
/* Only the back of the queue is variable: materialize it from the temp. */
3873 if (ctx
.iaoq_b
== -1) {
3874 tcg_gen_mov_tl(cpu_iaoq_b
, ctx
.iaoq_n_var
);
3876 } while (ret
== NO_EXIT
);
/* icount epilogue for I/O TBs -- gen_io_end() presumably in the missing
   body lines. */
3878 if (tb
->cflags
& CF_LAST_IO
) {
/*
 * Exit dispatch -- the `switch (ret)` header is on a missing line.
 * EXIT_IAQ_N_STALE: flush the (possibly constant) ctx queue values into
 * the cpu_iaoq_{f,b} globals, then fall through to the updated case.
 */
3886 case EXIT_IAQ_N_STALE
:
3887 copy_iaoq_entry(cpu_iaoq_f
, ctx
.iaoq_f
, cpu_iaoq_f
);
3888 copy_iaoq_entry(cpu_iaoq_b
, ctx
.iaoq_b
, cpu_iaoq_b
);
/* EXIT_IAQ_N_UPDATED: under single-step, trap back to the debugger;
   otherwise the normal TB-exit presumably followed on missing lines. */
3891 case EXIT_IAQ_N_UPDATED
:
3892 if (ctx
.singlestep_enabled
) {
3893 gen_excp_1(EXCP_DEBUG
);
/* Finalize the TCG op stream for this TB. */
3902 gen_tb_end(tb
, num_insns
);
/* Every HPPA insn is 4 bytes, so size is a simple multiple. */
3904 tb
->size
= num_insns
* 4;
3905 tb
->icount
= num_insns
;
/* Debug disassembly dump, gated on -d in_asm and the address filter.
   The page-zero special entry points get fixed strings (a switch on
   tb->pc, its header among the missing lines); everything else is
   disassembled normally. */
3908 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
3909 && qemu_log_in_addr_range(tb
->pc
)) {
3913 qemu_log("IN:\n0x00000000: (null)\n\n");
3916 qemu_log("IN:\n0x000000b0: light-weight-syscall\n\n");
3919 qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n\n");
3922 qemu_log("IN:\n0x00000100: syscall\n\n");
3925 qemu_log("IN: %s\n", lookup_symbol(tb
->pc
));
3926 log_target_disas(cs
, tb
->pc
, tb
->size
, 1);
3935 void restore_state_to_opc(CPUHPPAState
*env
, TranslationBlock
*tb
,
3938 env
->iaoq_f
= data
[0];
3939 if (data
[1] != -1) {
3940 env
->iaoq_b
= data
[1];
3942 /* Since we were executing the instruction at IAOQ_F, and took some
3943 sort of action that provoked the cpu_restore_state, we can infer
3944 that the instruction was not nullified. */