4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env
;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext
;
53 typedef struct DisasInsn DisasInsn
;
54 typedef struct DisasFields DisasFields
;
57 struct TranslationBlock
*tb
;
58 const DisasInsn
*insn
;
62 bool singlestep_enabled
;
65 /* Information carried about a condition to be evaluated. */
72 struct { TCGv_i64 a
, b
; } s64
;
73 struct { TCGv_i32 a
, b
; } s32
;
79 #ifdef DEBUG_INLINE_BRANCHES
80 static uint64_t inline_branch_hit
[CC_OP_MAX
];
81 static uint64_t inline_branch_miss
[CC_OP_MAX
];
84 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
86 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
87 if (s
->tb
->flags
& FLAG_MASK_32
) {
88 return pc
| 0x80000000;
94 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
97 S390CPU
*cpu
= S390_CPU(cs
);
98 CPUS390XState
*env
= &cpu
->env
;
101 if (env
->cc_op
> 3) {
102 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
103 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
105 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
106 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
109 for (i
= 0; i
< 16; i
++) {
110 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
112 cpu_fprintf(f
, "\n");
118 for (i
= 0; i
< 16; i
++) {
119 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, get_freg(env
, i
)->ll
);
121 cpu_fprintf(f
, "\n");
127 for (i
= 0; i
< 32; i
++) {
128 cpu_fprintf(f
, "V%02d=%016" PRIx64
"%016" PRIx64
, i
,
129 env
->vregs
[i
][0].ll
, env
->vregs
[i
][1].ll
);
130 cpu_fprintf(f
, (i
% 2) ? "\n" : " ");
133 #ifndef CONFIG_USER_ONLY
134 for (i
= 0; i
< 16; i
++) {
135 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
137 cpu_fprintf(f
, "\n");
144 #ifdef DEBUG_INLINE_BRANCHES
145 for (i
= 0; i
< CC_OP_MAX
; i
++) {
146 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
147 inline_branch_miss
[i
], inline_branch_hit
[i
]);
151 cpu_fprintf(f
, "\n");
154 static TCGv_i64 psw_addr
;
155 static TCGv_i64 psw_mask
;
156 static TCGv_i64 gbea
;
158 static TCGv_i32 cc_op
;
159 static TCGv_i64 cc_src
;
160 static TCGv_i64 cc_dst
;
161 static TCGv_i64 cc_vr
;
163 static char cpu_reg_names
[32][4];
164 static TCGv_i64 regs
[16];
165 static TCGv_i64 fregs
[16];
167 void s390x_translate_init(void)
171 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
172 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
173 offsetof(CPUS390XState
, psw
.addr
),
175 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
176 offsetof(CPUS390XState
, psw
.mask
),
178 gbea
= tcg_global_mem_new_i64(cpu_env
,
179 offsetof(CPUS390XState
, gbea
),
182 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
184 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
186 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
188 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
191 for (i
= 0; i
< 16; i
++) {
192 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
193 regs
[i
] = tcg_global_mem_new(cpu_env
,
194 offsetof(CPUS390XState
, regs
[i
]),
198 for (i
= 0; i
< 16; i
++) {
199 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
200 fregs
[i
] = tcg_global_mem_new(cpu_env
,
201 offsetof(CPUS390XState
, vregs
[i
][0].d
),
202 cpu_reg_names
[i
+ 16]);
206 static TCGv_i64
load_reg(int reg
)
208 TCGv_i64 r
= tcg_temp_new_i64();
209 tcg_gen_mov_i64(r
, regs
[reg
]);
213 static TCGv_i64
load_freg32_i64(int reg
)
215 TCGv_i64 r
= tcg_temp_new_i64();
216 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
220 static void store_reg(int reg
, TCGv_i64 v
)
222 tcg_gen_mov_i64(regs
[reg
], v
);
225 static void store_freg(int reg
, TCGv_i64 v
)
227 tcg_gen_mov_i64(fregs
[reg
], v
);
230 static void store_reg32_i64(int reg
, TCGv_i64 v
)
232 /* 32 bit register writes keep the upper half */
233 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
236 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
238 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
241 static void store_freg32_i64(int reg
, TCGv_i64 v
)
243 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
246 static void return_low128(TCGv_i64 dest
)
248 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
251 static void update_psw_addr(DisasContext
*s
)
254 tcg_gen_movi_i64(psw_addr
, s
->pc
);
257 static void per_branch(DisasContext
*s
, bool to_next
)
259 #ifndef CONFIG_USER_ONLY
260 tcg_gen_movi_i64(gbea
, s
->pc
);
262 if (s
->tb
->flags
& FLAG_MASK_PER
) {
263 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->next_pc
) : psw_addr
;
264 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
266 tcg_temp_free_i64(next_pc
);
272 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
273 TCGv_i64 arg1
, TCGv_i64 arg2
)
275 #ifndef CONFIG_USER_ONLY
276 if (s
->tb
->flags
& FLAG_MASK_PER
) {
277 TCGLabel
*lab
= gen_new_label();
278 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
280 tcg_gen_movi_i64(gbea
, s
->pc
);
281 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
285 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
286 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
287 tcg_temp_free_i64(pc
);
292 static void per_breaking_event(DisasContext
*s
)
294 tcg_gen_movi_i64(gbea
, s
->pc
);
297 static void update_cc_op(DisasContext
*s
)
299 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
300 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
304 static void potential_page_fault(DisasContext
*s
)
310 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
312 return (uint64_t)cpu_lduw_code(env
, pc
);
315 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
317 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
320 static int get_mem_index(DisasContext
*s
)
322 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
323 case PSW_ASC_PRIMARY
>> 32:
325 case PSW_ASC_SECONDARY
>> 32:
327 case PSW_ASC_HOME
>> 32:
335 static void gen_exception(int excp
)
337 TCGv_i32 tmp
= tcg_const_i32(excp
);
338 gen_helper_exception(cpu_env
, tmp
);
339 tcg_temp_free_i32(tmp
);
342 static void gen_program_exception(DisasContext
*s
, int code
)
346 /* Remember what pgm exception this was. */
347 tmp
= tcg_const_i32(code
);
348 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
349 tcg_temp_free_i32(tmp
);
351 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
352 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
353 tcg_temp_free_i32(tmp
);
355 /* Advance past instruction. */
362 /* Trigger exception. */
363 gen_exception(EXCP_PGM
);
366 static inline void gen_illegal_opcode(DisasContext
*s
)
368 gen_program_exception(s
, PGM_OPERATION
);
371 static inline void gen_trap(DisasContext
*s
)
375 /* Set DXC to 0xff. */
376 t
= tcg_temp_new_i32();
377 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
378 tcg_gen_ori_i32(t
, t
, 0xff00);
379 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
380 tcg_temp_free_i32(t
);
382 gen_program_exception(s
, PGM_DATA
);
385 #ifndef CONFIG_USER_ONLY
386 static void check_privileged(DisasContext
*s
)
388 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
389 gen_program_exception(s
, PGM_PRIVILEGED
);
394 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
396 TCGv_i64 tmp
= tcg_temp_new_i64();
397 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
399 /* Note that d2 is limited to 20 bits, signed. If we crop negative
400 displacements early we create larger immediate addends. */
402 /* Note that addi optimizes the imm==0 case. */
404 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
405 tcg_gen_addi_i64(tmp
, tmp
, d2
);
407 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
409 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
415 tcg_gen_movi_i64(tmp
, d2
);
418 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
424 static inline bool live_cc_data(DisasContext
*s
)
426 return (s
->cc_op
!= CC_OP_DYNAMIC
427 && s
->cc_op
!= CC_OP_STATIC
431 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
433 if (live_cc_data(s
)) {
434 tcg_gen_discard_i64(cc_src
);
435 tcg_gen_discard_i64(cc_dst
);
436 tcg_gen_discard_i64(cc_vr
);
438 s
->cc_op
= CC_OP_CONST0
+ val
;
441 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
443 if (live_cc_data(s
)) {
444 tcg_gen_discard_i64(cc_src
);
445 tcg_gen_discard_i64(cc_vr
);
447 tcg_gen_mov_i64(cc_dst
, dst
);
451 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
454 if (live_cc_data(s
)) {
455 tcg_gen_discard_i64(cc_vr
);
457 tcg_gen_mov_i64(cc_src
, src
);
458 tcg_gen_mov_i64(cc_dst
, dst
);
462 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
463 TCGv_i64 dst
, TCGv_i64 vr
)
465 tcg_gen_mov_i64(cc_src
, src
);
466 tcg_gen_mov_i64(cc_dst
, dst
);
467 tcg_gen_mov_i64(cc_vr
, vr
);
471 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
473 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
476 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
478 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
481 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
483 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
486 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
488 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
491 /* CC value is in env->cc_op */
492 static void set_cc_static(DisasContext
*s
)
494 if (live_cc_data(s
)) {
495 tcg_gen_discard_i64(cc_src
);
496 tcg_gen_discard_i64(cc_dst
);
497 tcg_gen_discard_i64(cc_vr
);
499 s
->cc_op
= CC_OP_STATIC
;
502 /* calculates cc into cc_op */
503 static void gen_op_calc_cc(DisasContext
*s
)
505 TCGv_i32 local_cc_op
;
508 TCGV_UNUSED_I32(local_cc_op
);
509 TCGV_UNUSED_I64(dummy
);
512 dummy
= tcg_const_i64(0);
526 local_cc_op
= tcg_const_i32(s
->cc_op
);
542 /* s->cc_op is the cc value */
543 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
546 /* env->cc_op already is the cc value */
561 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
566 case CC_OP_LTUGTU_32
:
567 case CC_OP_LTUGTU_64
:
574 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
589 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
592 /* unknown operation - assume 3 arguments and cc_op in env */
593 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
599 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
600 tcg_temp_free_i32(local_cc_op
);
602 if (!TCGV_IS_UNUSED_I64(dummy
)) {
603 tcg_temp_free_i64(dummy
);
606 /* We now have cc in cc_op as constant */
610 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
612 if (unlikely(s
->singlestep_enabled
) ||
613 (s
->tb
->cflags
& CF_LAST_IO
) ||
614 (s
->tb
->flags
& FLAG_MASK_PER
)) {
617 #ifndef CONFIG_USER_ONLY
618 return (dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
619 (dest
& TARGET_PAGE_MASK
) == (s
->pc
& TARGET_PAGE_MASK
);
625 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
627 #ifdef DEBUG_INLINE_BRANCHES
628 inline_branch_miss
[cc_op
]++;
632 static void account_inline_branch(DisasContext
*s
, int cc_op
)
634 #ifdef DEBUG_INLINE_BRANCHES
635 inline_branch_hit
[cc_op
]++;
639 /* Table of mask values to comparison codes, given a comparison as input.
640 For such, CC=3 should not be possible. */
641 static const TCGCond ltgt_cond
[16] = {
642 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
643 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
644 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
645 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
646 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
647 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
648 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
649 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
652 /* Table of mask values to comparison codes, given a logic op as input.
653 For such, only CC=0 and CC=1 should be possible. */
654 static const TCGCond nz_cond
[16] = {
655 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
656 TCG_COND_NEVER
, TCG_COND_NEVER
,
657 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
658 TCG_COND_NE
, TCG_COND_NE
,
659 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
660 TCG_COND_EQ
, TCG_COND_EQ
,
661 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
662 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
665 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
666 details required to generate a TCG comparison. */
667 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
670 enum cc_op old_cc_op
= s
->cc_op
;
672 if (mask
== 15 || mask
== 0) {
673 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
676 c
->g1
= c
->g2
= true;
681 /* Find the TCG condition for the mask + cc op. */
687 cond
= ltgt_cond
[mask
];
688 if (cond
== TCG_COND_NEVER
) {
691 account_inline_branch(s
, old_cc_op
);
694 case CC_OP_LTUGTU_32
:
695 case CC_OP_LTUGTU_64
:
696 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
697 if (cond
== TCG_COND_NEVER
) {
700 account_inline_branch(s
, old_cc_op
);
704 cond
= nz_cond
[mask
];
705 if (cond
== TCG_COND_NEVER
) {
708 account_inline_branch(s
, old_cc_op
);
723 account_inline_branch(s
, old_cc_op
);
738 account_inline_branch(s
, old_cc_op
);
742 switch (mask
& 0xa) {
743 case 8: /* src == 0 -> no one bit found */
746 case 2: /* src != 0 -> one bit found */
752 account_inline_branch(s
, old_cc_op
);
758 case 8 | 2: /* vr == 0 */
761 case 4 | 1: /* vr != 0 */
764 case 8 | 4: /* no carry -> vr >= src */
767 case 2 | 1: /* carry -> vr < src */
773 account_inline_branch(s
, old_cc_op
);
778 /* Note that CC=0 is impossible; treat it as dont-care. */
780 case 2: /* zero -> op1 == op2 */
783 case 4 | 1: /* !zero -> op1 != op2 */
786 case 4: /* borrow (!carry) -> op1 < op2 */
789 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
795 account_inline_branch(s
, old_cc_op
);
800 /* Calculate cc value. */
805 /* Jump based on CC. We'll load up the real cond below;
806 the assignment here merely avoids a compiler warning. */
807 account_noninline_branch(s
, old_cc_op
);
808 old_cc_op
= CC_OP_STATIC
;
809 cond
= TCG_COND_NEVER
;
813 /* Load up the arguments of the comparison. */
815 c
->g1
= c
->g2
= false;
819 c
->u
.s32
.a
= tcg_temp_new_i32();
820 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
821 c
->u
.s32
.b
= tcg_const_i32(0);
824 case CC_OP_LTUGTU_32
:
827 c
->u
.s32
.a
= tcg_temp_new_i32();
828 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
829 c
->u
.s32
.b
= tcg_temp_new_i32();
830 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
837 c
->u
.s64
.b
= tcg_const_i64(0);
841 case CC_OP_LTUGTU_64
:
845 c
->g1
= c
->g2
= true;
851 c
->u
.s64
.a
= tcg_temp_new_i64();
852 c
->u
.s64
.b
= tcg_const_i64(0);
853 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
858 c
->u
.s32
.a
= tcg_temp_new_i32();
859 c
->u
.s32
.b
= tcg_temp_new_i32();
860 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
861 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
862 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
864 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
871 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
872 c
->u
.s64
.b
= tcg_const_i64(0);
884 case 0x8 | 0x4 | 0x2: /* cc != 3 */
886 c
->u
.s32
.b
= tcg_const_i32(3);
888 case 0x8 | 0x4 | 0x1: /* cc != 2 */
890 c
->u
.s32
.b
= tcg_const_i32(2);
892 case 0x8 | 0x2 | 0x1: /* cc != 1 */
894 c
->u
.s32
.b
= tcg_const_i32(1);
896 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
899 c
->u
.s32
.a
= tcg_temp_new_i32();
900 c
->u
.s32
.b
= tcg_const_i32(0);
901 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
903 case 0x8 | 0x4: /* cc < 2 */
905 c
->u
.s32
.b
= tcg_const_i32(2);
907 case 0x8: /* cc == 0 */
909 c
->u
.s32
.b
= tcg_const_i32(0);
911 case 0x4 | 0x2 | 0x1: /* cc != 0 */
913 c
->u
.s32
.b
= tcg_const_i32(0);
915 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
918 c
->u
.s32
.a
= tcg_temp_new_i32();
919 c
->u
.s32
.b
= tcg_const_i32(0);
920 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
922 case 0x4: /* cc == 1 */
924 c
->u
.s32
.b
= tcg_const_i32(1);
926 case 0x2 | 0x1: /* cc > 1 */
928 c
->u
.s32
.b
= tcg_const_i32(1);
930 case 0x2: /* cc == 2 */
932 c
->u
.s32
.b
= tcg_const_i32(2);
934 case 0x1: /* cc == 3 */
936 c
->u
.s32
.b
= tcg_const_i32(3);
939 /* CC is masked by something else: (8 >> cc) & mask. */
942 c
->u
.s32
.a
= tcg_const_i32(8);
943 c
->u
.s32
.b
= tcg_const_i32(0);
944 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
945 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
956 static void free_compare(DisasCompare
*c
)
960 tcg_temp_free_i64(c
->u
.s64
.a
);
962 tcg_temp_free_i32(c
->u
.s32
.a
);
967 tcg_temp_free_i64(c
->u
.s64
.b
);
969 tcg_temp_free_i32(c
->u
.s32
.b
);
974 /* ====================================================================== */
975 /* Define the insn format enumeration. */
976 #define F0(N) FMT_##N,
977 #define F1(N, X1) F0(N)
978 #define F2(N, X1, X2) F0(N)
979 #define F3(N, X1, X2, X3) F0(N)
980 #define F4(N, X1, X2, X3, X4) F0(N)
981 #define F5(N, X1, X2, X3, X4, X5) F0(N)
984 #include "insn-format.def"
994 /* Define a structure to hold the decoded fields. We'll store each inside
995 an array indexed by an enum. In order to conserve memory, we'll arrange
996 for fields that do not exist at the same time to overlap, thus the "C"
997 for compact. For checking purposes there is an "O" for original index
998 as well that will be applied to availability bitmaps. */
1000 enum DisasFieldIndexO
{
1023 enum DisasFieldIndexC
{
1054 struct DisasFields
{
1058 unsigned presentC
:16;
1059 unsigned int presentO
;
1063 /* This is the way fields are to be accessed out of DisasFields. */
1064 #define have_field(S, F) have_field1((S), FLD_O_##F)
1065 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1067 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1069 return (f
->presentO
>> c
) & 1;
1072 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1073 enum DisasFieldIndexC c
)
1075 assert(have_field1(f
, o
));
1079 /* Describe the layout of each field in each format. */
1080 typedef struct DisasField
{
1082 unsigned int size
:8;
1083 unsigned int type
:2;
1084 unsigned int indexC
:6;
1085 enum DisasFieldIndexO indexO
:8;
1088 typedef struct DisasFormatInfo
{
1089 DisasField op
[NUM_C_FIELD
];
1092 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1093 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1094 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1095 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1096 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1097 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1098 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1099 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1100 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1101 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1102 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1103 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1104 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1105 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1107 #define F0(N) { { } },
1108 #define F1(N, X1) { { X1 } },
1109 #define F2(N, X1, X2) { { X1, X2 } },
1110 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1111 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1112 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1114 static const DisasFormatInfo format_info
[] = {
1115 #include "insn-format.def"
1133 /* Generally, we'll extract operands into this structures, operate upon
1134 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1135 of routines below for more details. */
1137 bool g_out
, g_out2
, g_in1
, g_in2
;
1138 TCGv_i64 out
, out2
, in1
, in2
;
1142 /* Instructions can place constraints on their operands, raising specification
1143 exceptions if they are violated. To make this easy to automate, each "in1",
1144 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1145 of the following, or 0. To make this easy to document, we'll put the
1146 SPEC_<name> defines next to <name>. */
1148 #define SPEC_r1_even 1
1149 #define SPEC_r2_even 2
1150 #define SPEC_r3_even 4
1151 #define SPEC_r1_f128 8
1152 #define SPEC_r2_f128 16
1154 /* Return values from translate_one, indicating the state of the TB. */
1156 /* Continue the TB. */
1158 /* We have emitted one or more goto_tb. No fixup required. */
1160 /* We are not using a goto_tb (for whatever reason), but have updated
1161 the PC (for whatever reason), so there's no need to do it again on
1164 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1165 updated the PC for the next instruction to be executed. */
1167 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1168 No following code will be executed. */
1172 typedef enum DisasFacility
{
1173 FAC_Z
, /* zarch (default) */
1174 FAC_CASS
, /* compare and swap and store */
1175 FAC_CASS2
, /* compare and swap and store 2*/
1176 FAC_DFP
, /* decimal floating point */
1177 FAC_DFPR
, /* decimal floating point rounding */
1178 FAC_DO
, /* distinct operands */
1179 FAC_EE
, /* execute extensions */
1180 FAC_EI
, /* extended immediate */
1181 FAC_FPE
, /* floating point extension */
1182 FAC_FPSSH
, /* floating point support sign handling */
1183 FAC_FPRGR
, /* FPR-GR transfer */
1184 FAC_GIE
, /* general instructions extension */
1185 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1186 FAC_HW
, /* high-word */
1187 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1188 FAC_MIE
, /* miscellaneous-instruction-extensions */
1189 FAC_LAT
, /* load-and-trap */
1190 FAC_LOC
, /* load/store on condition */
1191 FAC_LD
, /* long displacement */
1192 FAC_PC
, /* population count */
1193 FAC_SCF
, /* store clock fast */
1194 FAC_SFLE
, /* store facility list extended */
1195 FAC_ILA
, /* interlocked access facility 1 */
1201 DisasFacility fac
:8;
1206 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1207 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1208 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1209 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1210 void (*help_cout
)(DisasContext
*, DisasOps
*);
1211 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1216 /* ====================================================================== */
1217 /* Miscellaneous helpers, used by several operations. */
1219 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1220 DisasOps
*o
, int mask
)
1222 int b2
= get_field(f
, b2
);
1223 int d2
= get_field(f
, d2
);
1226 o
->in2
= tcg_const_i64(d2
& mask
);
1228 o
->in2
= get_address(s
, 0, b2
, d2
);
1229 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1233 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1235 if (dest
== s
->next_pc
) {
1236 per_branch(s
, true);
1239 if (use_goto_tb(s
, dest
)) {
1241 per_breaking_event(s
);
1243 tcg_gen_movi_i64(psw_addr
, dest
);
1244 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1245 return EXIT_GOTO_TB
;
1247 tcg_gen_movi_i64(psw_addr
, dest
);
1248 per_branch(s
, false);
1249 return EXIT_PC_UPDATED
;
1253 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1254 bool is_imm
, int imm
, TCGv_i64 cdest
)
1257 uint64_t dest
= s
->pc
+ 2 * imm
;
1260 /* Take care of the special cases first. */
1261 if (c
->cond
== TCG_COND_NEVER
) {
1266 if (dest
== s
->next_pc
) {
1267 /* Branch to next. */
1268 per_branch(s
, true);
1272 if (c
->cond
== TCG_COND_ALWAYS
) {
1273 ret
= help_goto_direct(s
, dest
);
1277 if (TCGV_IS_UNUSED_I64(cdest
)) {
1278 /* E.g. bcr %r0 -> no branch. */
1282 if (c
->cond
== TCG_COND_ALWAYS
) {
1283 tcg_gen_mov_i64(psw_addr
, cdest
);
1284 per_branch(s
, false);
1285 ret
= EXIT_PC_UPDATED
;
1290 if (use_goto_tb(s
, s
->next_pc
)) {
1291 if (is_imm
&& use_goto_tb(s
, dest
)) {
1292 /* Both exits can use goto_tb. */
1295 lab
= gen_new_label();
1297 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1299 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1302 /* Branch not taken. */
1304 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1305 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1309 per_breaking_event(s
);
1311 tcg_gen_movi_i64(psw_addr
, dest
);
1312 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1316 /* Fallthru can use goto_tb, but taken branch cannot. */
1317 /* Store taken branch destination before the brcond. This
1318 avoids having to allocate a new local temp to hold it.
1319 We'll overwrite this in the not taken case anyway. */
1321 tcg_gen_mov_i64(psw_addr
, cdest
);
1324 lab
= gen_new_label();
1326 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1328 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1331 /* Branch not taken. */
1334 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1335 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1339 tcg_gen_movi_i64(psw_addr
, dest
);
1341 per_breaking_event(s
);
1342 ret
= EXIT_PC_UPDATED
;
1345 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1346 Most commonly we're single-stepping or some other condition that
1347 disables all use of goto_tb. Just update the PC and exit. */
1349 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1351 cdest
= tcg_const_i64(dest
);
1355 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1357 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1359 TCGv_i32 t0
= tcg_temp_new_i32();
1360 TCGv_i64 t1
= tcg_temp_new_i64();
1361 TCGv_i64 z
= tcg_const_i64(0);
1362 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1363 tcg_gen_extu_i32_i64(t1
, t0
);
1364 tcg_temp_free_i32(t0
);
1365 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1366 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1367 tcg_temp_free_i64(t1
);
1368 tcg_temp_free_i64(z
);
1372 tcg_temp_free_i64(cdest
);
1374 tcg_temp_free_i64(next
);
1376 ret
= EXIT_PC_UPDATED
;
1384 /* ====================================================================== */
1385 /* The operations. These perform the bulk of the work for any insn,
1386 usually after the operands have been loaded and output initialized. */
1388 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1391 z
= tcg_const_i64(0);
1392 n
= tcg_temp_new_i64();
1393 tcg_gen_neg_i64(n
, o
->in2
);
1394 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1395 tcg_temp_free_i64(n
);
1396 tcg_temp_free_i64(z
);
1400 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1402 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1406 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1408 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1412 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1414 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1415 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1419 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1421 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1425 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1430 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1432 /* The carry flag is the msb of CC, therefore the branch mask that would
1433 create that comparison is 3. Feeding the generated comparison to
1434 setcond produces the carry flag that we desire. */
1435 disas_jcc(s
, &cmp
, 3);
1436 carry
= tcg_temp_new_i64();
1438 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1440 TCGv_i32 t
= tcg_temp_new_i32();
1441 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1442 tcg_gen_extu_i32_i64(carry
, t
);
1443 tcg_temp_free_i32(t
);
1447 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1448 tcg_temp_free_i64(carry
);
1452 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1454 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1458 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1460 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1464 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1466 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1467 return_low128(o
->out2
);
1471 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1473 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1477 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1479 int shift
= s
->insn
->data
& 0xff;
1480 int size
= s
->insn
->data
>> 8;
1481 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1484 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1485 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1486 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1488 /* Produce the CC from only the bits manipulated. */
1489 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1490 set_cc_nz_u64(s
, cc_dst
);
1494 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1496 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1497 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1498 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1499 per_branch(s
, false);
1500 return EXIT_PC_UPDATED
;
1506 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1508 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1509 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1512 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1514 int m1
= get_field(s
->fields
, m1
);
1515 bool is_imm
= have_field(s
->fields
, i2
);
1516 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1519 disas_jcc(s
, &c
, m1
);
1520 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1523 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1525 int r1
= get_field(s
->fields
, r1
);
1526 bool is_imm
= have_field(s
->fields
, i2
);
1527 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1531 c
.cond
= TCG_COND_NE
;
1536 t
= tcg_temp_new_i64();
1537 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1538 store_reg32_i64(r1
, t
);
1539 c
.u
.s32
.a
= tcg_temp_new_i32();
1540 c
.u
.s32
.b
= tcg_const_i32(0);
1541 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1542 tcg_temp_free_i64(t
);
1544 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1547 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1549 int r1
= get_field(s
->fields
, r1
);
1550 int imm
= get_field(s
->fields
, i2
);
1554 c
.cond
= TCG_COND_NE
;
1559 t
= tcg_temp_new_i64();
1560 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1561 tcg_gen_subi_i64(t
, t
, 1);
1562 store_reg32h_i64(r1
, t
);
1563 c
.u
.s32
.a
= tcg_temp_new_i32();
1564 c
.u
.s32
.b
= tcg_const_i32(0);
1565 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1566 tcg_temp_free_i64(t
);
1568 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1571 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1573 int r1
= get_field(s
->fields
, r1
);
1574 bool is_imm
= have_field(s
->fields
, i2
);
1575 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1578 c
.cond
= TCG_COND_NE
;
1583 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1584 c
.u
.s64
.a
= regs
[r1
];
1585 c
.u
.s64
.b
= tcg_const_i64(0);
1587 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1590 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1592 int r1
= get_field(s
->fields
, r1
);
1593 int r3
= get_field(s
->fields
, r3
);
1594 bool is_imm
= have_field(s
->fields
, i2
);
1595 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1599 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1604 t
= tcg_temp_new_i64();
1605 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1606 c
.u
.s32
.a
= tcg_temp_new_i32();
1607 c
.u
.s32
.b
= tcg_temp_new_i32();
1608 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1609 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1610 store_reg32_i64(r1
, t
);
1611 tcg_temp_free_i64(t
);
1613 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1616 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1618 int r1
= get_field(s
->fields
, r1
);
1619 int r3
= get_field(s
->fields
, r3
);
1620 bool is_imm
= have_field(s
->fields
, i2
);
1621 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1624 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1627 if (r1
== (r3
| 1)) {
1628 c
.u
.s64
.b
= load_reg(r3
| 1);
1631 c
.u
.s64
.b
= regs
[r3
| 1];
1635 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1636 c
.u
.s64
.a
= regs
[r1
];
1639 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1642 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1644 int imm
, m3
= get_field(s
->fields
, m3
);
1648 c
.cond
= ltgt_cond
[m3
];
1649 if (s
->insn
->data
) {
1650 c
.cond
= tcg_unsigned_cond(c
.cond
);
1652 c
.is_64
= c
.g1
= c
.g2
= true;
1656 is_imm
= have_field(s
->fields
, i4
);
1658 imm
= get_field(s
->fields
, i4
);
1661 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1662 get_field(s
->fields
, d4
));
1665 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1668 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1670 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1675 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1677 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1682 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1684 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1689 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1691 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1692 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1693 tcg_temp_free_i32(m3
);
1694 gen_set_cc_nz_f32(s
, o
->in2
);
1698 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1700 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1701 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1702 tcg_temp_free_i32(m3
);
1703 gen_set_cc_nz_f64(s
, o
->in2
);
1707 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1709 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1710 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1711 tcg_temp_free_i32(m3
);
1712 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1716 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1718 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1719 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1720 tcg_temp_free_i32(m3
);
1721 gen_set_cc_nz_f32(s
, o
->in2
);
1725 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1727 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1728 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1729 tcg_temp_free_i32(m3
);
1730 gen_set_cc_nz_f64(s
, o
->in2
);
1734 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1736 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1737 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1738 tcg_temp_free_i32(m3
);
1739 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1743 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1745 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1746 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1747 tcg_temp_free_i32(m3
);
1748 gen_set_cc_nz_f32(s
, o
->in2
);
1752 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1754 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1755 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1756 tcg_temp_free_i32(m3
);
1757 gen_set_cc_nz_f64(s
, o
->in2
);
1761 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1763 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1764 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1765 tcg_temp_free_i32(m3
);
1766 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1770 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1772 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1773 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1774 tcg_temp_free_i32(m3
);
1775 gen_set_cc_nz_f32(s
, o
->in2
);
1779 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1781 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1782 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1783 tcg_temp_free_i32(m3
);
1784 gen_set_cc_nz_f64(s
, o
->in2
);
1788 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1790 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1791 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1792 tcg_temp_free_i32(m3
);
1793 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1797 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1799 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1800 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1801 tcg_temp_free_i32(m3
);
1805 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1807 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1808 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1809 tcg_temp_free_i32(m3
);
1813 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1815 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1816 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1817 tcg_temp_free_i32(m3
);
1818 return_low128(o
->out2
);
1822 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1824 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1825 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1826 tcg_temp_free_i32(m3
);
1830 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1832 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1833 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1834 tcg_temp_free_i32(m3
);
1838 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1840 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1841 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1842 tcg_temp_free_i32(m3
);
1843 return_low128(o
->out2
);
1847 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1849 int r2
= get_field(s
->fields
, r2
);
1850 TCGv_i64 len
= tcg_temp_new_i64();
1852 potential_page_fault(s
);
1853 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1855 return_low128(o
->out
);
1857 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1858 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1859 tcg_temp_free_i64(len
);
1864 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1866 int l
= get_field(s
->fields
, l1
);
1871 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1872 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1875 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1876 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1879 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1880 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1883 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1884 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1887 potential_page_fault(s
);
1888 vl
= tcg_const_i32(l
);
1889 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1890 tcg_temp_free_i32(vl
);
1894 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1898 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1900 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1901 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1902 potential_page_fault(s
);
1903 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1904 tcg_temp_free_i32(r1
);
1905 tcg_temp_free_i32(r3
);
1910 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1912 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1913 TCGv_i32 t1
= tcg_temp_new_i32();
1914 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1915 potential_page_fault(s
);
1916 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1918 tcg_temp_free_i32(t1
);
1919 tcg_temp_free_i32(m3
);
1923 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1925 potential_page_fault(s
);
1926 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1928 return_low128(o
->in2
);
1932 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1934 TCGv_i64 t
= tcg_temp_new_i64();
1935 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1936 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1937 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1938 tcg_temp_free_i64(t
);
1942 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1944 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1945 int d2
= get_field(s
->fields
, d2
);
1946 int b2
= get_field(s
->fields
, b2
);
1947 int is_64
= s
->insn
->data
;
1948 TCGv_i64 addr
, mem
, cc
, z
;
1950 /* Note that in1 = R3 (new value) and
1951 in2 = (zero-extended) R1 (expected value). */
1953 /* Load the memory into the (temporary) output. While the PoO only talks
1954 about moving the memory to R1 on inequality, if we include equality it
1955 means that R1 is equal to the memory in all conditions. */
1956 addr
= get_address(s
, 0, b2
, d2
);
1958 tcg_gen_qemu_ld64(o
->out
, addr
, get_mem_index(s
));
1960 tcg_gen_qemu_ld32u(o
->out
, addr
, get_mem_index(s
));
1963 /* Are the memory and expected values (un)equal? Note that this setcond
1964 produces the output CC value, thus the NE sense of the test. */
1965 cc
= tcg_temp_new_i64();
1966 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1968 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1969 Recall that we are allowed to unconditionally issue the store (and
1970 thus any possible write trap), so (re-)store the original contents
1971 of MEM in case of inequality. */
1972 z
= tcg_const_i64(0);
1973 mem
= tcg_temp_new_i64();
1974 tcg_gen_movcond_i64(TCG_COND_EQ
, mem
, cc
, z
, o
->in1
, o
->out
);
1976 tcg_gen_qemu_st64(mem
, addr
, get_mem_index(s
));
1978 tcg_gen_qemu_st32(mem
, addr
, get_mem_index(s
));
1980 tcg_temp_free_i64(z
);
1981 tcg_temp_free_i64(mem
);
1982 tcg_temp_free_i64(addr
);
1984 /* Store CC back to cc_op. Wait until after the store so that any
1985 exception gets the old cc_op value. */
1986 tcg_gen_extrl_i64_i32(cc_op
, cc
);
1987 tcg_temp_free_i64(cc
);
1992 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1994 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1995 int r1
= get_field(s
->fields
, r1
);
1996 int r3
= get_field(s
->fields
, r3
);
1997 int d2
= get_field(s
->fields
, d2
);
1998 int b2
= get_field(s
->fields
, b2
);
1999 TCGv_i64 addrh
, addrl
, memh
, meml
, outh
, outl
, cc
, z
;
2001 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2003 addrh
= get_address(s
, 0, b2
, d2
);
2004 addrl
= get_address(s
, 0, b2
, d2
+ 8);
2005 outh
= tcg_temp_new_i64();
2006 outl
= tcg_temp_new_i64();
2008 tcg_gen_qemu_ld64(outh
, addrh
, get_mem_index(s
));
2009 tcg_gen_qemu_ld64(outl
, addrl
, get_mem_index(s
));
2011 /* Fold the double-word compare with arithmetic. */
2012 cc
= tcg_temp_new_i64();
2013 z
= tcg_temp_new_i64();
2014 tcg_gen_xor_i64(cc
, outh
, regs
[r1
]);
2015 tcg_gen_xor_i64(z
, outl
, regs
[r1
+ 1]);
2016 tcg_gen_or_i64(cc
, cc
, z
);
2017 tcg_gen_movi_i64(z
, 0);
2018 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, cc
, z
);
2020 memh
= tcg_temp_new_i64();
2021 meml
= tcg_temp_new_i64();
2022 tcg_gen_movcond_i64(TCG_COND_EQ
, memh
, cc
, z
, regs
[r3
], outh
);
2023 tcg_gen_movcond_i64(TCG_COND_EQ
, meml
, cc
, z
, regs
[r3
+ 1], outl
);
2024 tcg_temp_free_i64(z
);
2026 tcg_gen_qemu_st64(memh
, addrh
, get_mem_index(s
));
2027 tcg_gen_qemu_st64(meml
, addrl
, get_mem_index(s
));
2028 tcg_temp_free_i64(memh
);
2029 tcg_temp_free_i64(meml
);
2030 tcg_temp_free_i64(addrh
);
2031 tcg_temp_free_i64(addrl
);
2033 /* Save back state now that we've passed all exceptions. */
2034 tcg_gen_mov_i64(regs
[r1
], outh
);
2035 tcg_gen_mov_i64(regs
[r1
+ 1], outl
);
2036 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2037 tcg_temp_free_i64(outh
);
2038 tcg_temp_free_i64(outl
);
2039 tcg_temp_free_i64(cc
);
2044 #ifndef CONFIG_USER_ONLY
2045 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
2047 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2048 check_privileged(s
);
2049 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
2050 tcg_temp_free_i32(r1
);
2056 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2058 TCGv_i64 t1
= tcg_temp_new_i64();
2059 TCGv_i32 t2
= tcg_temp_new_i32();
2060 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2061 gen_helper_cvd(t1
, t2
);
2062 tcg_temp_free_i32(t2
);
2063 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2064 tcg_temp_free_i64(t1
);
2068 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2070 int m3
= get_field(s
->fields
, m3
);
2071 TCGLabel
*lab
= gen_new_label();
2074 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2075 if (s
->insn
->data
) {
2076 c
= tcg_unsigned_cond(c
);
2078 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2087 #ifndef CONFIG_USER_ONLY
2088 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2090 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2091 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2092 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2094 check_privileged(s
);
2098 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2100 tcg_temp_free_i32(func_code
);
2101 tcg_temp_free_i32(r3
);
2102 tcg_temp_free_i32(r1
);
2107 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2109 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2110 return_low128(o
->out
);
2114 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2116 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2117 return_low128(o
->out
);
2121 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2123 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2124 return_low128(o
->out
);
2128 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2130 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2131 return_low128(o
->out
);
2135 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2137 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2141 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2143 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2147 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2149 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2150 return_low128(o
->out2
);
2154 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2156 int r2
= get_field(s
->fields
, r2
);
2157 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2161 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2163 /* No cache information provided. */
2164 tcg_gen_movi_i64(o
->out
, -1);
2168 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2170 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2174 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2176 int r1
= get_field(s
->fields
, r1
);
2177 int r2
= get_field(s
->fields
, r2
);
2178 TCGv_i64 t
= tcg_temp_new_i64();
2180 /* Note the "subsequently" in the PoO, which implies a defined result
2181 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2182 tcg_gen_shri_i64(t
, psw_mask
, 32);
2183 store_reg32_i64(r1
, t
);
2185 store_reg32_i64(r2
, psw_mask
);
2188 tcg_temp_free_i64(t
);
2192 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2194 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2195 tb->flags, (ab)use the tb->cs_base field as the address of
2196 the template in memory, and grab 8 bits of tb->flags/cflags for
2197 the contents of the register. We would then recognize all this
2198 in gen_intermediate_code_internal, generating code for exactly
2199 one instruction. This new TB then gets executed normally.
2201 On the other hand, this seems to be mostly used for modifying
2202 MVC inside of memcpy, which needs a helper call anyway. So
2203 perhaps this doesn't bear thinking about any further. */
2210 tmp
= tcg_const_i64(s
->next_pc
);
2211 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2212 tcg_temp_free_i64(tmp
);
2217 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2219 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2220 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2221 tcg_temp_free_i32(m3
);
2225 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2227 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2228 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2229 tcg_temp_free_i32(m3
);
2233 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2235 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2236 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2237 return_low128(o
->out2
);
2238 tcg_temp_free_i32(m3
);
2242 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2244 /* We'll use the original input for cc computation, since we get to
2245 compare that against 0, which ought to be better than comparing
2246 the real output against 64. It also lets cc_dst be a convenient
2247 temporary during our computation. */
2248 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2250 /* R1 = IN ? CLZ(IN) : 64. */
2251 gen_helper_clz(o
->out
, o
->in2
);
2253 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2254 value by 64, which is undefined. But since the shift is 64 iff the
2255 input is zero, we still get the correct result after and'ing. */
2256 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2257 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2258 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2262 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2264 int m3
= get_field(s
->fields
, m3
);
2265 int pos
, len
, base
= s
->insn
->data
;
2266 TCGv_i64 tmp
= tcg_temp_new_i64();
2271 /* Effectively a 32-bit load. */
2272 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2279 /* Effectively a 16-bit load. */
2280 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2288 /* Effectively an 8-bit load. */
2289 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2294 pos
= base
+ ctz32(m3
) * 8;
2295 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2296 ccm
= ((1ull << len
) - 1) << pos
;
2300 /* This is going to be a sequence of loads and inserts. */
2301 pos
= base
+ 32 - 8;
2305 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2306 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2307 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2310 m3
= (m3
<< 1) & 0xf;
2316 tcg_gen_movi_i64(tmp
, ccm
);
2317 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2318 tcg_temp_free_i64(tmp
);
2322 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2324 int shift
= s
->insn
->data
& 0xff;
2325 int size
= s
->insn
->data
>> 8;
2326 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2330 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2335 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2337 t1
= tcg_temp_new_i64();
2338 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2339 tcg_gen_shri_i64(t1
, t1
, 36);
2340 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2342 tcg_gen_extu_i32_i64(t1
, cc_op
);
2343 tcg_gen_shli_i64(t1
, t1
, 28);
2344 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2345 tcg_temp_free_i64(t1
);
2349 #ifndef CONFIG_USER_ONLY
2350 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2352 check_privileged(s
);
2353 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2357 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2359 check_privileged(s
);
2360 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2365 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2367 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2371 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2373 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2377 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2379 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2383 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2385 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2389 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2391 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2392 return_low128(o
->out2
);
2396 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2398 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2399 return_low128(o
->out2
);
2403 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2405 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2409 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2411 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2415 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2417 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2421 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2423 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2427 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2429 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2433 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2435 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2439 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2441 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2445 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2447 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2451 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2453 TCGLabel
*lab
= gen_new_label();
2454 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2455 /* The value is stored even in case of trap. */
2456 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2462 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2464 TCGLabel
*lab
= gen_new_label();
2465 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2466 /* The value is stored even in case of trap. */
2467 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2473 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2475 TCGLabel
*lab
= gen_new_label();
2476 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2477 /* The value is stored even in case of trap. */
2478 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2484 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2486 TCGLabel
*lab
= gen_new_label();
2487 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2488 /* The value is stored even in case of trap. */
2489 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2495 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2497 TCGLabel
*lab
= gen_new_label();
2498 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2499 /* The value is stored even in case of trap. */
2500 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2506 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2510 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2513 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2517 TCGv_i32 t32
= tcg_temp_new_i32();
2520 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2523 t
= tcg_temp_new_i64();
2524 tcg_gen_extu_i32_i64(t
, t32
);
2525 tcg_temp_free_i32(t32
);
2527 z
= tcg_const_i64(0);
2528 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2529 tcg_temp_free_i64(t
);
2530 tcg_temp_free_i64(z
);
2536 #ifndef CONFIG_USER_ONLY
2537 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2539 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2540 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2541 check_privileged(s
);
2542 potential_page_fault(s
);
2543 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2544 tcg_temp_free_i32(r1
);
2545 tcg_temp_free_i32(r3
);
2549 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2551 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2552 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2553 check_privileged(s
);
2554 potential_page_fault(s
);
2555 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2556 tcg_temp_free_i32(r1
);
2557 tcg_temp_free_i32(r3
);
2560 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2562 check_privileged(s
);
2563 potential_page_fault(s
);
2564 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2569 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2573 check_privileged(s
);
2574 per_breaking_event(s
);
2576 t1
= tcg_temp_new_i64();
2577 t2
= tcg_temp_new_i64();
2578 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2579 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2580 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2581 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2582 tcg_gen_shli_i64(t1
, t1
, 32);
2583 gen_helper_load_psw(cpu_env
, t1
, t2
);
2584 tcg_temp_free_i64(t1
);
2585 tcg_temp_free_i64(t2
);
2586 return EXIT_NORETURN
;
2589 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2593 check_privileged(s
);
2594 per_breaking_event(s
);
2596 t1
= tcg_temp_new_i64();
2597 t2
= tcg_temp_new_i64();
2598 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2599 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2600 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2601 gen_helper_load_psw(cpu_env
, t1
, t2
);
2602 tcg_temp_free_i64(t1
);
2603 tcg_temp_free_i64(t2
);
2604 return EXIT_NORETURN
;
2608 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2610 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2611 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2612 potential_page_fault(s
);
2613 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2614 tcg_temp_free_i32(r1
);
2615 tcg_temp_free_i32(r3
);
2619 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2621 int r1
= get_field(s
->fields
, r1
);
2622 int r3
= get_field(s
->fields
, r3
);
2625 /* Only one register to read. */
2626 t1
= tcg_temp_new_i64();
2627 if (unlikely(r1
== r3
)) {
2628 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2629 store_reg32_i64(r1
, t1
);
2634 /* First load the values of the first and last registers to trigger
2635 possible page faults. */
2636 t2
= tcg_temp_new_i64();
2637 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2638 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2639 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2640 store_reg32_i64(r1
, t1
);
2641 store_reg32_i64(r3
, t2
);
2643 /* Only two registers to read. */
2644 if (((r1
+ 1) & 15) == r3
) {
2650 /* Then load the remaining registers. Page fault can't occur. */
2652 tcg_gen_movi_i64(t2
, 4);
2655 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2656 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2657 store_reg32_i64(r1
, t1
);
2665 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2667 int r1
= get_field(s
->fields
, r1
);
2668 int r3
= get_field(s
->fields
, r3
);
2671 /* Only one register to read. */
2672 t1
= tcg_temp_new_i64();
2673 if (unlikely(r1
== r3
)) {
2674 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2675 store_reg32h_i64(r1
, t1
);
2680 /* First load the values of the first and last registers to trigger
2681 possible page faults. */
2682 t2
= tcg_temp_new_i64();
2683 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2684 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2685 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2686 store_reg32h_i64(r1
, t1
);
2687 store_reg32h_i64(r3
, t2
);
2689 /* Only two registers to read. */
2690 if (((r1
+ 1) & 15) == r3
) {
2696 /* Then load the remaining registers. Page fault can't occur. */
2698 tcg_gen_movi_i64(t2
, 4);
2701 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2702 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2703 store_reg32h_i64(r1
, t1
);
2711 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2713 int r1
= get_field(s
->fields
, r1
);
2714 int r3
= get_field(s
->fields
, r3
);
2717 /* Only one register to read. */
2718 if (unlikely(r1
== r3
)) {
2719 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2723 /* First load the values of the first and last registers to trigger
2724 possible page faults. */
2725 t1
= tcg_temp_new_i64();
2726 t2
= tcg_temp_new_i64();
2727 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2728 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2729 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2730 tcg_gen_mov_i64(regs
[r1
], t1
);
2733 /* Only two registers to read. */
2734 if (((r1
+ 1) & 15) == r3
) {
2739 /* Then load the remaining registers. Page fault can't occur. */
2741 tcg_gen_movi_i64(t1
, 8);
2744 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2745 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2752 #ifndef CONFIG_USER_ONLY
2753 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2755 check_privileged(s
);
2756 potential_page_fault(s
);
2757 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2761 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2763 check_privileged(s
);
2764 potential_page_fault(s
);
2765 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2770 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2773 o
->g_out
= o
->g_in2
;
2774 TCGV_UNUSED_I64(o
->in2
);
2779 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
2781 int b2
= get_field(s
->fields
, b2
);
2782 TCGv ar1
= tcg_temp_new_i64();
2785 o
->g_out
= o
->g_in2
;
2786 TCGV_UNUSED_I64(o
->in2
);
2789 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
2790 case PSW_ASC_PRIMARY
>> 32:
2791 tcg_gen_movi_i64(ar1
, 0);
2793 case PSW_ASC_ACCREG
>> 32:
2794 tcg_gen_movi_i64(ar1
, 1);
2796 case PSW_ASC_SECONDARY
>> 32:
2798 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
2800 tcg_gen_movi_i64(ar1
, 0);
2803 case PSW_ASC_HOME
>> 32:
2804 tcg_gen_movi_i64(ar1
, 2);
2808 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
2809 tcg_temp_free_i64(ar1
);
2814 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2818 o
->g_out
= o
->g_in1
;
2819 o
->g_out2
= o
->g_in2
;
2820 TCGV_UNUSED_I64(o
->in1
);
2821 TCGV_UNUSED_I64(o
->in2
);
2822 o
->g_in1
= o
->g_in2
= false;
2826 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2828 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2829 potential_page_fault(s
);
2830 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2831 tcg_temp_free_i32(l
);
2835 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2837 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2838 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2839 potential_page_fault(s
);
2840 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2841 tcg_temp_free_i32(r1
);
2842 tcg_temp_free_i32(r2
);
2847 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2849 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2850 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2851 potential_page_fault(s
);
2852 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2853 tcg_temp_free_i32(r1
);
2854 tcg_temp_free_i32(r3
);
2859 #ifndef CONFIG_USER_ONLY
2860 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2862 int r1
= get_field(s
->fields
, l1
);
2863 check_privileged(s
);
2864 potential_page_fault(s
);
2865 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2870 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2872 int r1
= get_field(s
->fields
, l1
);
2873 check_privileged(s
);
2874 potential_page_fault(s
);
2875 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2881 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2883 potential_page_fault(s
);
2884 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2889 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2891 potential_page_fault(s
);
2892 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2894 return_low128(o
->in2
);
2898 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2900 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2904 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2906 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
2910 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2912 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2916 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2918 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2922 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2924 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2928 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2930 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2931 return_low128(o
->out2
);
2935 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2937 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2938 return_low128(o
->out2
);
2942 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2944 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2945 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2946 tcg_temp_free_i64(r3
);
2950 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2952 int r3
= get_field(s
->fields
, r3
);
2953 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2957 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2959 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2960 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2961 tcg_temp_free_i64(r3
);
2965 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2967 int r3
= get_field(s
->fields
, r3
);
2968 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2972 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2975 z
= tcg_const_i64(0);
2976 n
= tcg_temp_new_i64();
2977 tcg_gen_neg_i64(n
, o
->in2
);
2978 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
2979 tcg_temp_free_i64(n
);
2980 tcg_temp_free_i64(z
);
2984 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2986 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2990 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2992 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2996 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2998 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2999 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3003 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
3005 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3006 potential_page_fault(s
);
3007 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3008 tcg_temp_free_i32(l
);
3013 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3015 tcg_gen_neg_i64(o
->out
, o
->in2
);
3019 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
3021 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3025 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
3027 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3031 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
3033 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3034 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3038 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3040 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3041 potential_page_fault(s
);
3042 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3043 tcg_temp_free_i32(l
);
3048 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3050 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3054 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3056 int shift
= s
->insn
->data
& 0xff;
3057 int size
= s
->insn
->data
>> 8;
3058 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3061 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3062 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3064 /* Produce the CC from only the bits manipulated. */
3065 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3066 set_cc_nz_u64(s
, cc_dst
);
3070 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3072 gen_helper_popcnt(o
->out
, o
->in2
);
3076 #ifndef CONFIG_USER_ONLY
3077 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3079 check_privileged(s
);
3080 gen_helper_ptlb(cpu_env
);
3085 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3087 int i3
= get_field(s
->fields
, i3
);
3088 int i4
= get_field(s
->fields
, i4
);
3089 int i5
= get_field(s
->fields
, i5
);
3090 int do_zero
= i4
& 0x80;
3091 uint64_t mask
, imask
, pmask
;
3094 /* Adjust the arguments for the specific insn. */
3095 switch (s
->fields
->op2
) {
3096 case 0x55: /* risbg */
3101 case 0x5d: /* risbhg */
3104 pmask
= 0xffffffff00000000ull
;
3106 case 0x51: /* risblg */
3109 pmask
= 0x00000000ffffffffull
;
3115 /* MASK is the set of bits to be inserted from R2.
3116 Take care for I3/I4 wraparound. */
3119 mask
^= pmask
>> i4
>> 1;
3121 mask
|= ~(pmask
>> i4
>> 1);
3125 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3126 insns, we need to keep the other half of the register. */
3127 imask
= ~mask
| ~pmask
;
3129 if (s
->fields
->op2
== 0x55) {
3136 /* In some cases we can implement this with deposit, which can be more
3137 efficient on some hosts. */
3138 if (~mask
== imask
&& i3
<= i4
) {
3139 if (s
->fields
->op2
== 0x5d) {
3142 /* Note that we rotate the bits to be inserted to the lsb, not to
3143 the position as described in the PoO. */
3146 rot
= (i5
- pos
) & 63;
3152 /* Rotate the input as necessary. */
3153 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3155 /* Insert the selected bits into the output. */
3157 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3158 } else if (imask
== 0) {
3159 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3161 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3162 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3163 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3168 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3170 int i3
= get_field(s
->fields
, i3
);
3171 int i4
= get_field(s
->fields
, i4
);
3172 int i5
= get_field(s
->fields
, i5
);
3175 /* If this is a test-only form, arrange to discard the result. */
3177 o
->out
= tcg_temp_new_i64();
3185 /* MASK is the set of bits to be operated on from R2.
3186 Take care for I3/I4 wraparound. */
3189 mask
^= ~0ull >> i4
>> 1;
3191 mask
|= ~(~0ull >> i4
>> 1);
3194 /* Rotate the input as necessary. */
3195 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3198 switch (s
->fields
->op2
) {
3199 case 0x55: /* AND */
3200 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3201 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3204 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3205 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3207 case 0x57: /* XOR */
3208 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3209 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3216 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3217 set_cc_nz_u64(s
, cc_dst
);
3221 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
3223 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3227 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
3229 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3233 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
3235 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3239 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3241 TCGv_i32 t1
= tcg_temp_new_i32();
3242 TCGv_i32 t2
= tcg_temp_new_i32();
3243 TCGv_i32 to
= tcg_temp_new_i32();
3244 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3245 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3246 tcg_gen_rotl_i32(to
, t1
, t2
);
3247 tcg_gen_extu_i32_i64(o
->out
, to
);
3248 tcg_temp_free_i32(t1
);
3249 tcg_temp_free_i32(t2
);
3250 tcg_temp_free_i32(to
);
3254 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3256 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3260 #ifndef CONFIG_USER_ONLY
3261 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3263 check_privileged(s
);
3264 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3269 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
3271 check_privileged(s
);
3272 gen_helper_sacf(cpu_env
, o
->in2
);
3273 /* Addressing mode has changed, so end the block. */
3274 return EXIT_PC_STALE
;
3278 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3280 int sam
= s
->insn
->data
;
3296 /* Bizarre but true, we check the address of the current insn for the
3297 specification exception, not the next to be executed. Thus the PoO
3298 documents that Bad Things Happen two bytes before the end. */
3299 if (s
->pc
& ~mask
) {
3300 gen_program_exception(s
, PGM_SPECIFICATION
);
3301 return EXIT_NORETURN
;
3305 tsam
= tcg_const_i64(sam
);
3306 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3307 tcg_temp_free_i64(tsam
);
3309 /* Always exit the TB, since we (may have) changed execution mode. */
3310 return EXIT_PC_STALE
;
3313 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3315 int r1
= get_field(s
->fields
, r1
);
3316 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3320 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3322 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3326 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3328 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3332 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3334 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3335 return_low128(o
->out2
);
3339 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3341 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3345 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3347 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3351 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3353 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3354 return_low128(o
->out2
);
3358 #ifndef CONFIG_USER_ONLY
3359 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3361 check_privileged(s
);
3362 potential_page_fault(s
);
3363 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3368 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3370 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3371 check_privileged(s
);
3372 potential_page_fault(s
);
3373 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3374 tcg_temp_free_i32(r1
);
3379 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3386 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3388 /* We want to store when the condition is fulfilled, so branch
3389 out when it's not */
3390 c
.cond
= tcg_invert_cond(c
.cond
);
3392 lab
= gen_new_label();
3394 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3396 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3400 r1
= get_field(s
->fields
, r1
);
3401 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3402 if (s
->insn
->data
) {
3403 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3405 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3407 tcg_temp_free_i64(a
);
3413 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3415 uint64_t sign
= 1ull << s
->insn
->data
;
3416 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3417 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3418 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3419 /* The arithmetic left shift is curious in that it does not affect
3420 the sign bit. Copy that over from the source unchanged. */
3421 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3422 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3423 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3427 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3429 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3433 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3435 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3439 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3441 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3445 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3447 gen_helper_sfpc(cpu_env
, o
->in2
);
3451 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3453 gen_helper_sfas(cpu_env
, o
->in2
);
3457 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3459 int b2
= get_field(s
->fields
, b2
);
3460 int d2
= get_field(s
->fields
, d2
);
3461 TCGv_i64 t1
= tcg_temp_new_i64();
3462 TCGv_i64 t2
= tcg_temp_new_i64();
3465 switch (s
->fields
->op2
) {
3466 case 0x99: /* SRNM */
3469 case 0xb8: /* SRNMB */
3472 case 0xb9: /* SRNMT */
3478 mask
= (1 << len
) - 1;
3480 /* Insert the value into the appropriate field of the FPC. */
3482 tcg_gen_movi_i64(t1
, d2
& mask
);
3484 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3485 tcg_gen_andi_i64(t1
, t1
, mask
);
3487 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3488 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3489 tcg_temp_free_i64(t1
);
3491 /* Then install the new FPC to set the rounding mode in fpu_status. */
3492 gen_helper_sfpc(cpu_env
, t2
);
3493 tcg_temp_free_i64(t2
);
3497 #ifndef CONFIG_USER_ONLY
3498 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3500 check_privileged(s
);
3501 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3502 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
3506 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3508 check_privileged(s
);
3509 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3513 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3515 check_privileged(s
);
3516 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3520 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3522 check_privileged(s
);
3523 /* ??? Surely cpu address != cpu number. In any case the previous
3524 version of this stored more than the required half-word, so it
3525 is unlikely this has ever been tested. */
3526 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3530 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3532 gen_helper_stck(o
->out
, cpu_env
);
3533 /* ??? We don't implement clock states. */
3534 gen_op_movi_cc(s
, 0);
3538 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3540 TCGv_i64 c1
= tcg_temp_new_i64();
3541 TCGv_i64 c2
= tcg_temp_new_i64();
3542 gen_helper_stck(c1
, cpu_env
);
3543 /* Shift the 64-bit value into its place as a zero-extended
3544 104-bit value. Note that "bit positions 64-103 are always
3545 non-zero so that they compare differently to STCK"; we set
3546 the least significant bit to 1. */
3547 tcg_gen_shli_i64(c2
, c1
, 56);
3548 tcg_gen_shri_i64(c1
, c1
, 8);
3549 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3550 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3551 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3552 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3553 tcg_temp_free_i64(c1
);
3554 tcg_temp_free_i64(c2
);
3555 /* ??? We don't implement clock states. */
3556 gen_op_movi_cc(s
, 0);
3560 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3562 check_privileged(s
);
3563 gen_helper_sckc(cpu_env
, o
->in2
);
3567 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3569 check_privileged(s
);
3570 gen_helper_stckc(o
->out
, cpu_env
);
3574 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3576 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3577 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3578 check_privileged(s
);
3579 potential_page_fault(s
);
3580 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3581 tcg_temp_free_i32(r1
);
3582 tcg_temp_free_i32(r3
);
3586 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3588 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3589 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3590 check_privileged(s
);
3591 potential_page_fault(s
);
3592 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3593 tcg_temp_free_i32(r1
);
3594 tcg_temp_free_i32(r3
);
3598 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3600 TCGv_i64 t1
= tcg_temp_new_i64();
3602 check_privileged(s
);
3603 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3604 tcg_gen_ld32u_i64(t1
, cpu_env
, offsetof(CPUS390XState
, machine_type
));
3605 tcg_gen_deposit_i64(o
->out
, o
->out
, t1
, 32, 32);
3606 tcg_temp_free_i64(t1
);
3611 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3613 check_privileged(s
);
3614 gen_helper_spt(cpu_env
, o
->in2
);
3618 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3621 /* We really ought to have more complete indication of facilities
3622 that we implement. Address this when STFLE is implemented. */
3623 check_privileged(s
);
3624 f
= tcg_const_i64(0xc0000000);
3625 a
= tcg_const_i64(200);
3626 tcg_gen_qemu_st32(f
, a
, get_mem_index(s
));
3627 tcg_temp_free_i64(f
);
3628 tcg_temp_free_i64(a
);
3632 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3634 check_privileged(s
);
3635 gen_helper_stpt(o
->out
, cpu_env
);
3639 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3641 check_privileged(s
);
3642 potential_page_fault(s
);
3643 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3648 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3650 check_privileged(s
);
3651 gen_helper_spx(cpu_env
, o
->in2
);
3655 static ExitStatus
op_xsch(DisasContext
*s
, DisasOps
*o
)
3657 check_privileged(s
);
3658 potential_page_fault(s
);
3659 gen_helper_xsch(cpu_env
, regs
[1]);
3664 static ExitStatus
op_csch(DisasContext
*s
, DisasOps
*o
)
3666 check_privileged(s
);
3667 potential_page_fault(s
);
3668 gen_helper_csch(cpu_env
, regs
[1]);
3673 static ExitStatus
op_hsch(DisasContext
*s
, DisasOps
*o
)
3675 check_privileged(s
);
3676 potential_page_fault(s
);
3677 gen_helper_hsch(cpu_env
, regs
[1]);
3682 static ExitStatus
op_msch(DisasContext
*s
, DisasOps
*o
)
3684 check_privileged(s
);
3685 potential_page_fault(s
);
3686 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
3691 static ExitStatus
op_rchp(DisasContext
*s
, DisasOps
*o
)
3693 check_privileged(s
);
3694 potential_page_fault(s
);
3695 gen_helper_rchp(cpu_env
, regs
[1]);
3700 static ExitStatus
op_rsch(DisasContext
*s
, DisasOps
*o
)
3702 check_privileged(s
);
3703 potential_page_fault(s
);
3704 gen_helper_rsch(cpu_env
, regs
[1]);
3709 static ExitStatus
op_ssch(DisasContext
*s
, DisasOps
*o
)
3711 check_privileged(s
);
3712 potential_page_fault(s
);
3713 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
3718 static ExitStatus
op_stsch(DisasContext
*s
, DisasOps
*o
)
3720 check_privileged(s
);
3721 potential_page_fault(s
);
3722 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
3727 static ExitStatus
op_tsch(DisasContext
*s
, DisasOps
*o
)
3729 check_privileged(s
);
3730 potential_page_fault(s
);
3731 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
3736 static ExitStatus
op_chsc(DisasContext
*s
, DisasOps
*o
)
3738 check_privileged(s
);
3739 potential_page_fault(s
);
3740 gen_helper_chsc(cpu_env
, o
->in2
);
3745 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
3747 check_privileged(s
);
3748 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
3749 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
3753 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3755 uint64_t i2
= get_field(s
->fields
, i2
);
3758 check_privileged(s
);
3760 /* It is important to do what the instruction name says: STORE THEN.
3761 If we let the output hook perform the store then if we fault and
3762 restart, we'll have the wrong SYSTEM MASK in place. */
3763 t
= tcg_temp_new_i64();
3764 tcg_gen_shri_i64(t
, psw_mask
, 56);
3765 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3766 tcg_temp_free_i64(t
);
3768 if (s
->fields
->op
== 0xac) {
3769 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3770 (i2
<< 56) | 0x00ffffffffffffffull
);
3772 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3777 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
3779 check_privileged(s
);
3780 potential_page_fault(s
);
3781 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
3785 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
3787 check_privileged(s
);
3788 potential_page_fault(s
);
3789 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
3794 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3796 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3800 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3802 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3806 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3808 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3812 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3814 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3818 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3820 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3821 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3822 potential_page_fault(s
);
3823 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3824 tcg_temp_free_i32(r1
);
3825 tcg_temp_free_i32(r3
);
3829 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3831 int m3
= get_field(s
->fields
, m3
);
3832 int pos
, base
= s
->insn
->data
;
3833 TCGv_i64 tmp
= tcg_temp_new_i64();
3835 pos
= base
+ ctz32(m3
) * 8;
3838 /* Effectively a 32-bit store. */
3839 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3840 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3846 /* Effectively a 16-bit store. */
3847 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3848 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3855 /* Effectively an 8-bit store. */
3856 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3857 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3861 /* This is going to be a sequence of shifts and stores. */
3862 pos
= base
+ 32 - 8;
3865 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3866 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3867 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3869 m3
= (m3
<< 1) & 0xf;
3874 tcg_temp_free_i64(tmp
);
3878 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3880 int r1
= get_field(s
->fields
, r1
);
3881 int r3
= get_field(s
->fields
, r3
);
3882 int size
= s
->insn
->data
;
3883 TCGv_i64 tsize
= tcg_const_i64(size
);
3887 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3889 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3894 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3898 tcg_temp_free_i64(tsize
);
3902 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3904 int r1
= get_field(s
->fields
, r1
);
3905 int r3
= get_field(s
->fields
, r3
);
3906 TCGv_i64 t
= tcg_temp_new_i64();
3907 TCGv_i64 t4
= tcg_const_i64(4);
3908 TCGv_i64 t32
= tcg_const_i64(32);
3911 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3912 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3916 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3920 tcg_temp_free_i64(t
);
3921 tcg_temp_free_i64(t4
);
3922 tcg_temp_free_i64(t32
);
3926 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3928 potential_page_fault(s
);
3929 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3931 return_low128(o
->in2
);
3935 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3937 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3941 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3946 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3948 /* The !borrow flag is the msb of CC. Since we want the inverse of
3949 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3950 disas_jcc(s
, &cmp
, 8 | 4);
3951 borrow
= tcg_temp_new_i64();
3953 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
3955 TCGv_i32 t
= tcg_temp_new_i32();
3956 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
3957 tcg_gen_extu_i32_i64(borrow
, t
);
3958 tcg_temp_free_i32(t
);
3962 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
3963 tcg_temp_free_i64(borrow
);
3967 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3974 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3975 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3976 tcg_temp_free_i32(t
);
3978 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3979 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3980 tcg_temp_free_i32(t
);
3982 gen_exception(EXCP_SVC
);
3983 return EXIT_NORETURN
;
3986 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3988 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3993 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3995 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
4000 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4002 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
4007 #ifndef CONFIG_USER_ONLY
4008 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
4010 potential_page_fault(s
);
4011 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
4017 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
4019 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4020 potential_page_fault(s
);
4021 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4022 tcg_temp_free_i32(l
);
4027 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
4029 potential_page_fault(s
);
4030 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4031 return_low128(o
->out2
);
4036 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
4038 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4039 potential_page_fault(s
);
4040 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4041 tcg_temp_free_i32(l
);
4046 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
4048 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4049 potential_page_fault(s
);
4050 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4051 tcg_temp_free_i32(l
);
4055 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
4057 int d1
= get_field(s
->fields
, d1
);
4058 int d2
= get_field(s
->fields
, d2
);
4059 int b1
= get_field(s
->fields
, b1
);
4060 int b2
= get_field(s
->fields
, b2
);
4061 int l
= get_field(s
->fields
, l1
);
4064 o
->addr1
= get_address(s
, 0, b1
, d1
);
4066 /* If the addresses are identical, this is a store/memset of zero. */
4067 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4068 o
->in2
= tcg_const_i64(0);
4072 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4075 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4079 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4082 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4086 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4089 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4093 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4095 gen_op_movi_cc(s
, 0);
4099 /* But in general we'll defer to a helper. */
4100 o
->in2
= get_address(s
, 0, b2
, d2
);
4101 t32
= tcg_const_i32(l
);
4102 potential_page_fault(s
);
4103 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4104 tcg_temp_free_i32(t32
);
4109 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
4111 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4115 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
4117 int shift
= s
->insn
->data
& 0xff;
4118 int size
= s
->insn
->data
>> 8;
4119 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4122 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4123 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4125 /* Produce the CC from only the bits manipulated. */
4126 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4127 set_cc_nz_u64(s
, cc_dst
);
4131 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
4133 o
->out
= tcg_const_i64(0);
4137 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
4139 o
->out
= tcg_const_i64(0);
/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code.  */
4150 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4152 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4155 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4157 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
4160 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
4162 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
4165 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4167 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4170 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4172 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4175 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4177 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4180 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4182 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4185 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4187 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
4190 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
4192 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
4195 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4197 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4200 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4202 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4205 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4207 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4210 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4212 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4215 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4217 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4220 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4222 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4225 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4227 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4230 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4232 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4235 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4237 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4240 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4242 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
4245 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
4247 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
4248 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
4251 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
4253 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
4256 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
4258 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
4261 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
4263 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
4266 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
4268 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
4271 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
4273 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
4276 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
4278 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
4281 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
4283 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
4286 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
4288 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
4291 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
4293 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
4296 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
4298 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
4301 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
4303 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
4306 /* ====================================================================== */
4307 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4308 with the TCG register to which we will write. Used in combination with
4309 the "wout" generators, in some cases we need a new temporary, and in
4310 some cases we can write to a TCG global. */
4312 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4314 o
->out
= tcg_temp_new_i64();
4316 #define SPEC_prep_new 0
4318 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4320 o
->out
= tcg_temp_new_i64();
4321 o
->out2
= tcg_temp_new_i64();
4323 #define SPEC_prep_new_P 0
4325 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4327 o
->out
= regs
[get_field(f
, r1
)];
4330 #define SPEC_prep_r1 0
4332 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4334 int r1
= get_field(f
, r1
);
4336 o
->out2
= regs
[r1
+ 1];
4337 o
->g_out
= o
->g_out2
= true;
4339 #define SPEC_prep_r1_P SPEC_r1_even
4341 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4343 o
->out
= fregs
[get_field(f
, r1
)];
4346 #define SPEC_prep_f1 0
4348 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4350 int r1
= get_field(f
, r1
);
4352 o
->out2
= fregs
[r1
+ 2];
4353 o
->g_out
= o
->g_out2
= true;
4355 #define SPEC_prep_x1 SPEC_r1_f128
4357 /* ====================================================================== */
4358 /* The "Write OUTput" generators. These generally perform some non-trivial
4359 copy of data to TCG globals, or to main memory. The trivial cases are
4360 generally handled by having a "prep" generator install the TCG global
4361 as the destination of the operation. */
4363 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4365 store_reg(get_field(f
, r1
), o
->out
);
4367 #define SPEC_wout_r1 0
4369 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4371 int r1
= get_field(f
, r1
);
4372 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
4374 #define SPEC_wout_r1_8 0
4376 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4378 int r1
= get_field(f
, r1
);
4379 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
4381 #define SPEC_wout_r1_16 0
4383 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4385 store_reg32_i64(get_field(f
, r1
), o
->out
);
4387 #define SPEC_wout_r1_32 0
4389 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4391 store_reg32h_i64(get_field(f
, r1
), o
->out
);
4393 #define SPEC_wout_r1_32h 0
4395 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4397 int r1
= get_field(f
, r1
);
4398 store_reg32_i64(r1
, o
->out
);
4399 store_reg32_i64(r1
+ 1, o
->out2
);
4401 #define SPEC_wout_r1_P32 SPEC_r1_even
4403 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4405 int r1
= get_field(f
, r1
);
4406 store_reg32_i64(r1
+ 1, o
->out
);
4407 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
4408 store_reg32_i64(r1
, o
->out
);
4410 #define SPEC_wout_r1_D32 SPEC_r1_even
4412 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4414 store_freg32_i64(get_field(f
, r1
), o
->out
);
4416 #define SPEC_wout_e1 0
4418 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4420 store_freg(get_field(f
, r1
), o
->out
);
4422 #define SPEC_wout_f1 0
4424 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4426 int f1
= get_field(s
->fields
, r1
);
4427 store_freg(f1
, o
->out
);
4428 store_freg(f1
+ 2, o
->out2
);
4430 #define SPEC_wout_x1 SPEC_r1_f128
4432 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4434 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4435 store_reg32_i64(get_field(f
, r1
), o
->out
);
4438 #define SPEC_wout_cond_r1r2_32 0
4440 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4442 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4443 store_freg32_i64(get_field(f
, r1
), o
->out
);
4446 #define SPEC_wout_cond_e1e2 0
4448 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4450 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4452 #define SPEC_wout_m1_8 0
4454 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4456 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4458 #define SPEC_wout_m1_16 0
4460 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4462 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4464 #define SPEC_wout_m1_32 0
4466 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4468 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4470 #define SPEC_wout_m1_64 0
4472 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4474 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4476 #define SPEC_wout_m2_32 0
4478 static void wout_m2_32_r1_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4480 /* XXX release reservation */
4481 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4482 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4484 #define SPEC_wout_m2_32_r1_atomic 0
4486 static void wout_m2_64_r1_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4488 /* XXX release reservation */
4489 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4490 store_reg(get_field(f
, r1
), o
->in2
);
4492 #define SPEC_wout_m2_64_r1_atomic 0
4494 /* ====================================================================== */
4495 /* The "INput 1" generators. These load the first operand to an insn. */
4497 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4499 o
->in1
= load_reg(get_field(f
, r1
));
4501 #define SPEC_in1_r1 0
4503 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4505 o
->in1
= regs
[get_field(f
, r1
)];
4508 #define SPEC_in1_r1_o 0
4510 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4512 o
->in1
= tcg_temp_new_i64();
4513 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4515 #define SPEC_in1_r1_32s 0
4517 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4519 o
->in1
= tcg_temp_new_i64();
4520 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4522 #define SPEC_in1_r1_32u 0
4524 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4526 o
->in1
= tcg_temp_new_i64();
4527 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
4529 #define SPEC_in1_r1_sr32 0
4531 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4533 o
->in1
= load_reg(get_field(f
, r1
) + 1);
4535 #define SPEC_in1_r1p1 SPEC_r1_even
4537 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4539 o
->in1
= tcg_temp_new_i64();
4540 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4542 #define SPEC_in1_r1p1_32s SPEC_r1_even
4544 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4546 o
->in1
= tcg_temp_new_i64();
4547 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4549 #define SPEC_in1_r1p1_32u SPEC_r1_even
4551 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4553 int r1
= get_field(f
, r1
);
4554 o
->in1
= tcg_temp_new_i64();
4555 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
4557 #define SPEC_in1_r1_D32 SPEC_r1_even
4559 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4561 o
->in1
= load_reg(get_field(f
, r2
));
4563 #define SPEC_in1_r2 0
4565 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4567 o
->in1
= tcg_temp_new_i64();
4568 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
4570 #define SPEC_in1_r2_sr32 0
4572 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4574 o
->in1
= load_reg(get_field(f
, r3
));
4576 #define SPEC_in1_r3 0
4578 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4580 o
->in1
= regs
[get_field(f
, r3
)];
4583 #define SPEC_in1_r3_o 0
4585 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4587 o
->in1
= tcg_temp_new_i64();
4588 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4590 #define SPEC_in1_r3_32s 0
4592 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4594 o
->in1
= tcg_temp_new_i64();
4595 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4597 #define SPEC_in1_r3_32u 0
4599 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4601 int r3
= get_field(f
, r3
);
4602 o
->in1
= tcg_temp_new_i64();
4603 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
4605 #define SPEC_in1_r3_D32 SPEC_r3_even
4607 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4609 o
->in1
= load_freg32_i64(get_field(f
, r1
));
4611 #define SPEC_in1_e1 0
4613 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4615 o
->in1
= fregs
[get_field(f
, r1
)];
4618 #define SPEC_in1_f1_o 0
4620 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4622 int r1
= get_field(f
, r1
);
4624 o
->out2
= fregs
[r1
+ 2];
4625 o
->g_out
= o
->g_out2
= true;
4627 #define SPEC_in1_x1_o SPEC_r1_f128
4629 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4631 o
->in1
= fregs
[get_field(f
, r3
)];
4634 #define SPEC_in1_f3_o 0
4636 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4638 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
4640 #define SPEC_in1_la1 0
4642 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4644 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4645 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4647 #define SPEC_in1_la2 0
4649 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4652 o
->in1
= tcg_temp_new_i64();
4653 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4655 #define SPEC_in1_m1_8u 0
4657 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4660 o
->in1
= tcg_temp_new_i64();
4661 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4663 #define SPEC_in1_m1_16s 0
4665 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4668 o
->in1
= tcg_temp_new_i64();
4669 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4671 #define SPEC_in1_m1_16u 0
4673 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4676 o
->in1
= tcg_temp_new_i64();
4677 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4679 #define SPEC_in1_m1_32s 0
4681 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4684 o
->in1
= tcg_temp_new_i64();
4685 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4687 #define SPEC_in1_m1_32u 0
4689 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4692 o
->in1
= tcg_temp_new_i64();
4693 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
4695 #define SPEC_in1_m1_64 0
4697 /* ====================================================================== */
4698 /* The "INput 2" generators. These load the second operand to an insn. */
4700 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4702 o
->in2
= regs
[get_field(f
, r1
)];
4705 #define SPEC_in2_r1_o 0
4707 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4709 o
->in2
= tcg_temp_new_i64();
4710 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4712 #define SPEC_in2_r1_16u 0
4714 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4716 o
->in2
= tcg_temp_new_i64();
4717 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4719 #define SPEC_in2_r1_32u 0
4721 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4723 int r1
= get_field(f
, r1
);
4724 o
->in2
= tcg_temp_new_i64();
4725 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
4727 #define SPEC_in2_r1_D32 SPEC_r1_even
4729 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4731 o
->in2
= load_reg(get_field(f
, r2
));
4733 #define SPEC_in2_r2 0
4735 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4737 o
->in2
= regs
[get_field(f
, r2
)];
4740 #define SPEC_in2_r2_o 0
4742 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4744 int r2
= get_field(f
, r2
);
4746 o
->in2
= load_reg(r2
);
4749 #define SPEC_in2_r2_nz 0
4751 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4753 o
->in2
= tcg_temp_new_i64();
4754 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4756 #define SPEC_in2_r2_8s 0
4758 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4760 o
->in2
= tcg_temp_new_i64();
4761 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4763 #define SPEC_in2_r2_8u 0
4765 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4767 o
->in2
= tcg_temp_new_i64();
4768 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4770 #define SPEC_in2_r2_16s 0
4772 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4774 o
->in2
= tcg_temp_new_i64();
4775 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4777 #define SPEC_in2_r2_16u 0
4779 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4781 o
->in2
= load_reg(get_field(f
, r3
));
4783 #define SPEC_in2_r3 0
4785 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4787 o
->in2
= tcg_temp_new_i64();
4788 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
4790 #define SPEC_in2_r3_sr32 0
4792 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4794 o
->in2
= tcg_temp_new_i64();
4795 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4797 #define SPEC_in2_r2_32s 0
4799 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4801 o
->in2
= tcg_temp_new_i64();
4802 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4804 #define SPEC_in2_r2_32u 0
4806 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4808 o
->in2
= tcg_temp_new_i64();
4809 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
4811 #define SPEC_in2_r2_sr32 0
4813 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4815 o
->in2
= load_freg32_i64(get_field(f
, r2
));
4817 #define SPEC_in2_e2 0
4819 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4821 o
->in2
= fregs
[get_field(f
, r2
)];
4824 #define SPEC_in2_f2_o 0
4826 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4828 int r2
= get_field(f
, r2
);
4830 o
->in2
= fregs
[r2
+ 2];
4831 o
->g_in1
= o
->g_in2
= true;
4833 #define SPEC_in2_x2_o SPEC_r2_f128
4835 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4837 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
4839 #define SPEC_in2_ra2 0
4841 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4843 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4844 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4846 #define SPEC_in2_a2 0
4848 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4850 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
4852 #define SPEC_in2_ri2 0
4854 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4856 help_l2_shift(s
, f
, o
, 31);
4858 #define SPEC_in2_sh32 0
4860 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4862 help_l2_shift(s
, f
, o
, 63);
4864 #define SPEC_in2_sh64 0
4866 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4869 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
4871 #define SPEC_in2_m2_8u 0
4873 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4876 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4878 #define SPEC_in2_m2_16s 0
4880 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4883 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4885 #define SPEC_in2_m2_16u 0
4887 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4890 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4892 #define SPEC_in2_m2_32s 0
4894 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4897 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4899 #define SPEC_in2_m2_32u 0
4901 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4904 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4906 #define SPEC_in2_m2_64 0
4908 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4911 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4913 #define SPEC_in2_mri2_16u 0
4915 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4918 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4920 #define SPEC_in2_mri2_32s 0
4922 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4925 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4927 #define SPEC_in2_mri2_32u 0
4929 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4932 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4934 #define SPEC_in2_mri2_64 0
4936 static void in2_m2_32s_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4938 /* XXX should reserve the address */
4940 o
->in2
= tcg_temp_new_i64();
4941 tcg_gen_qemu_ld32s(o
->in2
, o
->addr1
, get_mem_index(s
));
4943 #define SPEC_in2_m2_32s_atomic 0
4945 static void in2_m2_64_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4947 /* XXX should reserve the address */
4949 o
->in2
= tcg_temp_new_i64();
4950 tcg_gen_qemu_ld64(o
->in2
, o
->addr1
, get_mem_index(s
));
4952 #define SPEC_in2_m2_64_atomic 0
4954 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4956 o
->in2
= tcg_const_i64(get_field(f
, i2
));
4958 #define SPEC_in2_i2 0
4960 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4962 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
4964 #define SPEC_in2_i2_8u 0
4966 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4968 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
4970 #define SPEC_in2_i2_16u 0
4972 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4974 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
4976 #define SPEC_in2_i2_32u 0
4978 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4980 uint64_t i2
= (uint16_t)get_field(f
, i2
);
4981 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4983 #define SPEC_in2_i2_16u_shl 0
4985 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4987 uint64_t i2
= (uint32_t)get_field(f
, i2
);
4988 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4990 #define SPEC_in2_i2_32u_shl 0
4992 #ifndef CONFIG_USER_ONLY
4993 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4995 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
4997 #define SPEC_in2_insn 0
5000 /* ====================================================================== */
/* NOTE(review): this region was damaged during extraction -- the #undef
   lines, several DisasInsn initializer fields, the enum/table closing
   braces, and the switch body of lookup_opc (which re-includes
   insn-data.def under the case-generating D macro) appear to have been
   lost.  The surviving fragments are preserved verbatim below; restore
   this region from the upstream file rather than editing it in place.  */
5002 /* Find opc within the table of insns. This is formulated as a switch
5003 statement so that (1) we get compile-time notice of cut-paste errors
5004 for duplicated opcodes, and (2) the compiler generates the binary
5005 search tree, rather than us having to post-process the table. */
5007 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5008 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
/* First expansion of insn-data.def: an enum naming every insn.  */
5010 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5012 enum DisasInsnEnum
{
5013 #include "insn-data.def"
/* Second expansion: the DisasInsn info-table initializers themselves.  */
5017 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5021 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5023 .help_in1 = in1_##I1, \
5024 .help_in2 = in2_##I2, \
5025 .help_prep = prep_##P, \
5026 .help_wout = wout_##W, \
5027 .help_cout = cout_##CC, \
5028 .help_op = op_##OP, \
5032 /* Allow 0 to be used for NULL in the table below. */
5040 #define SPEC_in1_0 0
5041 #define SPEC_in2_0 0
5042 #define SPEC_prep_0 0
5043 #define SPEC_wout_0 0
5045 static const DisasInsn insn_info
[] = {
5046 #include "insn-data.def"
/* Third expansion: lookup_opc switches over opc, returning table entries.  */
5050 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5051 case OPC: return &insn_info[insn_ ## NM];
5053 static const DisasInsn
*lookup_opc(uint16_t opc
)
5056 #include "insn-data.def"
5065 /* Extract a field from the insn. The INSN should be left-aligned in
5066 the uint64_t so that we can more easily utilize the big-bit-endian
5067 definitions we extract from the Principals of Operation. */
5069 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5077 /* Zero extract the field from the insn. */
5078 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5080 /* Sign-extend, or un-swap the field as necessary. */
5082 case 0: /* unsigned */
5084 case 1: /* signed */
5085 assert(f
->size
<= 32);
5086 m
= 1u << (f
->size
- 1);
5089 case 2: /* dl+dh split, signed 20 bit. */
5090 r
= ((int8_t)r
<< 12) | (r
>> 8);
5096 /* Validate that the "compressed" encoding we selected above is valid.
5097 I.e. we havn't make two different original fields overlap. */
5098 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5099 o
->presentC
|= 1 << f
->indexC
;
5100 o
->presentO
|= 1 << f
->indexO
;
5102 o
->c
[f
->indexC
] = r
;
5105 /* Lookup the insn at the current PC, extracting the operands into O and
5106 returning the info struct for the insn. Returns NULL for invalid insn. */
5108 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5111 uint64_t insn
, pc
= s
->pc
;
5113 const DisasInsn
*info
;
5115 insn
= ld_code2(env
, pc
);
5116 op
= (insn
>> 8) & 0xff;
5117 ilen
= get_ilen(op
);
5118 s
->next_pc
= s
->pc
+ ilen
;
5125 insn
= ld_code4(env
, pc
) << 32;
5128 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
5134 /* We can't actually determine the insn format until we've looked up
5135 the full insn opcode. Which we can't do without locating the
5136 secondary opcode. Assume by default that OP2 is at bit 40; for
5137 those smaller insns that don't actually have a secondary opcode
5138 this will correctly result in OP2 = 0. */
5144 case 0xb2: /* S, RRF, RRE */
5145 case 0xb3: /* RRE, RRD, RRF */
5146 case 0xb9: /* RRE, RRF */
5147 case 0xe5: /* SSE, SIL */
5148 op2
= (insn
<< 8) >> 56;
5152 case 0xc0: /* RIL */
5153 case 0xc2: /* RIL */
5154 case 0xc4: /* RIL */
5155 case 0xc6: /* RIL */
5156 case 0xc8: /* SSF */
5157 case 0xcc: /* RIL */
5158 op2
= (insn
<< 12) >> 60;
5160 case 0xd0 ... 0xdf: /* SS */
5166 case 0xee ... 0xf3: /* SS */
5167 case 0xf8 ... 0xfd: /* SS */
5171 op2
= (insn
<< 40) >> 56;
5175 memset(f
, 0, sizeof(*f
));
5180 /* Lookup the instruction. */
5181 info
= lookup_opc(op
<< 8 | op2
);
5183 /* If we found it, extract the operands. */
5185 DisasFormat fmt
= info
->fmt
;
5188 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5189 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
5195 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5197 const DisasInsn
*insn
;
5198 ExitStatus ret
= NO_EXIT
;
5202 /* Search for the insn in the table. */
5203 insn
= extract_insn(env
, s
, &f
);
5205 /* Not found means unimplemented/illegal opcode. */
5207 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
5209 gen_illegal_opcode(s
);
5210 return EXIT_NORETURN
;
5213 #ifndef CONFIG_USER_ONLY
5214 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5215 TCGv_i64 addr
= tcg_const_i64(s
->pc
);
5216 gen_helper_per_ifetch(cpu_env
, addr
);
5217 tcg_temp_free_i64(addr
);
5221 /* Check for insn specification exceptions. */
5223 int spec
= insn
->spec
, excp
= 0, r
;
5225 if (spec
& SPEC_r1_even
) {
5226 r
= get_field(&f
, r1
);
5228 excp
= PGM_SPECIFICATION
;
5231 if (spec
& SPEC_r2_even
) {
5232 r
= get_field(&f
, r2
);
5234 excp
= PGM_SPECIFICATION
;
5237 if (spec
& SPEC_r3_even
) {
5238 r
= get_field(&f
, r3
);
5240 excp
= PGM_SPECIFICATION
;
5243 if (spec
& SPEC_r1_f128
) {
5244 r
= get_field(&f
, r1
);
5246 excp
= PGM_SPECIFICATION
;
5249 if (spec
& SPEC_r2_f128
) {
5250 r
= get_field(&f
, r2
);
5252 excp
= PGM_SPECIFICATION
;
5256 gen_program_exception(s
, excp
);
5257 return EXIT_NORETURN
;
5261 /* Set up the strutures we use to communicate with the helpers. */
5264 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5265 TCGV_UNUSED_I64(o
.out
);
5266 TCGV_UNUSED_I64(o
.out2
);
5267 TCGV_UNUSED_I64(o
.in1
);
5268 TCGV_UNUSED_I64(o
.in2
);
5269 TCGV_UNUSED_I64(o
.addr1
);
5271 /* Implement the instruction. */
5272 if (insn
->help_in1
) {
5273 insn
->help_in1(s
, &f
, &o
);
5275 if (insn
->help_in2
) {
5276 insn
->help_in2(s
, &f
, &o
);
5278 if (insn
->help_prep
) {
5279 insn
->help_prep(s
, &f
, &o
);
5281 if (insn
->help_op
) {
5282 ret
= insn
->help_op(s
, &o
);
5284 if (insn
->help_wout
) {
5285 insn
->help_wout(s
, &f
, &o
);
5287 if (insn
->help_cout
) {
5288 insn
->help_cout(s
, &o
);
5291 /* Free any temporaries created by the helpers. */
5292 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5293 tcg_temp_free_i64(o
.out
);
5295 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5296 tcg_temp_free_i64(o
.out2
);
5298 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5299 tcg_temp_free_i64(o
.in1
);
5301 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5302 tcg_temp_free_i64(o
.in2
);
5304 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5305 tcg_temp_free_i64(o
.addr1
);
5308 #ifndef CONFIG_USER_ONLY
5309 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5310 /* An exception might be triggered, save PSW if not already done. */
5311 if (ret
== NO_EXIT
|| ret
== EXIT_PC_STALE
) {
5312 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
5318 /* Call the helper to check for a possible PER exception. */
5319 gen_helper_per_check_exception(cpu_env
);
5323 /* Advance to the next instruction. */
5328 void gen_intermediate_code(CPUS390XState
*env
, struct TranslationBlock
*tb
)
5330 S390CPU
*cpu
= s390_env_get_cpu(env
);
5331 CPUState
*cs
= CPU(cpu
);
5333 target_ulong pc_start
;
5334 uint64_t next_page_start
;
5335 int num_insns
, max_insns
;
5342 if (!(tb
->flags
& FLAG_MASK_64
)) {
5343 pc_start
&= 0x7fffffff;
5348 dc
.cc_op
= CC_OP_DYNAMIC
;
5349 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
5351 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
5354 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5355 if (max_insns
== 0) {
5356 max_insns
= CF_COUNT_MASK
;
5358 if (max_insns
> TCG_MAX_INSNS
) {
5359 max_insns
= TCG_MAX_INSNS
;
5365 tcg_gen_insn_start(dc
.pc
, dc
.cc_op
);
5368 if (unlikely(cpu_breakpoint_test(cs
, dc
.pc
, BP_ANY
))) {
5369 status
= EXIT_PC_STALE
;
5371 /* The address covered by the breakpoint must be included in
5372 [tb->pc, tb->pc + tb->size) in order to for it to be
5373 properly cleared -- thus we increment the PC here so that
5374 the logic setting tb->size below does the right thing. */
5379 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
5384 if (status
== NO_EXIT
) {
5385 status
= translate_one(env
, &dc
);
5388 /* If we reach a page boundary, are single stepping,
5389 or exhaust instruction count, stop generation. */
5390 if (status
== NO_EXIT
5391 && (dc
.pc
>= next_page_start
5392 || tcg_op_buf_full()
5393 || num_insns
>= max_insns
5395 || cs
->singlestep_enabled
)) {
5396 status
= EXIT_PC_STALE
;
5398 } while (status
== NO_EXIT
);
5400 if (tb
->cflags
& CF_LAST_IO
) {
5409 update_psw_addr(&dc
);
5411 case EXIT_PC_UPDATED
:
5412 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5413 cc op type is in env */
5415 /* Exit the TB, either by raising a debug exception or by return. */
5417 gen_exception(EXCP_DEBUG
);
5426 gen_tb_end(tb
, num_insns
);
5428 tb
->size
= dc
.pc
- pc_start
;
5429 tb
->icount
= num_insns
;
5431 #if defined(S390X_DEBUG_DISAS)
5432 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
5433 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5434 log_target_disas(cs
, pc_start
, dc
.pc
- pc_start
, 1);
5440 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
5443 int cc_op
= data
[1];
5444 env
->psw
.addr
= data
[0];
5445 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {