/*
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
#include "qemu/osdep.h"
#include "s390x-internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "qemu/atomic128.h"
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    /* ... FLD_O_* field indices elided ... */
};

enum DisasFieldIndexC {
    /* ... FLD_C_* compact field indices, ending in NUM_C_FIELD, elided ... */
};

struct DisasFields {
    /* ... raw instruction and opcode members elided ... */
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};
struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care.  For operations like addition,
     * the two 8 byte elements have to be loaded separately.  Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
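/*
 * Worked example (illustrative values, not from the source): with
 * es = MO_32 and enr = 1 we get bytes = 4 and offs = 4, i.e. the second
 * word of the 16-byte register on a big-endian host; on a little-endian
 * host the xor with (8 - bytes) flips the offset within each 8-byte
 * half so that the same architectural element is addressed.
 */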
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        /* ... unreachable-default handling elided ... */
        break;
    }
#endif
}
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Update the PSW and save off cc.  */
    update_psw_addr(s);
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
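/*
 * Example (assumed operand values, for illustration only): for an
 * operand D2(X2,B2) with b2 = 12, x2 = 5 and d2 = 0x123, the code above
 * emits tmp = r12 + r5, and gen_addi_and_wrap_i64() then adds 0x123 and
 * masks the result to 31 or 24 bits unless FLAG_MASK_64 says we are in
 * 64-bit addressing mode.
 */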
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    /* ... other 1-argument cc ops elided ... */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    /* ... other 2-argument cc ops elided ... */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    /* ... 3-argument cc ops elided ... */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
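/*
 * Sketch of the lazy-CC scheme used above (informal): an arithmetic op
 * does not compute the architectural 0..3 condition code, it only
 * records its operands, e.g.
 *
 *     tcg_gen_mov_i64(cc_src, src);
 *     tcg_gen_mov_i64(cc_dst, dst);
 *     s->cc_op = CC_OP_ADD_64;
 *
 * and gen_op_calc_cc() materializes the value into cc_op only when a
 * consumer (conditional branch, IPM, exception entry, ...) needs it.
 */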
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
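/*
 * Example (illustrative): after a signed compare, s->cc_op is one of the
 * CC_OP_LTGT_* values and a BRC mask of 8 (branch on CC=0, i.e. operands
 * equal) indexes ltgt_cond[8] = TCG_COND_EQ, so a compare-and-branch
 * pair can be translated into a single inline tcg_gen_brcond without
 * ever computing the architectural CC value.
 */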
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    enum cc_op old_cc_op = s->cc_op;
    TCGCond cond;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* ... mask patterns elided ... */
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        /* ... mask patterns elided ... */
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }
    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}
/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
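/*
 * Usage illustration (field names assumed for the example): for a format
 * that carries an r1 field, get_field(s, r1) expands to
 * get_field1(s, FLD_O_r1, FLD_C_r1) and returns the decoded value from
 * the compact array, while have_field(s, i2) merely tests the presentO
 * availability bitmap.
 */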
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
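/*
 * Expansion example (illustrative): BXD(2) describes the classic
 * base-index-displacement operand of an RX-format instruction and
 * expands to three DisasField entries -
 *   { 16,  4, 0, FLD_C_b2, FLD_O_b2 },
 *   { 12,  4, 0, FLD_C_x2, FLD_O_x2 },
 *   { 20, 12, 0, FLD_C_d2, FLD_O_d2 }
 * - i.e. a 4-bit base at bit 16, a 4-bit index at bit 12 and a 12-bit
 * displacement at bit 20 of the instruction image.
 */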
#define F0(N)                         { { } },
#define F1(N, X1)                     { { X1 } },
#define F2(N, X1, X2)                 { { X1, X2 } },
#define F3(N, X1, X2, X3)             { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)         { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5)     { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4
/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    /* ... opcode, flags, format, facility and spec members elided ... */

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
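/*
 * Design note (informal): the use_goto_tb() path above emits the usual
 * two-part direct-jump pattern - tcg_gen_goto_tb() plus
 * tcg_gen_exit_tb(tb, slot) - so the main loop can patch the jump and
 * chain this TB straight to the translated target; the fallback path
 * just stores the new PSW address and returns to the main loop for a
 * full TB lookup.
 */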
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
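/*
 * Illustration (informal): when both edges of a conditional branch can
 * use goto_tb, the generated code looks roughly like
 *
 *     brcond cond, a, b -> taken
 *     goto_tb 0;  psw_addr = s->pc_tmp;  exit_tb(tb, 0)   // fallthru
 *   taken:
 *     goto_tb 1;  psw_addr = dest;       exit_tb(tb, 1)   // branch
 *
 * giving two independently chainable exits, which is why a TCG label is
 * only needed on the taken edge.
 */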
/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
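/*
 * Example (illustrative): after ALGR, s->cc_op is CC_OP_ADDU and cc_src
 * already holds the carry as 0 or 1, so ADD LOGICAL WITH CARRY can use
 * it directly; after an unrelated op the CC value 0..3 is computed
 * first, and for additions carry corresponds to CC=2 or CC=3, i.e. the
 * msb of CC, hence the shift by 1.
 */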
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
op_aeb(DisasContext
*s
, DisasOps
*o
)
1454 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1458 static DisasJumpType
op_adb(DisasContext
*s
, DisasOps
*o
)
1460 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1464 static DisasJumpType
op_axb(DisasContext
*s
, DisasOps
*o
)
1466 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1467 return_low128(o
->out2
);
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}
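/*
 * Packing example (illustrative): for m3 = 1 and m4 = 0,
 * deposit32(m3, 4, 4, m4) yields 0x01 - m3 in the low nibble, m4 in the
 * next - so a single TCGv_i32 carries both mask fields to the conversion
 * helper, which splits them apart again.
 */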
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
#endif
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}

static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}

static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}

static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;
}
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
    return_low128(o->out2);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
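/*
 * Worked example (illustrative): for IN = 0x0040000000000000, CLZ is 9,
 * so R1 = 9 and 0x8000000000000000 >> 9 recovers the found bit; R1+1 =
 * IN & ~bit = 0.  For IN = 0 the shift amount is 64, which TCG leaves
 * undefined, but the subsequent andc with cc_dst (= 0) still produces
 * the architected result of 0.
 */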
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
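/*
 * Example (illustrative): ICM r1,0x6,addr is the contiguous two-byte
 * case above - ctz32(0x6) = 1 gives pos = 8 and len = 16, so the two
 * loaded bytes land in bits 8..23 of r1 and ccm = 0xffff00 marks exactly
 * the inserted bits for the CC_OP_ICM computation.
 */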
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}

static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
    int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
    int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMA:
        if (r3 == r1 || r3 == r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMCTR:
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    }

    t_r1 = tcg_const_i32(r1);
    t_r2 = tcg_const_i32(r2);
    t_r3 = tcg_const_i32(r3);
    type = tcg_const_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r2);
    tcg_temp_free_i32(t_r3);
    tcg_temp_free_i32(type);
    return DISAS_NEXT;
}

static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory. */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory. */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory. */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory. */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

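/*
 * A note on the four LOAD AND <op> implementations above: the atomic
 * fetch-op returns the old memory value (the architected R1 result),
 * while the CC must reflect the new value, so the operation is applied
 * a second time on the fetched copy.  E.g. for LAA: R1 = old,
 * storage = old + r3, CC set from old + r3.
 */
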
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}

static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}

static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
    } else {
        /* SELECT */
        disas_jcc(s, &c, get_field(s, m4));
    }

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}

static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUL | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}

static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}

static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    return DISAS_NEXT;
}

static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    return DISAS_NEXT;
}

static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free_i64(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free_i64(t1);

    return DISAS_NEXT;
}

static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    } else if (HAVE_ATOMIC128) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return_low128(o->out2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}

static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}

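/*
 * For intuition: the ori/neg pair above computes the distance to the
 * next block boundary.  With m3 = 6 (block_size = 4096) and an address
 * ending in 0xf00, addr1 | -4096 keeps only the low offset bits with
 * everything above set, so the negation yields 0x100 = 256 bytes to
 * the boundary, which is then clamped to the 16-byte maximum.
 */
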
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
#if !defined(CONFIG_USER_ONLY)
    TCGv_i32 i2;
#endif
    const uint16_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xff00) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    i2 = tcg_const_i32(monitor_class);
    gen_helper_monitor_call(cpu_env, o->addr1, i2);
    tcg_temp_free_i32(i2);
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}

static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return DISAS_NEXT;
}

static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);
    return DISAS_NEXT;
}

static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return DISAS_NEXT;
}

static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
    gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));

    gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

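/*
 * For intuition: insn->data packs (size << 8) | shift for the OR
 * register-immediate forms handled above.  E.g. a 16-bit insert at
 * bit 48 gives mask = ((1ull << 16) - 1) << 48 = 0xffff000000000000,
 * and the CC is derived from just those bits of the result.
 */
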
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    const uint8_t m3 = get_field(s, m3);

    if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
        /* Count the bits in the whole 64-bit register. */
        tcg_gen_ctpop_i64(o->out, o->in2);
    } else {
        /* Count the bits within each byte. */
        gen_helper_popcnt(o->out, o->in2);
    }
    return DISAS_NEXT;
}

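/*
 * For intuition: without the m3 & 8 form, POPCNT counts bits within
 * each byte separately.  E.g. an input of 0x0000000000000301 yields
 * 0x0000000000000201: two set bits in byte 0x03, one in byte 0x01.
 */
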
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int rot, len, pos;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}

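/*
 * Worked mask example, for intuition: RISBG with i3 = 8, i4 = 15 (and
 * no zero or wrap flags) selects bits 8-15 in the PoO's MSB-first
 * numbering: mask = (-1ull >> 8) & (-1ull << 48) = 0x00ff000000000000,
 * so len = 8, pos = 48, and the deposit path applies.
 */
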
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate. */
    switch (s->fields.op2) {
    case 0x54: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        g_assert_not_reached();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}

static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}

static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return DISAS_NEXT;
}

static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_TOO_MANY;
}
#endif

static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_TOO_MANY;
}

static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}

static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;
    uint64_t sign = 1ull << s->insn->data;
    if (s->insn->data == 31) {
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    } else {
        t = o->in1;
    }
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    if (s->insn->data == 31) {
        tcg_temp_free_i64(t);
    }
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}

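/*
 * For intuition: in the 32-bit case (insn->data == 31) the sign bit is
 * bit 31 and is never shifted.  E.g. SLA of 0x80000001 by 1 produces
 * 0x80000002 in the 32-bit view: the low 1 moves left, the sign stays.
 */
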
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}

static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}

static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}

static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

op_stcke(DisasContext
*s
, DisasOps
*o
)
4306 TCGv_i64 c1
= tcg_temp_new_i64();
4307 TCGv_i64 c2
= tcg_temp_new_i64();
4308 TCGv_i64 todpr
= tcg_temp_new_i64();
4309 gen_helper_stck(c1
, cpu_env
);
4310 /* 16 bit value store in an uint32_t (only valid bits set) */
4311 tcg_gen_ld32u_i64(todpr
, cpu_env
, offsetof(CPUS390XState
, todpr
));
4312 /* Shift the 64-bit value into its place as a zero-extended
4313 104-bit value. Note that "bit positions 64-103 are always
4314 non-zero so that they compare differently to STCK"; we set
4315 the least significant bit to 1. */
4316 tcg_gen_shli_i64(c2
, c1
, 56);
4317 tcg_gen_shri_i64(c1
, c1
, 8);
4318 tcg_gen_ori_i64(c2
, c2
, 0x10000);
4319 tcg_gen_or_i64(c2
, c2
, todpr
);
4320 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
4321 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
4322 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
4323 tcg_temp_free_i64(c1
);
4324 tcg_temp_free_i64(c2
);
4325 tcg_temp_free_i64(todpr
);
4326 /* ??? We don't implement clock states. */
4327 gen_op_movi_cc(s
, 0);
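/*
 * Sketch of the resulting 16-byte value as built above: the first
 * doubleword holds a zero epoch byte followed by the top 56 clock
 * bits; the second holds the low clock byte, then the forced-nonzero
 * bit set by the ori, with the TOD programmable register in the low
 * bytes.
 */
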
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    gen_helper_sck(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}

static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}

static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}

static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}

static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields.op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        update_psw_addr(s);
        gen_helper_per_store_real(cpu_env);
    }
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores. */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return DISAS_NEXT;
}

static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        /* Store the high half of the register. */
        tcg_gen_shr_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return DISAS_NEXT;
}

static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}

static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute borrow (0, -1) into cc_src. */
static void compute_borrow(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}

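/*
 * For intuition: s390x subtract-logical CC encodes carry (i.e. no
 * borrow) in the CC msb, so carry 1 becomes borrow 0 and carry 0
 * becomes borrow -1; the subi above performs exactly that (1,0) to
 * (0,-1) mapping, matching the CC_OP_SUBU convention.
 */
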
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}

static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}

static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return DISAS_NEXT;
}

static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY

static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#endif

static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

op_unpka(DisasContext
*s
, DisasOps
*o
)
4967 int l1
= get_field(s
, l1
) + 1;
4970 /* The length must not exceed 32 bytes. */
4972 gen_program_exception(s
, PGM_SPECIFICATION
);
4973 return DISAS_NORETURN
;
4975 l
= tcg_const_i32(l1
);
4976 gen_helper_unpka(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4977 tcg_temp_free_i32(l
);
4982 static DisasJumpType
op_unpku(DisasContext
*s
, DisasOps
*o
)
4984 int l1
= get_field(s
, l1
) + 1;
4987 /* The length must be even and should not exceed 64 bytes. */
4988 if ((l1
& 1) || (l1
> 64)) {
4989 gen_program_exception(s
, PGM_SPECIFICATION
);
4990 return DISAS_NORETURN
;
4992 l
= tcg_const_i32(l1
);
4993 gen_helper_unpku(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4994 tcg_temp_free_i32(l
);
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero. */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper. */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}

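/*
 * A side note: XC of a field with itself (b1 == b2, d1 == d2) is the
 * classic s390 idiom for clearing storage, which is why the inline
 * store-zero path above is worth special-casing for short lengths.
 */
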
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}

static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}

5157 static DisasJumpType
op_sic(DisasContext
*s
, DisasOps
*o
)
5159 gen_helper_sic(cpu_env
, o
->in1
, o
->in2
);
5163 static DisasJumpType
op_rpcit(DisasContext
*s
, DisasOps
*o
)
5165 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5166 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
5168 gen_helper_rpcit(cpu_env
, r1
, r2
);
5169 tcg_temp_free_i32(r1
);
5170 tcg_temp_free_i32(r2
);
5175 static DisasJumpType
op_pcistb(DisasContext
*s
, DisasOps
*o
)
5177 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5178 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
5179 TCGv_i32 ar
= tcg_const_i32(get_field(s
, b2
));
5181 gen_helper_pcistb(cpu_env
, r1
, r3
, o
->addr1
, ar
);
5182 tcg_temp_free_i32(ar
);
5183 tcg_temp_free_i32(r1
);
5184 tcg_temp_free_i32(r3
);
5189 static DisasJumpType
op_mpcifc(DisasContext
*s
, DisasOps
*o
)
5191 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5192 TCGv_i32 ar
= tcg_const_i32(get_field(s
, b2
));
5194 gen_helper_mpcifc(cpu_env
, r1
, o
->addr1
, ar
);
5195 tcg_temp_free_i32(ar
);
5196 tcg_temp_free_i32(r1
);
5202 #include "translate_vx.c.inc"
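
/*
 * The vector (SIMD) instruction translators live in translate_vx.c.inc
 * and are included here so that they can share the DisasContext,
 * DisasOps and helper-generation infrastructure defined above.
 */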

/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}
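
/*
 * For 32-bit unsigned addition the carry-out lands in bit 32 of the
 * 64-bit result, so the shift above leaves the carry in cc_src while
 * cc_dst keeps the 32-bit sum; CC_OP_ADDU can then derive the CC from
 * result and carry alone, without the original operands.
 */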

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}

static void cout_muls32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
}

static void cout_muls64(DisasContext *s, DisasOps *o)
{
    /* out contains "high" part, out2 contains "low" part of 128 bit result */
    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
}

/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */

static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
static void prep_x1(DisasContext *s, DisasOps *o)
{
    o->out = load_freg(get_field(s, r1));
    o->out2 = load_freg(get_field(s, r1) + 2);
}
#define SPEC_prep_x1 SPEC_r1_f128
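
/*
 * prep_r1 and prep_r1_P install TCG globals directly as the output,
 * setting g_out/g_out2 so that translate_one() knows not to free them;
 * the prep_new variants allocate temporaries that are freed once the
 * wout and cout phases have run.
 */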

/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */

static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even
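
/*
 * The D32 forms treat an even/odd register pair as one 64-bit value:
 * the even register receives the high 32 bits and the odd register the
 * low 32 bits, matching the architected register-pair convention
 * (enforced through SPEC_r1_even).
 */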

static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0

/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */

static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
    o->g_in1 = true;
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0

static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0
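
/*
 * The m1 loaders below all go through in1_la1() first: the effective
 * address is computed once into o->addr1 and then dereferenced, so
 * read-modify-write instructions can reuse the same address for the
 * store-back.
 */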

static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0

/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */

static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}
#define SPEC_in2_ri2 0

static void in2_sh(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & 0x3f);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
    }
}
#define SPEC_in2_sh 0
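
/*
 * Shift counts use only six bits: with no base register the
 * displacement supplies the count directly, otherwise the computed
 * address is masked to 0..63, as the architecture specifies.
 */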

static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif

/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0   NULL
#define in2_0   NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0    NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

/* Give smaller names to the various facilities. */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
#define FAC_VE          S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
#define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
#define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}
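
/*
 * The lookup key is the primary opcode in the high byte and the
 * secondary opcode (or 0) in the low byte, i.e. the op << 8 | op2 value
 * computed by extract_insn() below.
 */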

#undef F
#undef E
#undef D
#undef C

/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
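
/*
 * Example: a 20-bit long displacement is stored as DL (12 bits) plus DH
 * (8 bits); the type-2 case above reassembles it by sign-extending DH
 * and prepending it to DL.  Type 3 similarly merges in the RXB bit that
 * widens a 4-bit vector register number to 5 bits.
 */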

/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
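
/*
 * A non-zero ex_value means we are translating the target of an EXECUTE
 * instruction: the modified instruction text and its length were saved
 * by the EX helper, so they are consumed here instead of re-reading
 * guest memory.
 */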

static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}

static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}

static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN.  */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
            if (icount) {
                gen_io_start();
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }
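
    /*
     * Each insn's spec mask was assembled from its in1/in2/prep/wout
     * helpers in the table above, so operand constraints such as even
     * register pairs are checked once here rather than in every
     * implementation.
     */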

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers.  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}

static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
}

static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay the set of ilen until we've read the insn. */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
    dc->insn_start = tcg_last_op();
}

static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        uint64_t page_start;

        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
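
/*
 * Translation also stops at page boundaries (and right after an
 * EXECUTE), so that a translation block never spans pages whose
 * mappings could change independently.
 */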

static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if ((dc->base.tb->flags & FLAG_MASK_PER) ||
            dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void s390x_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cs, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately target_disas can't use host memory.  */
        fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
    } else {
        fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
        target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
    }
}

static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
}

void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC opcode if it is not already up-to-date.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }

    /* Record ILEN.  */
    env->int_pgm_ilen = data[2];
}