/*
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
/* Compiled out; do/while(0) keeps the macro safe as a statement. */
#  define LOG_DISAS(...) do { } while (0)
#endif
31 #include "qemu/osdep.h"
33 #include "s390x-internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "exec/translator.h"
47 #include "qemu/atomic128.h"
/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;
56 * Define a structure to hold the decoded fields. We'll store each inside
57 * an array indexed by an enum. In order to conserve memory, we'll arrange
58 * for fields that do not exist at the same time to overlap, thus the "C"
59 * for compact. For checking purposes there is an "O" for original index
60 * as well that will be applied to availability bitmaps.
63 enum DisasFieldIndexO
{
92 enum DisasFieldIndexC
{
133 unsigned presentC
:16;
134 unsigned int presentO
;
138 struct DisasContext
{
139 DisasContextBase base
;
140 const DisasInsn
*insn
;
145 * During translate_one(), pc_tmp is used to determine the instruction
146 * to be executed after base.pc_next - e.g. next sequential instruction
147 * or a branch target.
154 /* Information carried about a condition to be evaluated. */
161 struct { TCGv_i64 a
, b
; } s64
;
162 struct { TCGv_i32 a
, b
; } s32
;
#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters of branches we could / could not fold inline. */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
171 static void pc_to_link_info(TCGv_i64 out
, DisasContext
*s
, uint64_t pc
)
175 if (s
->base
.tb
->flags
& FLAG_MASK_32
) {
176 if (s
->base
.tb
->flags
& FLAG_MASK_64
) {
177 tcg_gen_movi_i64(out
, pc
);
182 assert(!(s
->base
.tb
->flags
& FLAG_MASK_64
));
183 tmp
= tcg_const_i64(pc
);
184 tcg_gen_deposit_i64(out
, out
, tmp
, 0, 32);
185 tcg_temp_free_i64(tmp
);
188 static TCGv_i64 psw_addr
;
189 static TCGv_i64 psw_mask
;
190 static TCGv_i64 gbea
;
192 static TCGv_i32 cc_op
;
193 static TCGv_i64 cc_src
;
194 static TCGv_i64 cc_dst
;
195 static TCGv_i64 cc_vr
;
197 static char cpu_reg_names
[16][4];
198 static TCGv_i64 regs
[16];
200 void s390x_translate_init(void)
204 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
205 offsetof(CPUS390XState
, psw
.addr
),
207 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
208 offsetof(CPUS390XState
, psw
.mask
),
210 gbea
= tcg_global_mem_new_i64(cpu_env
,
211 offsetof(CPUS390XState
, gbea
),
214 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
216 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
218 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
220 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
223 for (i
= 0; i
< 16; i
++) {
224 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
225 regs
[i
] = tcg_global_mem_new(cpu_env
,
226 offsetof(CPUS390XState
, regs
[i
]),
231 static inline int vec_full_reg_offset(uint8_t reg
)
234 return offsetof(CPUS390XState
, vregs
[reg
][0]);
237 static inline int vec_reg_offset(uint8_t reg
, uint8_t enr
, MemOp es
)
239 /* Convert element size (es) - e.g. MO_8 - to bytes */
240 const uint8_t bytes
= 1 << es
;
241 int offs
= enr
* bytes
;
244 * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
245 * of the 16 byte vector, on both, little and big endian systems.
247 * Big Endian (target/possible host)
248 * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
249 * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7]
250 * W: [ 0][ 1] - [ 2][ 3]
253 * Little Endian (possible host)
254 * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
255 * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4]
256 * W: [ 1][ 0] - [ 3][ 2]
259 * For 16 byte elements, the two 8 byte halves will not form a host
260 * int128 if the host is little endian, since they're in the wrong order.
261 * Some operations (e.g. xor) do not care. For operations like addition,
262 * the two 8 byte elements have to be loaded separately. Let's force all
263 * 16 byte operations to handle it in a special way.
265 g_assert(es
<= MO_64
);
266 #ifndef HOST_WORDS_BIGENDIAN
269 return offs
+ vec_full_reg_offset(reg
);
272 static inline int freg64_offset(uint8_t reg
)
275 return vec_reg_offset(reg
, 0, MO_64
);
278 static inline int freg32_offset(uint8_t reg
)
281 return vec_reg_offset(reg
, 0, MO_32
);
284 static TCGv_i64
load_reg(int reg
)
286 TCGv_i64 r
= tcg_temp_new_i64();
287 tcg_gen_mov_i64(r
, regs
[reg
]);
291 static TCGv_i64
load_freg(int reg
)
293 TCGv_i64 r
= tcg_temp_new_i64();
295 tcg_gen_ld_i64(r
, cpu_env
, freg64_offset(reg
));
299 static TCGv_i64
load_freg32_i64(int reg
)
301 TCGv_i64 r
= tcg_temp_new_i64();
303 tcg_gen_ld32u_i64(r
, cpu_env
, freg32_offset(reg
));
307 static void store_reg(int reg
, TCGv_i64 v
)
309 tcg_gen_mov_i64(regs
[reg
], v
);
312 static void store_freg(int reg
, TCGv_i64 v
)
314 tcg_gen_st_i64(v
, cpu_env
, freg64_offset(reg
));
317 static void store_reg32_i64(int reg
, TCGv_i64 v
)
319 /* 32 bit register writes keep the upper half */
320 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
323 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
325 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
328 static void store_freg32_i64(int reg
, TCGv_i64 v
)
330 tcg_gen_st32_i64(v
, cpu_env
, freg32_offset(reg
));
333 static void return_low128(TCGv_i64 dest
)
335 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
338 static void update_psw_addr(DisasContext
*s
)
341 tcg_gen_movi_i64(psw_addr
, s
->base
.pc_next
);
344 static void per_branch(DisasContext
*s
, bool to_next
)
346 #ifndef CONFIG_USER_ONLY
347 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
349 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
350 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->pc_tmp
) : psw_addr
;
351 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
353 tcg_temp_free_i64(next_pc
);
359 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
360 TCGv_i64 arg1
, TCGv_i64 arg2
)
362 #ifndef CONFIG_USER_ONLY
363 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
364 TCGLabel
*lab
= gen_new_label();
365 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
367 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
368 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
372 TCGv_i64 pc
= tcg_const_i64(s
->base
.pc_next
);
373 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
374 tcg_temp_free_i64(pc
);
379 static void per_breaking_event(DisasContext
*s
)
381 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
384 static void update_cc_op(DisasContext
*s
)
386 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
387 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
391 static inline uint64_t ld_code2(CPUS390XState
*env
, DisasContext
*s
,
394 return (uint64_t)translator_lduw(env
, &s
->base
, pc
);
397 static inline uint64_t ld_code4(CPUS390XState
*env
, DisasContext
*s
,
400 return (uint64_t)(uint32_t)translator_ldl(env
, &s
->base
, pc
);
403 static int get_mem_index(DisasContext
*s
)
405 #ifdef CONFIG_USER_ONLY
408 if (!(s
->base
.tb
->flags
& FLAG_MASK_DAT
)) {
412 switch (s
->base
.tb
->flags
& FLAG_MASK_ASC
) {
413 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
414 return MMU_PRIMARY_IDX
;
415 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
416 return MMU_SECONDARY_IDX
;
417 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
426 static void gen_exception(int excp
)
428 TCGv_i32 tmp
= tcg_const_i32(excp
);
429 gen_helper_exception(cpu_env
, tmp
);
430 tcg_temp_free_i32(tmp
);
433 static void gen_program_exception(DisasContext
*s
, int code
)
437 /* Remember what pgm exeption this was. */
438 tmp
= tcg_const_i32(code
);
439 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
440 tcg_temp_free_i32(tmp
);
442 tmp
= tcg_const_i32(s
->ilen
);
443 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
444 tcg_temp_free_i32(tmp
);
452 /* Trigger exception. */
453 gen_exception(EXCP_PGM
);
456 static inline void gen_illegal_opcode(DisasContext
*s
)
458 gen_program_exception(s
, PGM_OPERATION
);
461 static inline void gen_data_exception(uint8_t dxc
)
463 TCGv_i32 tmp
= tcg_const_i32(dxc
);
464 gen_helper_data_exception(cpu_env
, tmp
);
465 tcg_temp_free_i32(tmp
);
468 static inline void gen_trap(DisasContext
*s
)
470 /* Set DXC to 0xff */
471 gen_data_exception(0xff);
474 static void gen_addi_and_wrap_i64(DisasContext
*s
, TCGv_i64 dst
, TCGv_i64 src
,
477 tcg_gen_addi_i64(dst
, src
, imm
);
478 if (!(s
->base
.tb
->flags
& FLAG_MASK_64
)) {
479 if (s
->base
.tb
->flags
& FLAG_MASK_32
) {
480 tcg_gen_andi_i64(dst
, dst
, 0x7fffffff);
482 tcg_gen_andi_i64(dst
, dst
, 0x00ffffff);
487 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
489 TCGv_i64 tmp
= tcg_temp_new_i64();
492 * Note that d2 is limited to 20 bits, signed. If we crop negative
493 * displacements early we create larger immedate addends.
496 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
497 gen_addi_and_wrap_i64(s
, tmp
, tmp
, d2
);
499 gen_addi_and_wrap_i64(s
, tmp
, regs
[b2
], d2
);
501 gen_addi_and_wrap_i64(s
, tmp
, regs
[x2
], d2
);
502 } else if (!(s
->base
.tb
->flags
& FLAG_MASK_64
)) {
503 if (s
->base
.tb
->flags
& FLAG_MASK_32
) {
504 tcg_gen_movi_i64(tmp
, d2
& 0x7fffffff);
506 tcg_gen_movi_i64(tmp
, d2
& 0x00ffffff);
509 tcg_gen_movi_i64(tmp
, d2
);
515 static inline bool live_cc_data(DisasContext
*s
)
517 return (s
->cc_op
!= CC_OP_DYNAMIC
518 && s
->cc_op
!= CC_OP_STATIC
522 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
524 if (live_cc_data(s
)) {
525 tcg_gen_discard_i64(cc_src
);
526 tcg_gen_discard_i64(cc_dst
);
527 tcg_gen_discard_i64(cc_vr
);
529 s
->cc_op
= CC_OP_CONST0
+ val
;
532 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
534 if (live_cc_data(s
)) {
535 tcg_gen_discard_i64(cc_src
);
536 tcg_gen_discard_i64(cc_vr
);
538 tcg_gen_mov_i64(cc_dst
, dst
);
542 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
545 if (live_cc_data(s
)) {
546 tcg_gen_discard_i64(cc_vr
);
548 tcg_gen_mov_i64(cc_src
, src
);
549 tcg_gen_mov_i64(cc_dst
, dst
);
553 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
554 TCGv_i64 dst
, TCGv_i64 vr
)
556 tcg_gen_mov_i64(cc_src
, src
);
557 tcg_gen_mov_i64(cc_dst
, dst
);
558 tcg_gen_mov_i64(cc_vr
, vr
);
562 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
564 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
567 /* CC value is in env->cc_op */
568 static void set_cc_static(DisasContext
*s
)
570 if (live_cc_data(s
)) {
571 tcg_gen_discard_i64(cc_src
);
572 tcg_gen_discard_i64(cc_dst
);
573 tcg_gen_discard_i64(cc_vr
);
575 s
->cc_op
= CC_OP_STATIC
;
578 /* calculates cc into cc_op */
579 static void gen_op_calc_cc(DisasContext
*s
)
581 TCGv_i32 local_cc_op
= NULL
;
582 TCGv_i64 dummy
= NULL
;
586 dummy
= tcg_const_i64(0);
592 local_cc_op
= tcg_const_i32(s
->cc_op
);
608 /* s->cc_op is the cc value */
609 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
612 /* env->cc_op already is the cc value */
629 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
635 case CC_OP_LTUGTU_32
:
636 case CC_OP_LTUGTU_64
:
645 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
652 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
655 /* unknown operation - assume 3 arguments and cc_op in env */
656 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
663 tcg_temp_free_i32(local_cc_op
);
666 tcg_temp_free_i64(dummy
);
669 /* We now have cc in cc_op as constant */
673 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
675 if (unlikely(s
->base
.tb
->flags
& FLAG_MASK_PER
)) {
678 return translator_use_goto_tb(&s
->base
, dest
);
681 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
683 #ifdef DEBUG_INLINE_BRANCHES
684 inline_branch_miss
[cc_op
]++;
688 static void account_inline_branch(DisasContext
*s
, int cc_op
)
690 #ifdef DEBUG_INLINE_BRANCHES
691 inline_branch_hit
[cc_op
]++;
695 /* Table of mask values to comparison codes, given a comparison as input.
696 For such, CC=3 should not be possible. */
697 static const TCGCond ltgt_cond
[16] = {
698 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
699 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
700 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
701 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
702 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
703 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
704 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
705 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
708 /* Table of mask values to comparison codes, given a logic op as input.
709 For such, only CC=0 and CC=1 should be possible. */
710 static const TCGCond nz_cond
[16] = {
711 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
712 TCG_COND_NEVER
, TCG_COND_NEVER
,
713 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
714 TCG_COND_NE
, TCG_COND_NE
,
715 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
716 TCG_COND_EQ
, TCG_COND_EQ
,
717 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
718 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
721 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
722 details required to generate a TCG comparison. */
723 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
726 enum cc_op old_cc_op
= s
->cc_op
;
728 if (mask
== 15 || mask
== 0) {
729 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
732 c
->g1
= c
->g2
= true;
737 /* Find the TCG condition for the mask + cc op. */
743 cond
= ltgt_cond
[mask
];
744 if (cond
== TCG_COND_NEVER
) {
747 account_inline_branch(s
, old_cc_op
);
750 case CC_OP_LTUGTU_32
:
751 case CC_OP_LTUGTU_64
:
752 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
753 if (cond
== TCG_COND_NEVER
) {
756 account_inline_branch(s
, old_cc_op
);
760 cond
= nz_cond
[mask
];
761 if (cond
== TCG_COND_NEVER
) {
764 account_inline_branch(s
, old_cc_op
);
779 account_inline_branch(s
, old_cc_op
);
794 account_inline_branch(s
, old_cc_op
);
798 switch (mask
& 0xa) {
799 case 8: /* src == 0 -> no one bit found */
802 case 2: /* src != 0 -> one bit found */
808 account_inline_branch(s
, old_cc_op
);
814 case 8 | 2: /* result == 0 */
817 case 4 | 1: /* result != 0 */
820 case 8 | 4: /* !carry (borrow) */
821 cond
= old_cc_op
== CC_OP_ADDU
? TCG_COND_EQ
: TCG_COND_NE
;
823 case 2 | 1: /* carry (!borrow) */
824 cond
= old_cc_op
== CC_OP_ADDU
? TCG_COND_NE
: TCG_COND_EQ
;
829 account_inline_branch(s
, old_cc_op
);
834 /* Calculate cc value. */
839 /* Jump based on CC. We'll load up the real cond below;
840 the assignment here merely avoids a compiler warning. */
841 account_noninline_branch(s
, old_cc_op
);
842 old_cc_op
= CC_OP_STATIC
;
843 cond
= TCG_COND_NEVER
;
847 /* Load up the arguments of the comparison. */
849 c
->g1
= c
->g2
= false;
853 c
->u
.s32
.a
= tcg_temp_new_i32();
854 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
855 c
->u
.s32
.b
= tcg_const_i32(0);
858 case CC_OP_LTUGTU_32
:
860 c
->u
.s32
.a
= tcg_temp_new_i32();
861 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
862 c
->u
.s32
.b
= tcg_temp_new_i32();
863 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
870 c
->u
.s64
.b
= tcg_const_i64(0);
874 case CC_OP_LTUGTU_64
:
877 c
->g1
= c
->g2
= true;
883 c
->u
.s64
.a
= tcg_temp_new_i64();
884 c
->u
.s64
.b
= tcg_const_i64(0);
885 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
891 c
->u
.s64
.b
= tcg_const_i64(0);
895 case 4 | 1: /* result */
899 case 2 | 1: /* carry */
903 g_assert_not_reached();
912 case 0x8 | 0x4 | 0x2: /* cc != 3 */
914 c
->u
.s32
.b
= tcg_const_i32(3);
916 case 0x8 | 0x4 | 0x1: /* cc != 2 */
918 c
->u
.s32
.b
= tcg_const_i32(2);
920 case 0x8 | 0x2 | 0x1: /* cc != 1 */
922 c
->u
.s32
.b
= tcg_const_i32(1);
924 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
927 c
->u
.s32
.a
= tcg_temp_new_i32();
928 c
->u
.s32
.b
= tcg_const_i32(0);
929 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
931 case 0x8 | 0x4: /* cc < 2 */
933 c
->u
.s32
.b
= tcg_const_i32(2);
935 case 0x8: /* cc == 0 */
937 c
->u
.s32
.b
= tcg_const_i32(0);
939 case 0x4 | 0x2 | 0x1: /* cc != 0 */
941 c
->u
.s32
.b
= tcg_const_i32(0);
943 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
946 c
->u
.s32
.a
= tcg_temp_new_i32();
947 c
->u
.s32
.b
= tcg_const_i32(0);
948 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
950 case 0x4: /* cc == 1 */
952 c
->u
.s32
.b
= tcg_const_i32(1);
954 case 0x2 | 0x1: /* cc > 1 */
956 c
->u
.s32
.b
= tcg_const_i32(1);
958 case 0x2: /* cc == 2 */
960 c
->u
.s32
.b
= tcg_const_i32(2);
962 case 0x1: /* cc == 3 */
964 c
->u
.s32
.b
= tcg_const_i32(3);
967 /* CC is masked by something else: (8 >> cc) & mask. */
970 c
->u
.s32
.a
= tcg_const_i32(8);
971 c
->u
.s32
.b
= tcg_const_i32(0);
972 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
973 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
984 static void free_compare(DisasCompare
*c
)
988 tcg_temp_free_i64(c
->u
.s64
.a
);
990 tcg_temp_free_i32(c
->u
.s32
.a
);
995 tcg_temp_free_i64(c
->u
.s64
.b
);
997 tcg_temp_free_i32(c
->u
.s32
.b
);
1002 /* ====================================================================== */
1003 /* Define the insn format enumeration. */
1004 #define F0(N) FMT_##N,
1005 #define F1(N, X1) F0(N)
1006 #define F2(N, X1, X2) F0(N)
1007 #define F3(N, X1, X2, X3) F0(N)
1008 #define F4(N, X1, X2, X3, X4) F0(N)
1009 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1010 #define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
1013 #include "insn-format.def"
1024 /* This is the way fields are to be accessed out of DisasFields. */
1025 #define have_field(S, F) have_field1((S), FLD_O_##F)
1026 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1028 static bool have_field1(const DisasContext
*s
, enum DisasFieldIndexO c
)
1030 return (s
->fields
.presentO
>> c
) & 1;
1033 static int get_field1(const DisasContext
*s
, enum DisasFieldIndexO o
,
1034 enum DisasFieldIndexC c
)
1036 assert(have_field1(s
, o
));
1037 return s
->fields
.c
[c
];
1040 /* Describe the layout of each field in each format. */
1041 typedef struct DisasField
{
1043 unsigned int size
:8;
1044 unsigned int type
:2;
1045 unsigned int indexC
:6;
1046 enum DisasFieldIndexO indexO
:8;
1049 typedef struct DisasFormatInfo
{
1050 DisasField op
[NUM_C_FIELD
];
1053 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1054 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1055 #define V(N, B) { B, 4, 3, FLD_C_v##N, FLD_O_v##N }
1056 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1057 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1058 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1059 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1060 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1061 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1062 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1063 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1064 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1065 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1066 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1067 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1069 #define F0(N) { { } },
1070 #define F1(N, X1) { { X1 } },
1071 #define F2(N, X1, X2) { { X1, X2 } },
1072 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1073 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1074 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1075 #define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },
1077 static const DisasFormatInfo format_info
[] = {
1078 #include "insn-format.def"
1098 /* Generally, we'll extract operands into this structures, operate upon
1099 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1100 of routines below for more details. */
1102 bool g_out
, g_out2
, g_in1
, g_in2
;
1103 TCGv_i64 out
, out2
, in1
, in2
;
1107 /* Instructions can place constraints on their operands, raising specification
1108 exceptions if they are violated. To make this easy to automate, each "in1",
1109 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1110 of the following, or 0. To make this easy to document, we'll put the
1111 SPEC_<name> defines next to <name>. */
1113 #define SPEC_r1_even 1
1114 #define SPEC_r2_even 2
1115 #define SPEC_r3_even 4
1116 #define SPEC_r1_f128 8
1117 #define SPEC_r2_f128 16
1119 /* Return values from translate_one, indicating the state of the TB. */
1121 /* We are not using a goto_tb (for whatever reason), but have updated
1122 the PC (for whatever reason), so there's no need to do it again on
1124 #define DISAS_PC_UPDATED DISAS_TARGET_0
1126 /* We have emitted one or more goto_tb. No fixup required. */
1127 #define DISAS_GOTO_TB DISAS_TARGET_1
1129 /* We have updated the PC and CC values. */
1130 #define DISAS_PC_CC_UPDATED DISAS_TARGET_2
1132 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1133 updated the PC for the next instruction to be executed. */
1134 #define DISAS_PC_STALE DISAS_TARGET_3
1136 /* We are exiting the TB to the main loop. */
1137 #define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4
1140 /* Instruction flags */
1141 #define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */
1142 #define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */
1143 #define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */
1144 #define IF_BFP 0x0008 /* binary floating point instruction */
1145 #define IF_DFP 0x0010 /* decimal floating point instruction */
1146 #define IF_PRIV 0x0020 /* privileged instruction */
1147 #define IF_VEC 0x0040 /* vector instruction */
1148 #define IF_IO 0x0080 /* input/output instruction */
1159 /* Pre-process arguments before HELP_OP. */
1160 void (*help_in1
)(DisasContext
*, DisasOps
*);
1161 void (*help_in2
)(DisasContext
*, DisasOps
*);
1162 void (*help_prep
)(DisasContext
*, DisasOps
*);
1165 * Post-process output after HELP_OP.
1166 * Note that these are not called if HELP_OP returns DISAS_NORETURN.
1168 void (*help_wout
)(DisasContext
*, DisasOps
*);
1169 void (*help_cout
)(DisasContext
*, DisasOps
*);
1171 /* Implement the operation itself. */
1172 DisasJumpType (*help_op
)(DisasContext
*, DisasOps
*);
1177 /* ====================================================================== */
1178 /* Miscellaneous helpers, used by several operations. */
1180 static DisasJumpType
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1182 if (dest
== s
->pc_tmp
) {
1183 per_branch(s
, true);
1186 if (use_goto_tb(s
, dest
)) {
1188 per_breaking_event(s
);
1190 tcg_gen_movi_i64(psw_addr
, dest
);
1191 tcg_gen_exit_tb(s
->base
.tb
, 0);
1192 return DISAS_GOTO_TB
;
1194 tcg_gen_movi_i64(psw_addr
, dest
);
1195 per_branch(s
, false);
1196 return DISAS_PC_UPDATED
;
1200 static DisasJumpType
help_branch(DisasContext
*s
, DisasCompare
*c
,
1201 bool is_imm
, int imm
, TCGv_i64 cdest
)
1204 uint64_t dest
= s
->base
.pc_next
+ 2 * imm
;
1207 /* Take care of the special cases first. */
1208 if (c
->cond
== TCG_COND_NEVER
) {
1213 if (dest
== s
->pc_tmp
) {
1214 /* Branch to next. */
1215 per_branch(s
, true);
1219 if (c
->cond
== TCG_COND_ALWAYS
) {
1220 ret
= help_goto_direct(s
, dest
);
1225 /* E.g. bcr %r0 -> no branch. */
1229 if (c
->cond
== TCG_COND_ALWAYS
) {
1230 tcg_gen_mov_i64(psw_addr
, cdest
);
1231 per_branch(s
, false);
1232 ret
= DISAS_PC_UPDATED
;
1237 if (use_goto_tb(s
, s
->pc_tmp
)) {
1238 if (is_imm
&& use_goto_tb(s
, dest
)) {
1239 /* Both exits can use goto_tb. */
1242 lab
= gen_new_label();
1244 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1246 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1249 /* Branch not taken. */
1251 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
1252 tcg_gen_exit_tb(s
->base
.tb
, 0);
1256 per_breaking_event(s
);
1258 tcg_gen_movi_i64(psw_addr
, dest
);
1259 tcg_gen_exit_tb(s
->base
.tb
, 1);
1261 ret
= DISAS_GOTO_TB
;
1263 /* Fallthru can use goto_tb, but taken branch cannot. */
1264 /* Store taken branch destination before the brcond. This
1265 avoids having to allocate a new local temp to hold it.
1266 We'll overwrite this in the not taken case anyway. */
1268 tcg_gen_mov_i64(psw_addr
, cdest
);
1271 lab
= gen_new_label();
1273 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1275 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1278 /* Branch not taken. */
1281 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
1282 tcg_gen_exit_tb(s
->base
.tb
, 0);
1286 tcg_gen_movi_i64(psw_addr
, dest
);
1288 per_breaking_event(s
);
1289 ret
= DISAS_PC_UPDATED
;
1292 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1293 Most commonly we're single-stepping or some other condition that
1294 disables all use of goto_tb. Just update the PC and exit. */
1296 TCGv_i64 next
= tcg_const_i64(s
->pc_tmp
);
1298 cdest
= tcg_const_i64(dest
);
1302 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1304 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1306 TCGv_i32 t0
= tcg_temp_new_i32();
1307 TCGv_i64 t1
= tcg_temp_new_i64();
1308 TCGv_i64 z
= tcg_const_i64(0);
1309 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1310 tcg_gen_extu_i32_i64(t1
, t0
);
1311 tcg_temp_free_i32(t0
);
1312 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1313 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1314 tcg_temp_free_i64(t1
);
1315 tcg_temp_free_i64(z
);
1319 tcg_temp_free_i64(cdest
);
1321 tcg_temp_free_i64(next
);
1323 ret
= DISAS_PC_UPDATED
;
1331 /* ====================================================================== */
1332 /* The operations. These perform the bulk of the work for any insn,
1333 usually after the operands have been loaded and output initialized. */
1335 static DisasJumpType
op_abs(DisasContext
*s
, DisasOps
*o
)
1337 tcg_gen_abs_i64(o
->out
, o
->in2
);
1341 static DisasJumpType
op_absf32(DisasContext
*s
, DisasOps
*o
)
1343 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1347 static DisasJumpType
op_absf64(DisasContext
*s
, DisasOps
*o
)
1349 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1353 static DisasJumpType
op_absf128(DisasContext
*s
, DisasOps
*o
)
1355 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1356 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1360 static DisasJumpType
op_add(DisasContext
*s
, DisasOps
*o
)
1362 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1366 static DisasJumpType
op_addu64(DisasContext
*s
, DisasOps
*o
)
1368 tcg_gen_movi_i64(cc_src
, 0);
1369 tcg_gen_add2_i64(o
->out
, cc_src
, o
->in1
, cc_src
, o
->in2
, cc_src
);
1373 /* Compute carry into cc_src. */
1374 static void compute_carry(DisasContext
*s
)
1378 /* The carry value is already in cc_src (1,0). */
1381 tcg_gen_addi_i64(cc_src
, cc_src
, 1);
1387 /* The carry flag is the msb of CC; compute into cc_src. */
1388 tcg_gen_extu_i32_i64(cc_src
, cc_op
);
1389 tcg_gen_shri_i64(cc_src
, cc_src
, 1);
1394 static DisasJumpType
op_addc32(DisasContext
*s
, DisasOps
*o
)
1397 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1398 tcg_gen_add_i64(o
->out
, o
->out
, cc_src
);
1402 static DisasJumpType
op_addc64(DisasContext
*s
, DisasOps
*o
)
1406 TCGv_i64 zero
= tcg_const_i64(0);
1407 tcg_gen_add2_i64(o
->out
, cc_src
, o
->in1
, zero
, cc_src
, zero
);
1408 tcg_gen_add2_i64(o
->out
, cc_src
, o
->out
, cc_src
, o
->in2
, zero
);
1409 tcg_temp_free_i64(zero
);
1414 static DisasJumpType
op_asi(DisasContext
*s
, DisasOps
*o
)
1416 bool non_atomic
= !s390_has_feat(S390_FEAT_STFLE_45
);
1418 o
->in1
= tcg_temp_new_i64();
1420 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1422 /* Perform the atomic addition in memory. */
1423 tcg_gen_atomic_fetch_add_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1427 /* Recompute also for atomic case: needed for setting CC. */
1428 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1431 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1436 static DisasJumpType
op_asiu64(DisasContext
*s
, DisasOps
*o
)
1438 bool non_atomic
= !s390_has_feat(S390_FEAT_STFLE_45
);
1440 o
->in1
= tcg_temp_new_i64();
1442 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1444 /* Perform the atomic addition in memory. */
1445 tcg_gen_atomic_fetch_add_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1449 /* Recompute also for atomic case: needed for setting CC. */
1450 tcg_gen_movi_i64(cc_src
, 0);
1451 tcg_gen_add2_i64(o
->out
, cc_src
, o
->in1
, cc_src
, o
->in2
, cc_src
);
1454 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1459 static DisasJumpType
op_aeb(DisasContext
*s
, DisasOps
*o
)
1461 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1465 static DisasJumpType
op_adb(DisasContext
*s
, DisasOps
*o
)
1467 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1471 static DisasJumpType
op_axb(DisasContext
*s
, DisasOps
*o
)
1473 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1474 return_low128(o
->out2
);
1478 static DisasJumpType
op_and(DisasContext
*s
, DisasOps
*o
)
1480 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1484 static DisasJumpType
op_andi(DisasContext
*s
, DisasOps
*o
)
1486 int shift
= s
->insn
->data
& 0xff;
1487 int size
= s
->insn
->data
>> 8;
1488 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1491 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1492 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1493 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1495 /* Produce the CC from only the bits manipulated. */
1496 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1497 set_cc_nz_u64(s
, cc_dst
);
1501 static DisasJumpType
op_ni(DisasContext
*s
, DisasOps
*o
)
1503 o
->in1
= tcg_temp_new_i64();
1505 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1506 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1508 /* Perform the atomic operation in memory. */
1509 tcg_gen_atomic_fetch_and_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1513 /* Recompute also for atomic case: needed for setting CC. */
1514 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1516 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1517 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1522 static DisasJumpType
op_bas(DisasContext
*s
, DisasOps
*o
)
1524 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1526 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1527 per_branch(s
, false);
1528 return DISAS_PC_UPDATED
;
1534 static void save_link_info(DisasContext
*s
, DisasOps
*o
)
1538 if (s
->base
.tb
->flags
& (FLAG_MASK_32
| FLAG_MASK_64
)) {
1539 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1543 tcg_gen_andi_i64(o
->out
, o
->out
, 0xffffffff00000000ull
);
1544 tcg_gen_ori_i64(o
->out
, o
->out
, ((s
->ilen
/ 2) << 30) | s
->pc_tmp
);
1545 t
= tcg_temp_new_i64();
1546 tcg_gen_shri_i64(t
, psw_mask
, 16);
1547 tcg_gen_andi_i64(t
, t
, 0x0f000000);
1548 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1549 tcg_gen_extu_i32_i64(t
, cc_op
);
1550 tcg_gen_shli_i64(t
, t
, 28);
1551 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1552 tcg_temp_free_i64(t
);
1555 static DisasJumpType
op_bal(DisasContext
*s
, DisasOps
*o
)
1557 save_link_info(s
, o
);
1559 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1560 per_branch(s
, false);
1561 return DISAS_PC_UPDATED
;
1567 static DisasJumpType
op_basi(DisasContext
*s
, DisasOps
*o
)
1569 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1570 return help_goto_direct(s
, s
->base
.pc_next
+ 2 * get_field(s
, i2
));
1573 static DisasJumpType
op_bc(DisasContext
*s
, DisasOps
*o
)
1575 int m1
= get_field(s
, m1
);
1576 bool is_imm
= have_field(s
, i2
);
1577 int imm
= is_imm
? get_field(s
, i2
) : 0;
1580 /* BCR with R2 = 0 causes no branching */
1581 if (have_field(s
, r2
) && get_field(s
, r2
) == 0) {
1583 /* Perform serialization */
1584 /* FIXME: check for fast-BCR-serialization facility */
1585 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1588 /* Perform serialization */
1589 /* FIXME: perform checkpoint-synchronisation */
1590 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1595 disas_jcc(s
, &c
, m1
);
1596 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1599 static DisasJumpType
op_bct32(DisasContext
*s
, DisasOps
*o
)
1601 int r1
= get_field(s
, r1
);
1602 bool is_imm
= have_field(s
, i2
);
1603 int imm
= is_imm
? get_field(s
, i2
) : 0;
1607 c
.cond
= TCG_COND_NE
;
1612 t
= tcg_temp_new_i64();
1613 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1614 store_reg32_i64(r1
, t
);
1615 c
.u
.s32
.a
= tcg_temp_new_i32();
1616 c
.u
.s32
.b
= tcg_const_i32(0);
1617 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1618 tcg_temp_free_i64(t
);
1620 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1623 static DisasJumpType
op_bcth(DisasContext
*s
, DisasOps
*o
)
1625 int r1
= get_field(s
, r1
);
1626 int imm
= get_field(s
, i2
);
1630 c
.cond
= TCG_COND_NE
;
1635 t
= tcg_temp_new_i64();
1636 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1637 tcg_gen_subi_i64(t
, t
, 1);
1638 store_reg32h_i64(r1
, t
);
1639 c
.u
.s32
.a
= tcg_temp_new_i32();
1640 c
.u
.s32
.b
= tcg_const_i32(0);
1641 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1642 tcg_temp_free_i64(t
);
1644 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1647 static DisasJumpType
op_bct64(DisasContext
*s
, DisasOps
*o
)
1649 int r1
= get_field(s
, r1
);
1650 bool is_imm
= have_field(s
, i2
);
1651 int imm
= is_imm
? get_field(s
, i2
) : 0;
1654 c
.cond
= TCG_COND_NE
;
1659 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1660 c
.u
.s64
.a
= regs
[r1
];
1661 c
.u
.s64
.b
= tcg_const_i64(0);
1663 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1666 static DisasJumpType
op_bx32(DisasContext
*s
, DisasOps
*o
)
1668 int r1
= get_field(s
, r1
);
1669 int r3
= get_field(s
, r3
);
1670 bool is_imm
= have_field(s
, i2
);
1671 int imm
= is_imm
? get_field(s
, i2
) : 0;
1675 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1680 t
= tcg_temp_new_i64();
1681 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1682 c
.u
.s32
.a
= tcg_temp_new_i32();
1683 c
.u
.s32
.b
= tcg_temp_new_i32();
1684 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1685 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1686 store_reg32_i64(r1
, t
);
1687 tcg_temp_free_i64(t
);
1689 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1692 static DisasJumpType
op_bx64(DisasContext
*s
, DisasOps
*o
)
1694 int r1
= get_field(s
, r1
);
1695 int r3
= get_field(s
, r3
);
1696 bool is_imm
= have_field(s
, i2
);
1697 int imm
= is_imm
? get_field(s
, i2
) : 0;
1700 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1703 if (r1
== (r3
| 1)) {
1704 c
.u
.s64
.b
= load_reg(r3
| 1);
1707 c
.u
.s64
.b
= regs
[r3
| 1];
1711 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1712 c
.u
.s64
.a
= regs
[r1
];
1715 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1718 static DisasJumpType
op_cj(DisasContext
*s
, DisasOps
*o
)
1720 int imm
, m3
= get_field(s
, m3
);
1724 c
.cond
= ltgt_cond
[m3
];
1725 if (s
->insn
->data
) {
1726 c
.cond
= tcg_unsigned_cond(c
.cond
);
1728 c
.is_64
= c
.g1
= c
.g2
= true;
1732 is_imm
= have_field(s
, i4
);
1734 imm
= get_field(s
, i4
);
1737 o
->out
= get_address(s
, 0, get_field(s
, b4
),
1741 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1744 static DisasJumpType
op_ceb(DisasContext
*s
, DisasOps
*o
)
1746 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1751 static DisasJumpType
op_cdb(DisasContext
*s
, DisasOps
*o
)
1753 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1758 static DisasJumpType
op_cxb(DisasContext
*s
, DisasOps
*o
)
1760 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1765 static TCGv_i32
fpinst_extract_m34(DisasContext
*s
, bool m3_with_fpe
,
1768 const bool fpe
= s390_has_feat(S390_FEAT_FLOATING_POINT_EXT
);
1769 uint8_t m3
= get_field(s
, m3
);
1770 uint8_t m4
= get_field(s
, m4
);
1772 /* m3 field was introduced with FPE */
1773 if (!fpe
&& m3_with_fpe
) {
1776 /* m4 field was introduced with FPE */
1777 if (!fpe
&& m4_with_fpe
) {
1781 /* Check for valid rounding modes. Mode 3 was introduced later. */
1782 if (m3
== 2 || m3
> 7 || (!fpe
&& m3
== 3)) {
1783 gen_program_exception(s
, PGM_SPECIFICATION
);
1787 return tcg_const_i32(deposit32(m3
, 4, 4, m4
));
1790 static DisasJumpType
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1792 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1795 return DISAS_NORETURN
;
1797 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m34
);
1798 tcg_temp_free_i32(m34
);
1803 static DisasJumpType
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1805 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1808 return DISAS_NORETURN
;
1810 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m34
);
1811 tcg_temp_free_i32(m34
);
1816 static DisasJumpType
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1818 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1821 return DISAS_NORETURN
;
1823 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
1824 tcg_temp_free_i32(m34
);
1829 static DisasJumpType
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1831 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1834 return DISAS_NORETURN
;
1836 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m34
);
1837 tcg_temp_free_i32(m34
);
1842 static DisasJumpType
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1844 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1847 return DISAS_NORETURN
;
1849 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m34
);
1850 tcg_temp_free_i32(m34
);
1855 static DisasJumpType
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1857 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1860 return DISAS_NORETURN
;
1862 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
1863 tcg_temp_free_i32(m34
);
1868 static DisasJumpType
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1870 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1873 return DISAS_NORETURN
;
1875 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m34
);
1876 tcg_temp_free_i32(m34
);
1881 static DisasJumpType
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1883 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1886 return DISAS_NORETURN
;
1888 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m34
);
1889 tcg_temp_free_i32(m34
);
1894 static DisasJumpType
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1896 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1899 return DISAS_NORETURN
;
1901 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
1902 tcg_temp_free_i32(m34
);
1907 static DisasJumpType
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1909 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1912 return DISAS_NORETURN
;
1914 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m34
);
1915 tcg_temp_free_i32(m34
);
1920 static DisasJumpType
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1922 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1925 return DISAS_NORETURN
;
1927 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m34
);
1928 tcg_temp_free_i32(m34
);
1933 static DisasJumpType
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1935 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1938 return DISAS_NORETURN
;
1940 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
1941 tcg_temp_free_i32(m34
);
1946 static DisasJumpType
op_cegb(DisasContext
*s
, DisasOps
*o
)
1948 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
1951 return DISAS_NORETURN
;
1953 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m34
);
1954 tcg_temp_free_i32(m34
);
1958 static DisasJumpType
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1960 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
1963 return DISAS_NORETURN
;
1965 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m34
);
1966 tcg_temp_free_i32(m34
);
1970 static DisasJumpType
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1972 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
1975 return DISAS_NORETURN
;
1977 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m34
);
1978 tcg_temp_free_i32(m34
);
1979 return_low128(o
->out2
);
1983 static DisasJumpType
op_celgb(DisasContext
*s
, DisasOps
*o
)
1985 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1988 return DISAS_NORETURN
;
1990 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m34
);
1991 tcg_temp_free_i32(m34
);
1995 static DisasJumpType
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1997 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
2000 return DISAS_NORETURN
;
2002 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m34
);
2003 tcg_temp_free_i32(m34
);
2007 static DisasJumpType
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
2009 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
2012 return DISAS_NORETURN
;
2014 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m34
);
2015 tcg_temp_free_i32(m34
);
2016 return_low128(o
->out2
);
2020 static DisasJumpType
op_cksm(DisasContext
*s
, DisasOps
*o
)
2022 int r2
= get_field(s
, r2
);
2023 TCGv_i64 len
= tcg_temp_new_i64();
2025 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
2027 return_low128(o
->out
);
2029 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
2030 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
2031 tcg_temp_free_i64(len
);
2036 static DisasJumpType
op_clc(DisasContext
*s
, DisasOps
*o
)
2038 int l
= get_field(s
, l1
);
2043 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
2044 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
2047 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
2048 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
2051 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
2052 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
2055 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
2056 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
2059 vl
= tcg_const_i32(l
);
2060 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
2061 tcg_temp_free_i32(vl
);
2065 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
2069 static DisasJumpType
op_clcl(DisasContext
*s
, DisasOps
*o
)
2071 int r1
= get_field(s
, r1
);
2072 int r2
= get_field(s
, r2
);
2075 /* r1 and r2 must be even. */
2076 if (r1
& 1 || r2
& 1) {
2077 gen_program_exception(s
, PGM_SPECIFICATION
);
2078 return DISAS_NORETURN
;
2081 t1
= tcg_const_i32(r1
);
2082 t2
= tcg_const_i32(r2
);
2083 gen_helper_clcl(cc_op
, cpu_env
, t1
, t2
);
2084 tcg_temp_free_i32(t1
);
2085 tcg_temp_free_i32(t2
);
2090 static DisasJumpType
op_clcle(DisasContext
*s
, DisasOps
*o
)
2092 int r1
= get_field(s
, r1
);
2093 int r3
= get_field(s
, r3
);
2096 /* r1 and r3 must be even. */
2097 if (r1
& 1 || r3
& 1) {
2098 gen_program_exception(s
, PGM_SPECIFICATION
);
2099 return DISAS_NORETURN
;
2102 t1
= tcg_const_i32(r1
);
2103 t3
= tcg_const_i32(r3
);
2104 gen_helper_clcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
2105 tcg_temp_free_i32(t1
);
2106 tcg_temp_free_i32(t3
);
2111 static DisasJumpType
op_clclu(DisasContext
*s
, DisasOps
*o
)
2113 int r1
= get_field(s
, r1
);
2114 int r3
= get_field(s
, r3
);
2117 /* r1 and r3 must be even. */
2118 if (r1
& 1 || r3
& 1) {
2119 gen_program_exception(s
, PGM_SPECIFICATION
);
2120 return DISAS_NORETURN
;
2123 t1
= tcg_const_i32(r1
);
2124 t3
= tcg_const_i32(r3
);
2125 gen_helper_clclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
2126 tcg_temp_free_i32(t1
);
2127 tcg_temp_free_i32(t3
);
2132 static DisasJumpType
op_clm(DisasContext
*s
, DisasOps
*o
)
2134 TCGv_i32 m3
= tcg_const_i32(get_field(s
, m3
));
2135 TCGv_i32 t1
= tcg_temp_new_i32();
2136 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
2137 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
2139 tcg_temp_free_i32(t1
);
2140 tcg_temp_free_i32(m3
);
2144 static DisasJumpType
op_clst(DisasContext
*s
, DisasOps
*o
)
2146 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2148 return_low128(o
->in2
);
2152 static DisasJumpType
op_cps(DisasContext
*s
, DisasOps
*o
)
2154 TCGv_i64 t
= tcg_temp_new_i64();
2155 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
2156 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
2157 tcg_gen_or_i64(o
->out
, o
->out
, t
);
2158 tcg_temp_free_i64(t
);
2162 static DisasJumpType
op_cs(DisasContext
*s
, DisasOps
*o
)
2164 int d2
= get_field(s
, d2
);
2165 int b2
= get_field(s
, b2
);
2168 /* Note that in1 = R3 (new value) and
2169 in2 = (zero-extended) R1 (expected value). */
2171 addr
= get_address(s
, 0, b2
, d2
);
2172 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
2173 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
2174 tcg_temp_free_i64(addr
);
2176 /* Are the memory and expected values (un)equal? Note that this setcond
2177 produces the output CC value, thus the NE sense of the test. */
2178 cc
= tcg_temp_new_i64();
2179 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
2180 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2181 tcg_temp_free_i64(cc
);
2187 static DisasJumpType
op_cdsg(DisasContext
*s
, DisasOps
*o
)
2189 int r1
= get_field(s
, r1
);
2190 int r3
= get_field(s
, r3
);
2191 int d2
= get_field(s
, d2
);
2192 int b2
= get_field(s
, b2
);
2193 DisasJumpType ret
= DISAS_NEXT
;
2195 TCGv_i32 t_r1
, t_r3
;
2197 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2198 addr
= get_address(s
, 0, b2
, d2
);
2199 t_r1
= tcg_const_i32(r1
);
2200 t_r3
= tcg_const_i32(r3
);
2201 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
2202 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
2203 } else if (HAVE_CMPXCHG128
) {
2204 gen_helper_cdsg_parallel(cpu_env
, addr
, t_r1
, t_r3
);
2206 gen_helper_exit_atomic(cpu_env
);
2207 ret
= DISAS_NORETURN
;
2209 tcg_temp_free_i64(addr
);
2210 tcg_temp_free_i32(t_r1
);
2211 tcg_temp_free_i32(t_r3
);
2217 static DisasJumpType
op_csst(DisasContext
*s
, DisasOps
*o
)
2219 int r3
= get_field(s
, r3
);
2220 TCGv_i32 t_r3
= tcg_const_i32(r3
);
2222 if (tb_cflags(s
->base
.tb
) & CF_PARALLEL
) {
2223 gen_helper_csst_parallel(cc_op
, cpu_env
, t_r3
, o
->addr1
, o
->in2
);
2225 gen_helper_csst(cc_op
, cpu_env
, t_r3
, o
->addr1
, o
->in2
);
2227 tcg_temp_free_i32(t_r3
);
2233 #ifndef CONFIG_USER_ONLY
2234 static DisasJumpType
op_csp(DisasContext
*s
, DisasOps
*o
)
2236 MemOp mop
= s
->insn
->data
;
2237 TCGv_i64 addr
, old
, cc
;
2238 TCGLabel
*lab
= gen_new_label();
2240 /* Note that in1 = R1 (zero-extended expected value),
2241 out = R1 (original reg), out2 = R1+1 (new value). */
2243 addr
= tcg_temp_new_i64();
2244 old
= tcg_temp_new_i64();
2245 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2246 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2247 get_mem_index(s
), mop
| MO_ALIGN
);
2248 tcg_temp_free_i64(addr
);
2250 /* Are the memory and expected values (un)equal? */
2251 cc
= tcg_temp_new_i64();
2252 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2253 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2255 /* Write back the output now, so that it happens before the
2256 following branch, so that we don't need local temps. */
2257 if ((mop
& MO_SIZE
) == MO_32
) {
2258 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2260 tcg_gen_mov_i64(o
->out
, old
);
2262 tcg_temp_free_i64(old
);
2264 /* If the comparison was equal, and the LSB of R2 was set,
2265 then we need to flush the TLB (for all cpus). */
2266 tcg_gen_xori_i64(cc
, cc
, 1);
2267 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2268 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2269 tcg_temp_free_i64(cc
);
2271 gen_helper_purge(cpu_env
);
2278 static DisasJumpType
op_cvd(DisasContext
*s
, DisasOps
*o
)
2280 TCGv_i64 t1
= tcg_temp_new_i64();
2281 TCGv_i32 t2
= tcg_temp_new_i32();
2282 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2283 gen_helper_cvd(t1
, t2
);
2284 tcg_temp_free_i32(t2
);
2285 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2286 tcg_temp_free_i64(t1
);
2290 static DisasJumpType
op_ct(DisasContext
*s
, DisasOps
*o
)
2292 int m3
= get_field(s
, m3
);
2293 TCGLabel
*lab
= gen_new_label();
2296 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2297 if (s
->insn
->data
) {
2298 c
= tcg_unsigned_cond(c
);
2300 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2309 static DisasJumpType
op_cuXX(DisasContext
*s
, DisasOps
*o
)
2311 int m3
= get_field(s
, m3
);
2312 int r1
= get_field(s
, r1
);
2313 int r2
= get_field(s
, r2
);
2314 TCGv_i32 tr1
, tr2
, chk
;
2316 /* R1 and R2 must both be even. */
2317 if ((r1
| r2
) & 1) {
2318 gen_program_exception(s
, PGM_SPECIFICATION
);
2319 return DISAS_NORETURN
;
2321 if (!s390_has_feat(S390_FEAT_ETF3_ENH
)) {
2325 tr1
= tcg_const_i32(r1
);
2326 tr2
= tcg_const_i32(r2
);
2327 chk
= tcg_const_i32(m3
);
2329 switch (s
->insn
->data
) {
2331 gen_helper_cu12(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2334 gen_helper_cu14(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2337 gen_helper_cu21(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2340 gen_helper_cu24(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2343 gen_helper_cu41(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2346 gen_helper_cu42(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2349 g_assert_not_reached();
2352 tcg_temp_free_i32(tr1
);
2353 tcg_temp_free_i32(tr2
);
2354 tcg_temp_free_i32(chk
);
2359 #ifndef CONFIG_USER_ONLY
2360 static DisasJumpType
op_diag(DisasContext
*s
, DisasOps
*o
)
2362 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
2363 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
2364 TCGv_i32 func_code
= tcg_const_i32(get_field(s
, i2
));
2366 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2368 tcg_temp_free_i32(func_code
);
2369 tcg_temp_free_i32(r3
);
2370 tcg_temp_free_i32(r1
);
2375 static DisasJumpType
op_divs32(DisasContext
*s
, DisasOps
*o
)
2377 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2378 return_low128(o
->out
);
2382 static DisasJumpType
op_divu32(DisasContext
*s
, DisasOps
*o
)
2384 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2385 return_low128(o
->out
);
2389 static DisasJumpType
op_divs64(DisasContext
*s
, DisasOps
*o
)
2391 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2392 return_low128(o
->out
);
2396 static DisasJumpType
op_divu64(DisasContext
*s
, DisasOps
*o
)
2398 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2399 return_low128(o
->out
);
2403 static DisasJumpType
op_deb(DisasContext
*s
, DisasOps
*o
)
2405 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2409 static DisasJumpType
op_ddb(DisasContext
*s
, DisasOps
*o
)
2411 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2415 static DisasJumpType
op_dxb(DisasContext
*s
, DisasOps
*o
)
2417 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2418 return_low128(o
->out2
);
2422 static DisasJumpType
op_ear(DisasContext
*s
, DisasOps
*o
)
2424 int r2
= get_field(s
, r2
);
2425 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2429 static DisasJumpType
op_ecag(DisasContext
*s
, DisasOps
*o
)
2431 /* No cache information provided. */
2432 tcg_gen_movi_i64(o
->out
, -1);
2436 static DisasJumpType
op_efpc(DisasContext
*s
, DisasOps
*o
)
2438 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2442 static DisasJumpType
op_epsw(DisasContext
*s
, DisasOps
*o
)
2444 int r1
= get_field(s
, r1
);
2445 int r2
= get_field(s
, r2
);
2446 TCGv_i64 t
= tcg_temp_new_i64();
2448 /* Note the "subsequently" in the PoO, which implies a defined result
2449 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2450 tcg_gen_shri_i64(t
, psw_mask
, 32);
2451 store_reg32_i64(r1
, t
);
2453 store_reg32_i64(r2
, psw_mask
);
2456 tcg_temp_free_i64(t
);
2460 static DisasJumpType
op_ex(DisasContext
*s
, DisasOps
*o
)
2462 int r1
= get_field(s
, r1
);
2466 /* Nested EXECUTE is not allowed. */
2467 if (unlikely(s
->ex_value
)) {
2468 gen_program_exception(s
, PGM_EXECUTE
);
2469 return DISAS_NORETURN
;
2476 v1
= tcg_const_i64(0);
2481 ilen
= tcg_const_i32(s
->ilen
);
2482 gen_helper_ex(cpu_env
, ilen
, v1
, o
->in2
);
2483 tcg_temp_free_i32(ilen
);
2486 tcg_temp_free_i64(v1
);
2489 return DISAS_PC_CC_UPDATED
;
2492 static DisasJumpType
op_fieb(DisasContext
*s
, DisasOps
*o
)
2494 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
2497 return DISAS_NORETURN
;
2499 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m34
);
2500 tcg_temp_free_i32(m34
);
2504 static DisasJumpType
op_fidb(DisasContext
*s
, DisasOps
*o
)
2506 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
2509 return DISAS_NORETURN
;
2511 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m34
);
2512 tcg_temp_free_i32(m34
);
2516 static DisasJumpType
op_fixb(DisasContext
*s
, DisasOps
*o
)
2518 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
2521 return DISAS_NORETURN
;
2523 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
2524 return_low128(o
->out2
);
2525 tcg_temp_free_i32(m34
);
2529 static DisasJumpType
op_flogr(DisasContext
*s
, DisasOps
*o
)
2531 /* We'll use the original input for cc computation, since we get to
2532 compare that against 0, which ought to be better than comparing
2533 the real output against 64. It also lets cc_dst be a convenient
2534 temporary during our computation. */
2535 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2537 /* R1 = IN ? CLZ(IN) : 64. */
2538 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2540 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2541 value by 64, which is undefined. But since the shift is 64 iff the
2542 input is zero, we still get the correct result after and'ing. */
2543 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2544 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2545 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2549 static DisasJumpType
op_icm(DisasContext
*s
, DisasOps
*o
)
2551 int m3
= get_field(s
, m3
);
2552 int pos
, len
, base
= s
->insn
->data
;
2553 TCGv_i64 tmp
= tcg_temp_new_i64();
2558 /* Effectively a 32-bit load. */
2559 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2566 /* Effectively a 16-bit load. */
2567 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2575 /* Effectively an 8-bit load. */
2576 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2581 pos
= base
+ ctz32(m3
) * 8;
2582 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2583 ccm
= ((1ull << len
) - 1) << pos
;
2587 /* This is going to be a sequence of loads and inserts. */
2588 pos
= base
+ 32 - 8;
2592 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2593 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2594 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2597 m3
= (m3
<< 1) & 0xf;
2603 tcg_gen_movi_i64(tmp
, ccm
);
2604 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2605 tcg_temp_free_i64(tmp
);
2609 static DisasJumpType
op_insi(DisasContext
*s
, DisasOps
*o
)
2611 int shift
= s
->insn
->data
& 0xff;
2612 int size
= s
->insn
->data
>> 8;
2613 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2617 static DisasJumpType
op_ipm(DisasContext
*s
, DisasOps
*o
)
2622 t1
= tcg_temp_new_i64();
2623 tcg_gen_extract_i64(t1
, psw_mask
, 40, 4);
2624 t2
= tcg_temp_new_i64();
2625 tcg_gen_extu_i32_i64(t2
, cc_op
);
2626 tcg_gen_deposit_i64(t1
, t1
, t2
, 4, 60);
2627 tcg_gen_deposit_i64(o
->out
, o
->out
, t1
, 24, 8);
2628 tcg_temp_free_i64(t1
);
2629 tcg_temp_free_i64(t2
);
2633 #ifndef CONFIG_USER_ONLY
2634 static DisasJumpType
op_idte(DisasContext
*s
, DisasOps
*o
)
2638 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2639 m4
= tcg_const_i32(get_field(s
, m4
));
2641 m4
= tcg_const_i32(0);
2643 gen_helper_idte(cpu_env
, o
->in1
, o
->in2
, m4
);
2644 tcg_temp_free_i32(m4
);
2648 static DisasJumpType
op_ipte(DisasContext
*s
, DisasOps
*o
)
2652 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2653 m4
= tcg_const_i32(get_field(s
, m4
));
2655 m4
= tcg_const_i32(0);
2657 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
, m4
);
2658 tcg_temp_free_i32(m4
);
2662 static DisasJumpType
op_iske(DisasContext
*s
, DisasOps
*o
)
2664 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2669 static DisasJumpType
op_msa(DisasContext
*s
, DisasOps
*o
)
2671 int r1
= have_field(s
, r1
) ? get_field(s
, r1
) : 0;
2672 int r2
= have_field(s
, r2
) ? get_field(s
, r2
) : 0;
2673 int r3
= have_field(s
, r3
) ? get_field(s
, r3
) : 0;
2674 TCGv_i32 t_r1
, t_r2
, t_r3
, type
;
2676 switch (s
->insn
->data
) {
2677 case S390_FEAT_TYPE_KMA
:
2678 if (r3
== r1
|| r3
== r2
) {
2679 gen_program_exception(s
, PGM_SPECIFICATION
);
2680 return DISAS_NORETURN
;
2683 case S390_FEAT_TYPE_KMCTR
:
2684 if (r3
& 1 || !r3
) {
2685 gen_program_exception(s
, PGM_SPECIFICATION
);
2686 return DISAS_NORETURN
;
2689 case S390_FEAT_TYPE_PPNO
:
2690 case S390_FEAT_TYPE_KMF
:
2691 case S390_FEAT_TYPE_KMC
:
2692 case S390_FEAT_TYPE_KMO
:
2693 case S390_FEAT_TYPE_KM
:
2694 if (r1
& 1 || !r1
) {
2695 gen_program_exception(s
, PGM_SPECIFICATION
);
2696 return DISAS_NORETURN
;
2699 case S390_FEAT_TYPE_KMAC
:
2700 case S390_FEAT_TYPE_KIMD
:
2701 case S390_FEAT_TYPE_KLMD
:
2702 if (r2
& 1 || !r2
) {
2703 gen_program_exception(s
, PGM_SPECIFICATION
);
2704 return DISAS_NORETURN
;
2707 case S390_FEAT_TYPE_PCKMO
:
2708 case S390_FEAT_TYPE_PCC
:
2711 g_assert_not_reached();
2714 t_r1
= tcg_const_i32(r1
);
2715 t_r2
= tcg_const_i32(r2
);
2716 t_r3
= tcg_const_i32(r3
);
2717 type
= tcg_const_i32(s
->insn
->data
);
2718 gen_helper_msa(cc_op
, cpu_env
, t_r1
, t_r2
, t_r3
, type
);
2720 tcg_temp_free_i32(t_r1
);
2721 tcg_temp_free_i32(t_r2
);
2722 tcg_temp_free_i32(t_r3
);
2723 tcg_temp_free_i32(type
);
2727 static DisasJumpType
op_keb(DisasContext
*s
, DisasOps
*o
)
2729 gen_helper_keb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2734 static DisasJumpType
op_kdb(DisasContext
*s
, DisasOps
*o
)
2736 gen_helper_kdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2741 static DisasJumpType
op_kxb(DisasContext
*s
, DisasOps
*o
)
2743 gen_helper_kxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2748 static DisasJumpType
op_laa(DisasContext
*s
, DisasOps
*o
)
2750 /* The real output is indeed the original value in memory;
2751 recompute the addition for the computation of CC. */
2752 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2753 s
->insn
->data
| MO_ALIGN
);
2754 /* However, we need to recompute the addition for setting CC. */
2755 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2759 static DisasJumpType
op_lan(DisasContext
*s
, DisasOps
*o
)
2761 /* The real output is indeed the original value in memory;
2762 recompute the addition for the computation of CC. */
2763 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2764 s
->insn
->data
| MO_ALIGN
);
2765 /* However, we need to recompute the operation for setting CC. */
2766 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2770 static DisasJumpType
op_lao(DisasContext
*s
, DisasOps
*o
)
2772 /* The real output is indeed the original value in memory;
2773 recompute the addition for the computation of CC. */
2774 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2775 s
->insn
->data
| MO_ALIGN
);
2776 /* However, we need to recompute the operation for setting CC. */
2777 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2781 static DisasJumpType
op_lax(DisasContext
*s
, DisasOps
*o
)
2783 /* The real output is indeed the original value in memory;
2784 recompute the addition for the computation of CC. */
2785 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2786 s
->insn
->data
| MO_ALIGN
);
2787 /* However, we need to recompute the operation for setting CC. */
2788 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2792 static DisasJumpType
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2794 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2798 static DisasJumpType
op_ledb(DisasContext
*s
, DisasOps
*o
)
2800 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
2803 return DISAS_NORETURN
;
2805 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
, m34
);
2806 tcg_temp_free_i32(m34
);
2810 static DisasJumpType
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2812 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
2815 return DISAS_NORETURN
;
2817 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
2818 tcg_temp_free_i32(m34
);
2822 static DisasJumpType
op_lexb(DisasContext
*s
, DisasOps
*o
)
2824 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
2827 return DISAS_NORETURN
;
2829 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
2830 tcg_temp_free_i32(m34
);
2834 static DisasJumpType
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2836 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2837 return_low128(o
->out2
);
2841 static DisasJumpType
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2843 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2844 return_low128(o
->out2
);
2848 static DisasJumpType
op_lde(DisasContext
*s
, DisasOps
*o
)
2850 tcg_gen_shli_i64(o
->out
, o
->in2
, 32);
2854 static DisasJumpType
op_llgt(DisasContext
*s
, DisasOps
*o
)
2856 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2860 static DisasJumpType
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2862 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2866 static DisasJumpType
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2868 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2872 static DisasJumpType
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2874 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2878 static DisasJumpType
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2880 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2884 static DisasJumpType
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2886 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2890 static DisasJumpType
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2892 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2896 static DisasJumpType
op_ld64(DisasContext
*s
, DisasOps
*o
)
2898 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2902 static DisasJumpType
op_lat(DisasContext
*s
, DisasOps
*o
)
2904 TCGLabel
*lab
= gen_new_label();
2905 store_reg32_i64(get_field(s
, r1
), o
->in2
);
2906 /* The value is stored even in case of trap. */
2907 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2913 static DisasJumpType
op_lgat(DisasContext
*s
, DisasOps
*o
)
2915 TCGLabel
*lab
= gen_new_label();
2916 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2917 /* The value is stored even in case of trap. */
2918 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2924 static DisasJumpType
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2926 TCGLabel
*lab
= gen_new_label();
2927 store_reg32h_i64(get_field(s
, r1
), o
->in2
);
2928 /* The value is stored even in case of trap. */
2929 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2935 static DisasJumpType
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2937 TCGLabel
*lab
= gen_new_label();
2938 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2939 /* The value is stored even in case of trap. */
2940 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2946 static DisasJumpType
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2948 TCGLabel
*lab
= gen_new_label();
2949 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2950 /* The value is stored even in case of trap. */
2951 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2957 static DisasJumpType
op_loc(DisasContext
*s
, DisasOps
*o
)
2961 disas_jcc(s
, &c
, get_field(s
, m3
));
2964 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2968 TCGv_i32 t32
= tcg_temp_new_i32();
2971 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2974 t
= tcg_temp_new_i64();
2975 tcg_gen_extu_i32_i64(t
, t32
);
2976 tcg_temp_free_i32(t32
);
2978 z
= tcg_const_i64(0);
2979 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2980 tcg_temp_free_i64(t
);
2981 tcg_temp_free_i64(z
);
2987 #ifndef CONFIG_USER_ONLY
2988 static DisasJumpType
op_lctl(DisasContext
*s
, DisasOps
*o
)
2990 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
2991 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
2992 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2993 tcg_temp_free_i32(r1
);
2994 tcg_temp_free_i32(r3
);
2995 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2996 return DISAS_PC_STALE_NOCHAIN
;
2999 static DisasJumpType
op_lctlg(DisasContext
*s
, DisasOps
*o
)
3001 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
3002 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
3003 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
3004 tcg_temp_free_i32(r1
);
3005 tcg_temp_free_i32(r3
);
3006 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3007 return DISAS_PC_STALE_NOCHAIN
;
3010 static DisasJumpType
op_lra(DisasContext
*s
, DisasOps
*o
)
3012 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
3017 static DisasJumpType
op_lpp(DisasContext
*s
, DisasOps
*o
)
3019 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
3023 static DisasJumpType
op_lpsw(DisasContext
*s
, DisasOps
*o
)
3027 per_breaking_event(s
);
3029 t1
= tcg_temp_new_i64();
3030 t2
= tcg_temp_new_i64();
3031 tcg_gen_qemu_ld_i64(t1
, o
->in2
, get_mem_index(s
),
3032 MO_TEUL
| MO_ALIGN_8
);
3033 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
3034 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
3035 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3036 tcg_gen_shli_i64(t1
, t1
, 32);
3037 gen_helper_load_psw(cpu_env
, t1
, t2
);
3038 tcg_temp_free_i64(t1
);
3039 tcg_temp_free_i64(t2
);
3040 return DISAS_NORETURN
;
3043 static DisasJumpType
op_lpswe(DisasContext
*s
, DisasOps
*o
)
3047 per_breaking_event(s
);
3049 t1
= tcg_temp_new_i64();
3050 t2
= tcg_temp_new_i64();
3051 tcg_gen_qemu_ld_i64(t1
, o
->in2
, get_mem_index(s
),
3052 MO_TEUQ
| MO_ALIGN_8
);
3053 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3054 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
3055 gen_helper_load_psw(cpu_env
, t1
, t2
);
3056 tcg_temp_free_i64(t1
);
3057 tcg_temp_free_i64(t2
);
3058 return DISAS_NORETURN
;
3062 static DisasJumpType
op_lam(DisasContext
*s
, DisasOps
*o
)
3064 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
3065 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
3066 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
3067 tcg_temp_free_i32(r1
);
3068 tcg_temp_free_i32(r3
);
3072 static DisasJumpType
op_lm32(DisasContext
*s
, DisasOps
*o
)
3074 int r1
= get_field(s
, r1
);
3075 int r3
= get_field(s
, r3
);
3078 /* Only one register to read. */
3079 t1
= tcg_temp_new_i64();
3080 if (unlikely(r1
== r3
)) {
3081 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3082 store_reg32_i64(r1
, t1
);
3087 /* First load the values of the first and last registers to trigger
3088 possible page faults. */
3089 t2
= tcg_temp_new_i64();
3090 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3091 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
3092 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
3093 store_reg32_i64(r1
, t1
);
3094 store_reg32_i64(r3
, t2
);
3096 /* Only two registers to read. */
3097 if (((r1
+ 1) & 15) == r3
) {
3103 /* Then load the remaining registers. Page fault can't occur. */
3105 tcg_gen_movi_i64(t2
, 4);
3108 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
3109 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3110 store_reg32_i64(r1
, t1
);
3118 static DisasJumpType
op_lmh(DisasContext
*s
, DisasOps
*o
)
3120 int r1
= get_field(s
, r1
);
3121 int r3
= get_field(s
, r3
);
3124 /* Only one register to read. */
3125 t1
= tcg_temp_new_i64();
3126 if (unlikely(r1
== r3
)) {
3127 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3128 store_reg32h_i64(r1
, t1
);
3133 /* First load the values of the first and last registers to trigger
3134 possible page faults. */
3135 t2
= tcg_temp_new_i64();
3136 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3137 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
3138 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
3139 store_reg32h_i64(r1
, t1
);
3140 store_reg32h_i64(r3
, t2
);
3142 /* Only two registers to read. */
3143 if (((r1
+ 1) & 15) == r3
) {
3149 /* Then load the remaining registers. Page fault can't occur. */
3151 tcg_gen_movi_i64(t2
, 4);
3154 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
3155 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3156 store_reg32h_i64(r1
, t1
);
3164 static DisasJumpType
op_lm64(DisasContext
*s
, DisasOps
*o
)
3166 int r1
= get_field(s
, r1
);
3167 int r3
= get_field(s
, r3
);
3170 /* Only one register to read. */
3171 if (unlikely(r1
== r3
)) {
3172 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
3176 /* First load the values of the first and last registers to trigger
3177 possible page faults. */
3178 t1
= tcg_temp_new_i64();
3179 t2
= tcg_temp_new_i64();
3180 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
3181 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
3182 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
3183 tcg_gen_mov_i64(regs
[r1
], t1
);
3186 /* Only two registers to read. */
3187 if (((r1
+ 1) & 15) == r3
) {
3192 /* Then load the remaining registers. Page fault can't occur. */
3194 tcg_gen_movi_i64(t1
, 8);
3197 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
3198 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
3205 static DisasJumpType
op_lpd(DisasContext
*s
, DisasOps
*o
)
3208 MemOp mop
= s
->insn
->data
;
3210 /* In a parallel context, stop the world and single step. */
3211 if (tb_cflags(s
->base
.tb
) & CF_PARALLEL
) {
3214 gen_exception(EXCP_ATOMIC
);
3215 return DISAS_NORETURN
;
3218 /* In a serial context, perform the two loads ... */
3219 a1
= get_address(s
, 0, get_field(s
, b1
), get_field(s
, d1
));
3220 a2
= get_address(s
, 0, get_field(s
, b2
), get_field(s
, d2
));
3221 tcg_gen_qemu_ld_i64(o
->out
, a1
, get_mem_index(s
), mop
| MO_ALIGN
);
3222 tcg_gen_qemu_ld_i64(o
->out2
, a2
, get_mem_index(s
), mop
| MO_ALIGN
);
3223 tcg_temp_free_i64(a1
);
3224 tcg_temp_free_i64(a2
);
3226 /* ... and indicate that we performed them while interlocked. */
3227 gen_op_movi_cc(s
, 0);
3231 static DisasJumpType
op_lpq(DisasContext
*s
, DisasOps
*o
)
3233 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
3234 gen_helper_lpq(o
->out
, cpu_env
, o
->in2
);
3235 } else if (HAVE_ATOMIC128
) {
3236 gen_helper_lpq_parallel(o
->out
, cpu_env
, o
->in2
);
3238 gen_helper_exit_atomic(cpu_env
);
3239 return DISAS_NORETURN
;
3241 return_low128(o
->out2
);
3245 #ifndef CONFIG_USER_ONLY
3246 static DisasJumpType
op_lura(DisasContext
*s
, DisasOps
*o
)
3248 tcg_gen_qemu_ld_tl(o
->out
, o
->in2
, MMU_REAL_IDX
, s
->insn
->data
);
3253 static DisasJumpType
op_lzrb(DisasContext
*s
, DisasOps
*o
)
3255 tcg_gen_andi_i64(o
->out
, o
->in2
, -256);
3259 static DisasJumpType
op_lcbb(DisasContext
*s
, DisasOps
*o
)
3261 const int64_t block_size
= (1ull << (get_field(s
, m3
) + 6));
3263 if (get_field(s
, m3
) > 6) {
3264 gen_program_exception(s
, PGM_SPECIFICATION
);
3265 return DISAS_NORETURN
;
3268 tcg_gen_ori_i64(o
->addr1
, o
->addr1
, -block_size
);
3269 tcg_gen_neg_i64(o
->addr1
, o
->addr1
);
3270 tcg_gen_movi_i64(o
->out
, 16);
3271 tcg_gen_umin_i64(o
->out
, o
->out
, o
->addr1
);
3272 gen_op_update1_cc_i64(s
, CC_OP_LCBB
, o
->out
);
3276 static DisasJumpType
op_mc(DisasContext
*s
, DisasOps
*o
)
3278 #if !defined(CONFIG_USER_ONLY)
3281 const uint16_t monitor_class
= get_field(s
, i2
);
3283 if (monitor_class
& 0xff00) {
3284 gen_program_exception(s
, PGM_SPECIFICATION
);
3285 return DISAS_NORETURN
;
3288 #if !defined(CONFIG_USER_ONLY)
3289 i2
= tcg_const_i32(monitor_class
);
3290 gen_helper_monitor_call(cpu_env
, o
->addr1
, i2
);
3291 tcg_temp_free_i32(i2
);
3293 /* Defaults to a NOP. */
3297 static DisasJumpType
op_mov2(DisasContext
*s
, DisasOps
*o
)
3300 o
->g_out
= o
->g_in2
;
3306 static DisasJumpType
op_mov2e(DisasContext
*s
, DisasOps
*o
)
3308 int b2
= get_field(s
, b2
);
3309 TCGv ar1
= tcg_temp_new_i64();
3312 o
->g_out
= o
->g_in2
;
3316 switch (s
->base
.tb
->flags
& FLAG_MASK_ASC
) {
3317 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
3318 tcg_gen_movi_i64(ar1
, 0);
3320 case PSW_ASC_ACCREG
>> FLAG_MASK_PSW_SHIFT
:
3321 tcg_gen_movi_i64(ar1
, 1);
3323 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
3325 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
3327 tcg_gen_movi_i64(ar1
, 0);
3330 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
3331 tcg_gen_movi_i64(ar1
, 2);
3335 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
3336 tcg_temp_free_i64(ar1
);
3341 static DisasJumpType
op_movx(DisasContext
*s
, DisasOps
*o
)
3345 o
->g_out
= o
->g_in1
;
3346 o
->g_out2
= o
->g_in2
;
3349 o
->g_in1
= o
->g_in2
= false;
3353 static DisasJumpType
op_mvc(DisasContext
*s
, DisasOps
*o
)
3355 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
3356 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
3357 tcg_temp_free_i32(l
);
3361 static DisasJumpType
op_mvcin(DisasContext
*s
, DisasOps
*o
)
3363 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
3364 gen_helper_mvcin(cpu_env
, l
, o
->addr1
, o
->in2
);
3365 tcg_temp_free_i32(l
);
3369 static DisasJumpType
op_mvcl(DisasContext
*s
, DisasOps
*o
)
3371 int r1
= get_field(s
, r1
);
3372 int r2
= get_field(s
, r2
);
3375 /* r1 and r2 must be even. */
3376 if (r1
& 1 || r2
& 1) {
3377 gen_program_exception(s
, PGM_SPECIFICATION
);
3378 return DISAS_NORETURN
;
3381 t1
= tcg_const_i32(r1
);
3382 t2
= tcg_const_i32(r2
);
3383 gen_helper_mvcl(cc_op
, cpu_env
, t1
, t2
);
3384 tcg_temp_free_i32(t1
);
3385 tcg_temp_free_i32(t2
);
3390 static DisasJumpType
op_mvcle(DisasContext
*s
, DisasOps
*o
)
3392 int r1
= get_field(s
, r1
);
3393 int r3
= get_field(s
, r3
);
3396 /* r1 and r3 must be even. */
3397 if (r1
& 1 || r3
& 1) {
3398 gen_program_exception(s
, PGM_SPECIFICATION
);
3399 return DISAS_NORETURN
;
3402 t1
= tcg_const_i32(r1
);
3403 t3
= tcg_const_i32(r3
);
3404 gen_helper_mvcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3405 tcg_temp_free_i32(t1
);
3406 tcg_temp_free_i32(t3
);
3411 static DisasJumpType
op_mvclu(DisasContext
*s
, DisasOps
*o
)
3413 int r1
= get_field(s
, r1
);
3414 int r3
= get_field(s
, r3
);
3417 /* r1 and r3 must be even. */
3418 if (r1
& 1 || r3
& 1) {
3419 gen_program_exception(s
, PGM_SPECIFICATION
);
3420 return DISAS_NORETURN
;
3423 t1
= tcg_const_i32(r1
);
3424 t3
= tcg_const_i32(r3
);
3425 gen_helper_mvclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3426 tcg_temp_free_i32(t1
);
3427 tcg_temp_free_i32(t3
);
3432 static DisasJumpType
op_mvcos(DisasContext
*s
, DisasOps
*o
)
3434 int r3
= get_field(s
, r3
);
3435 gen_helper_mvcos(cc_op
, cpu_env
, o
->addr1
, o
->in2
, regs
[r3
]);
3440 #ifndef CONFIG_USER_ONLY
3441 static DisasJumpType
op_mvcp(DisasContext
*s
, DisasOps
*o
)
3443 int r1
= get_field(s
, l1
);
3444 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3449 static DisasJumpType
op_mvcs(DisasContext
*s
, DisasOps
*o
)
3451 int r1
= get_field(s
, l1
);
3452 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3458 static DisasJumpType
op_mvn(DisasContext
*s
, DisasOps
*o
)
3460 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
3461 gen_helper_mvn(cpu_env
, l
, o
->addr1
, o
->in2
);
3462 tcg_temp_free_i32(l
);
3466 static DisasJumpType
op_mvo(DisasContext
*s
, DisasOps
*o
)
3468 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
3469 gen_helper_mvo(cpu_env
, l
, o
->addr1
, o
->in2
);
3470 tcg_temp_free_i32(l
);
3474 static DisasJumpType
op_mvpg(DisasContext
*s
, DisasOps
*o
)
3476 TCGv_i32 t1
= tcg_const_i32(get_field(s
, r1
));
3477 TCGv_i32 t2
= tcg_const_i32(get_field(s
, r2
));
3479 gen_helper_mvpg(cc_op
, cpu_env
, regs
[0], t1
, t2
);
3480 tcg_temp_free_i32(t1
);
3481 tcg_temp_free_i32(t2
);
3486 static DisasJumpType
op_mvst(DisasContext
*s
, DisasOps
*o
)
3488 TCGv_i32 t1
= tcg_const_i32(get_field(s
, r1
));
3489 TCGv_i32 t2
= tcg_const_i32(get_field(s
, r2
));
3491 gen_helper_mvst(cc_op
, cpu_env
, t1
, t2
);
3492 tcg_temp_free_i32(t1
);
3493 tcg_temp_free_i32(t2
);
3498 static DisasJumpType
op_mvz(DisasContext
*s
, DisasOps
*o
)
3500 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
3501 gen_helper_mvz(cpu_env
, l
, o
->addr1
, o
->in2
);
3502 tcg_temp_free_i32(l
);
3506 static DisasJumpType
op_mul(DisasContext
*s
, DisasOps
*o
)
3508 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3512 static DisasJumpType
op_mul128(DisasContext
*s
, DisasOps
*o
)
3514 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
3518 static DisasJumpType
op_muls128(DisasContext
*s
, DisasOps
*o
)
3520 tcg_gen_muls2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
3524 static DisasJumpType
op_meeb(DisasContext
*s
, DisasOps
*o
)
3526 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3530 static DisasJumpType
op_mdeb(DisasContext
*s
, DisasOps
*o
)
3532 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3536 static DisasJumpType
op_mdb(DisasContext
*s
, DisasOps
*o
)
3538 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3542 static DisasJumpType
op_mxb(DisasContext
*s
, DisasOps
*o
)
3544 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3545 return_low128(o
->out2
);
3549 static DisasJumpType
op_mxdb(DisasContext
*s
, DisasOps
*o
)
3551 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3552 return_low128(o
->out2
);
3556 static DisasJumpType
op_maeb(DisasContext
*s
, DisasOps
*o
)
3558 TCGv_i64 r3
= load_freg32_i64(get_field(s
, r3
));
3559 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3560 tcg_temp_free_i64(r3
);
3564 static DisasJumpType
op_madb(DisasContext
*s
, DisasOps
*o
)
3566 TCGv_i64 r3
= load_freg(get_field(s
, r3
));
3567 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3568 tcg_temp_free_i64(r3
);
3572 static DisasJumpType
op_mseb(DisasContext
*s
, DisasOps
*o
)
3574 TCGv_i64 r3
= load_freg32_i64(get_field(s
, r3
));
3575 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3576 tcg_temp_free_i64(r3
);
3580 static DisasJumpType
op_msdb(DisasContext
*s
, DisasOps
*o
)
3582 TCGv_i64 r3
= load_freg(get_field(s
, r3
));
3583 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3584 tcg_temp_free_i64(r3
);
3588 static DisasJumpType
op_nabs(DisasContext
*s
, DisasOps
*o
)
3591 z
= tcg_const_i64(0);
3592 n
= tcg_temp_new_i64();
3593 tcg_gen_neg_i64(n
, o
->in2
);
3594 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
3595 tcg_temp_free_i64(n
);
3596 tcg_temp_free_i64(z
);
3600 static DisasJumpType
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
3602 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3606 static DisasJumpType
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
3608 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3612 static DisasJumpType
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
3614 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3615 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3619 static DisasJumpType
op_nc(DisasContext
*s
, DisasOps
*o
)
3621 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
3622 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3623 tcg_temp_free_i32(l
);
3628 static DisasJumpType
op_neg(DisasContext
*s
, DisasOps
*o
)
3630 tcg_gen_neg_i64(o
->out
, o
->in2
);
3634 static DisasJumpType
op_negf32(DisasContext
*s
, DisasOps
*o
)
3636 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3640 static DisasJumpType
op_negf64(DisasContext
*s
, DisasOps
*o
)
3642 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3646 static DisasJumpType
op_negf128(DisasContext
*s
, DisasOps
*o
)
3648 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3649 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3653 static DisasJumpType
op_oc(DisasContext
*s
, DisasOps
*o
)
3655 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
3656 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3657 tcg_temp_free_i32(l
);
3662 static DisasJumpType
op_or(DisasContext
*s
, DisasOps
*o
)
3664 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3668 static DisasJumpType
op_ori(DisasContext
*s
, DisasOps
*o
)
3670 int shift
= s
->insn
->data
& 0xff;
3671 int size
= s
->insn
->data
>> 8;
3672 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3675 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3676 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3678 /* Produce the CC from only the bits manipulated. */
3679 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3680 set_cc_nz_u64(s
, cc_dst
);
3684 static DisasJumpType
op_oi(DisasContext
*s
, DisasOps
*o
)
3686 o
->in1
= tcg_temp_new_i64();
3688 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
3689 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
3691 /* Perform the atomic operation in memory. */
3692 tcg_gen_atomic_fetch_or_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
3696 /* Recompute also for atomic case: needed for setting CC. */
3697 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3699 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
3700 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
3705 static DisasJumpType
op_pack(DisasContext
*s
, DisasOps
*o
)
3707 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
3708 gen_helper_pack(cpu_env
, l
, o
->addr1
, o
->in2
);
3709 tcg_temp_free_i32(l
);
3713 static DisasJumpType
op_pka(DisasContext
*s
, DisasOps
*o
)
3715 int l2
= get_field(s
, l2
) + 1;
3718 /* The length must not exceed 32 bytes. */
3720 gen_program_exception(s
, PGM_SPECIFICATION
);
3721 return DISAS_NORETURN
;
3723 l
= tcg_const_i32(l2
);
3724 gen_helper_pka(cpu_env
, o
->addr1
, o
->in2
, l
);
3725 tcg_temp_free_i32(l
);
3729 static DisasJumpType
op_pku(DisasContext
*s
, DisasOps
*o
)
3731 int l2
= get_field(s
, l2
) + 1;
3734 /* The length must be even and should not exceed 64 bytes. */
3735 if ((l2
& 1) || (l2
> 64)) {
3736 gen_program_exception(s
, PGM_SPECIFICATION
);
3737 return DISAS_NORETURN
;
3739 l
= tcg_const_i32(l2
);
3740 gen_helper_pku(cpu_env
, o
->addr1
, o
->in2
, l
);
3741 tcg_temp_free_i32(l
);
3745 static DisasJumpType
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3747 gen_helper_popcnt(o
->out
, o
->in2
);
3751 #ifndef CONFIG_USER_ONLY
3752 static DisasJumpType
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3754 gen_helper_ptlb(cpu_env
);
3759 static DisasJumpType
op_risbg(DisasContext
*s
, DisasOps
*o
)
3761 int i3
= get_field(s
, i3
);
3762 int i4
= get_field(s
, i4
);
3763 int i5
= get_field(s
, i5
);
3764 int do_zero
= i4
& 0x80;
3765 uint64_t mask
, imask
, pmask
;
3768 /* Adjust the arguments for the specific insn. */
3769 switch (s
->fields
.op2
) {
3770 case 0x55: /* risbg */
3771 case 0x59: /* risbgn */
3776 case 0x5d: /* risbhg */
3779 pmask
= 0xffffffff00000000ull
;
3781 case 0x51: /* risblg */
3782 i3
= (i3
& 31) + 32;
3783 i4
= (i4
& 31) + 32;
3784 pmask
= 0x00000000ffffffffull
;
3787 g_assert_not_reached();
3790 /* MASK is the set of bits to be inserted from R2. */
3792 /* [0...i3---i4...63] */
3793 mask
= (-1ull >> i3
) & (-1ull << (63 - i4
));
3795 /* [0---i4...i3---63] */
3796 mask
= (-1ull >> i3
) | (-1ull << (63 - i4
));
3798 /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
3801 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3802 insns, we need to keep the other half of the register. */
3803 imask
= ~mask
| ~pmask
;
3812 /* In some cases we can implement this with extract. */
3813 if (imask
== 0 && pos
== 0 && len
> 0 && len
<= rot
) {
3814 tcg_gen_extract_i64(o
->out
, o
->in2
, 64 - rot
, len
);
3818 /* In some cases we can implement this with deposit. */
3819 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3820 /* Note that we rotate the bits to be inserted to the lsb, not to
3821 the position as described in the PoO. */
3822 rot
= (rot
- pos
) & 63;
3827 /* Rotate the input as necessary. */
3828 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3830 /* Insert the selected bits into the output. */
3833 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3835 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3837 } else if (imask
== 0) {
3838 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3840 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3841 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3842 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3847 static DisasJumpType
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3849 int i3
= get_field(s
, i3
);
3850 int i4
= get_field(s
, i4
);
3851 int i5
= get_field(s
, i5
);
3854 /* If this is a test-only form, arrange to discard the result. */
3856 o
->out
= tcg_temp_new_i64();
3864 /* MASK is the set of bits to be operated on from R2.
3865 Take care for I3/I4 wraparound. */
3868 mask
^= ~0ull >> i4
>> 1;
3870 mask
|= ~(~0ull >> i4
>> 1);
3873 /* Rotate the input as necessary. */
3874 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3877 switch (s
->fields
.op2
) {
3878 case 0x54: /* AND */
3879 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3880 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3883 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3884 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3886 case 0x57: /* XOR */
3887 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3888 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3895 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3896 set_cc_nz_u64(s
, cc_dst
);
3900 static DisasJumpType
op_rev16(DisasContext
*s
, DisasOps
*o
)
3902 tcg_gen_bswap16_i64(o
->out
, o
->in2
, TCG_BSWAP_IZ
| TCG_BSWAP_OZ
);
3906 static DisasJumpType
op_rev32(DisasContext
*s
, DisasOps
*o
)
3908 tcg_gen_bswap32_i64(o
->out
, o
->in2
, TCG_BSWAP_IZ
| TCG_BSWAP_OZ
);
3912 static DisasJumpType
op_rev64(DisasContext
*s
, DisasOps
*o
)
3914 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3918 static DisasJumpType
op_rll32(DisasContext
*s
, DisasOps
*o
)
3920 TCGv_i32 t1
= tcg_temp_new_i32();
3921 TCGv_i32 t2
= tcg_temp_new_i32();
3922 TCGv_i32 to
= tcg_temp_new_i32();
3923 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3924 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3925 tcg_gen_rotl_i32(to
, t1
, t2
);
3926 tcg_gen_extu_i32_i64(o
->out
, to
);
3927 tcg_temp_free_i32(t1
);
3928 tcg_temp_free_i32(t2
);
3929 tcg_temp_free_i32(to
);
3933 static DisasJumpType
op_rll64(DisasContext
*s
, DisasOps
*o
)
3935 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3939 #ifndef CONFIG_USER_ONLY
3940 static DisasJumpType
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3942 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3947 static DisasJumpType
op_sacf(DisasContext
*s
, DisasOps
*o
)
3949 gen_helper_sacf(cpu_env
, o
->in2
);
3950 /* Addressing mode has changed, so end the block. */
3951 return DISAS_PC_STALE
;
3955 static DisasJumpType
op_sam(DisasContext
*s
, DisasOps
*o
)
3957 int sam
= s
->insn
->data
;
3973 /* Bizarre but true, we check the address of the current insn for the
3974 specification exception, not the next to be executed. Thus the PoO
3975 documents that Bad Things Happen two bytes before the end. */
3976 if (s
->base
.pc_next
& ~mask
) {
3977 gen_program_exception(s
, PGM_SPECIFICATION
);
3978 return DISAS_NORETURN
;
3982 tsam
= tcg_const_i64(sam
);
3983 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3984 tcg_temp_free_i64(tsam
);
3986 /* Always exit the TB, since we (may have) changed execution mode. */
3987 return DISAS_PC_STALE
;
3990 static DisasJumpType
op_sar(DisasContext
*s
, DisasOps
*o
)
3992 int r1
= get_field(s
, r1
);
3993 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3997 static DisasJumpType
op_seb(DisasContext
*s
, DisasOps
*o
)
3999 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
4003 static DisasJumpType
op_sdb(DisasContext
*s
, DisasOps
*o
)
4005 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
4009 static DisasJumpType
op_sxb(DisasContext
*s
, DisasOps
*o
)
4011 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
4012 return_low128(o
->out2
);
4016 static DisasJumpType
op_sqeb(DisasContext
*s
, DisasOps
*o
)
4018 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
4022 static DisasJumpType
op_sqdb(DisasContext
*s
, DisasOps
*o
)
4024 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
4028 static DisasJumpType
op_sqxb(DisasContext
*s
, DisasOps
*o
)
4030 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
4031 return_low128(o
->out2
);
4035 #ifndef CONFIG_USER_ONLY
4036 static DisasJumpType
op_servc(DisasContext
*s
, DisasOps
*o
)
4038 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
4043 static DisasJumpType
op_sigp(DisasContext
*s
, DisasOps
*o
)
4045 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4046 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
4047 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, r3
);
4049 tcg_temp_free_i32(r1
);
4050 tcg_temp_free_i32(r3
);
4055 static DisasJumpType
op_soc(DisasContext
*s
, DisasOps
*o
)
4062 disas_jcc(s
, &c
, get_field(s
, m3
));
4064 /* We want to store when the condition is fulfilled, so branch
4065 out when it's not */
4066 c
.cond
= tcg_invert_cond(c
.cond
);
4068 lab
= gen_new_label();
4070 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
4072 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
4076 r1
= get_field(s
, r1
);
4077 a
= get_address(s
, 0, get_field(s
, b2
), get_field(s
, d2
));
4078 switch (s
->insn
->data
) {
4080 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
4083 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
4085 case 2: /* STOCFH */
4086 h
= tcg_temp_new_i64();
4087 tcg_gen_shri_i64(h
, regs
[r1
], 32);
4088 tcg_gen_qemu_st32(h
, a
, get_mem_index(s
));
4089 tcg_temp_free_i64(h
);
4092 g_assert_not_reached();
4094 tcg_temp_free_i64(a
);
4100 static DisasJumpType
op_sla(DisasContext
*s
, DisasOps
*o
)
4103 uint64_t sign
= 1ull << s
->insn
->data
;
4104 if (s
->insn
->data
== 31) {
4105 t
= tcg_temp_new_i64();
4106 tcg_gen_shli_i64(t
, o
->in1
, 32);
4110 gen_op_update2_cc_i64(s
, CC_OP_SLA
, t
, o
->in2
);
4111 if (s
->insn
->data
== 31) {
4112 tcg_temp_free_i64(t
);
4114 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
4115 /* The arithmetic left shift is curious in that it does not affect
4116 the sign bit. Copy that over from the source unchanged. */
4117 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
4118 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
4119 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
4123 static DisasJumpType
op_sll(DisasContext
*s
, DisasOps
*o
)
4125 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
4129 static DisasJumpType
op_sra(DisasContext
*s
, DisasOps
*o
)
4131 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
4135 static DisasJumpType
op_srl(DisasContext
*s
, DisasOps
*o
)
4137 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
4141 static DisasJumpType
op_sfpc(DisasContext
*s
, DisasOps
*o
)
4143 gen_helper_sfpc(cpu_env
, o
->in2
);
4147 static DisasJumpType
op_sfas(DisasContext
*s
, DisasOps
*o
)
4149 gen_helper_sfas(cpu_env
, o
->in2
);
4153 static DisasJumpType
op_srnm(DisasContext
*s
, DisasOps
*o
)
4155 /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4156 tcg_gen_andi_i64(o
->addr1
, o
->addr1
, 0x3ull
);
4157 gen_helper_srnm(cpu_env
, o
->addr1
);
4161 static DisasJumpType
op_srnmb(DisasContext
*s
, DisasOps
*o
)
4163 /* Bits 0-55 are are ignored. */
4164 tcg_gen_andi_i64(o
->addr1
, o
->addr1
, 0xffull
);
4165 gen_helper_srnm(cpu_env
, o
->addr1
);
4169 static DisasJumpType
op_srnmt(DisasContext
*s
, DisasOps
*o
)
4171 TCGv_i64 tmp
= tcg_temp_new_i64();
4173 /* Bits other than 61-63 are ignored. */
4174 tcg_gen_andi_i64(o
->addr1
, o
->addr1
, 0x7ull
);
4176 /* No need to call a helper, we don't implement dfp */
4177 tcg_gen_ld32u_i64(tmp
, cpu_env
, offsetof(CPUS390XState
, fpc
));
4178 tcg_gen_deposit_i64(tmp
, tmp
, o
->addr1
, 4, 3);
4179 tcg_gen_st32_i64(tmp
, cpu_env
, offsetof(CPUS390XState
, fpc
));
4181 tcg_temp_free_i64(tmp
);
4185 static DisasJumpType
op_spm(DisasContext
*s
, DisasOps
*o
)
4187 tcg_gen_extrl_i64_i32(cc_op
, o
->in1
);
4188 tcg_gen_extract_i32(cc_op
, cc_op
, 28, 2);
4191 tcg_gen_shri_i64(o
->in1
, o
->in1
, 24);
4192 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in1
, PSW_SHIFT_MASK_PM
, 4);
4196 static DisasJumpType
op_ectg(DisasContext
*s
, DisasOps
*o
)
4198 int b1
= get_field(s
, b1
);
4199 int d1
= get_field(s
, d1
);
4200 int b2
= get_field(s
, b2
);
4201 int d2
= get_field(s
, d2
);
4202 int r3
= get_field(s
, r3
);
4203 TCGv_i64 tmp
= tcg_temp_new_i64();
4205 /* fetch all operands first */
4206 o
->in1
= tcg_temp_new_i64();
4207 tcg_gen_addi_i64(o
->in1
, regs
[b1
], d1
);
4208 o
->in2
= tcg_temp_new_i64();
4209 tcg_gen_addi_i64(o
->in2
, regs
[b2
], d2
);
4210 o
->addr1
= tcg_temp_new_i64();
4211 gen_addi_and_wrap_i64(s
, o
->addr1
, regs
[r3
], 0);
4213 /* load the third operand into r3 before modifying anything */
4214 tcg_gen_qemu_ld64(regs
[r3
], o
->addr1
, get_mem_index(s
));
4216 /* subtract CPU timer from first operand and store in GR0 */
4217 gen_helper_stpt(tmp
, cpu_env
);
4218 tcg_gen_sub_i64(regs
[0], o
->in1
, tmp
);
4220 /* store second operand in GR1 */
4221 tcg_gen_mov_i64(regs
[1], o
->in2
);
4223 tcg_temp_free_i64(tmp
);
4227 #ifndef CONFIG_USER_ONLY
4228 static DisasJumpType
op_spka(DisasContext
*s
, DisasOps
*o
)
4230 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
4231 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
, 4);
4235 static DisasJumpType
op_sske(DisasContext
*s
, DisasOps
*o
)
4237 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
4241 static DisasJumpType
op_ssm(DisasContext
*s
, DisasOps
*o
)
4243 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
4244 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4245 return DISAS_PC_STALE_NOCHAIN
;
4248 static DisasJumpType
op_stap(DisasContext
*s
, DisasOps
*o
)
4250 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, core_id
));
4255 static DisasJumpType
op_stck(DisasContext
*s
, DisasOps
*o
)
4257 gen_helper_stck(o
->out
, cpu_env
);
4258 /* ??? We don't implement clock states. */
4259 gen_op_movi_cc(s
, 0);
4263 static DisasJumpType
op_stcke(DisasContext
*s
, DisasOps
*o
)
4265 TCGv_i64 c1
= tcg_temp_new_i64();
4266 TCGv_i64 c2
= tcg_temp_new_i64();
4267 TCGv_i64 todpr
= tcg_temp_new_i64();
4268 gen_helper_stck(c1
, cpu_env
);
4269 /* 16 bit value store in an uint32_t (only valid bits set) */
4270 tcg_gen_ld32u_i64(todpr
, cpu_env
, offsetof(CPUS390XState
, todpr
));
4271 /* Shift the 64-bit value into its place as a zero-extended
4272 104-bit value. Note that "bit positions 64-103 are always
4273 non-zero so that they compare differently to STCK"; we set
4274 the least significant bit to 1. */
4275 tcg_gen_shli_i64(c2
, c1
, 56);
4276 tcg_gen_shri_i64(c1
, c1
, 8);
4277 tcg_gen_ori_i64(c2
, c2
, 0x10000);
4278 tcg_gen_or_i64(c2
, c2
, todpr
);
4279 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
4280 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
4281 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
4282 tcg_temp_free_i64(c1
);
4283 tcg_temp_free_i64(c2
);
4284 tcg_temp_free_i64(todpr
);
4285 /* ??? We don't implement clock states. */
4286 gen_op_movi_cc(s
, 0);
4290 #ifndef CONFIG_USER_ONLY
4291 static DisasJumpType
op_sck(DisasContext
*s
, DisasOps
*o
)
4293 tcg_gen_qemu_ld_i64(o
->in1
, o
->addr1
, get_mem_index(s
), MO_TEUQ
| MO_ALIGN
);
4294 gen_helper_sck(cc_op
, cpu_env
, o
->in1
);
4299 static DisasJumpType
op_sckc(DisasContext
*s
, DisasOps
*o
)
4301 gen_helper_sckc(cpu_env
, o
->in2
);
4305 static DisasJumpType
op_sckpf(DisasContext
*s
, DisasOps
*o
)
4307 gen_helper_sckpf(cpu_env
, regs
[0]);
4311 static DisasJumpType
op_stckc(DisasContext
*s
, DisasOps
*o
)
4313 gen_helper_stckc(o
->out
, cpu_env
);
4317 static DisasJumpType
op_stctg(DisasContext
*s
, DisasOps
*o
)
4319 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4320 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
4321 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
4322 tcg_temp_free_i32(r1
);
4323 tcg_temp_free_i32(r3
);
4327 static DisasJumpType
op_stctl(DisasContext
*s
, DisasOps
*o
)
4329 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4330 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
4331 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
4332 tcg_temp_free_i32(r1
);
4333 tcg_temp_free_i32(r3
);
4337 static DisasJumpType
op_stidp(DisasContext
*s
, DisasOps
*o
)
4339 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpuid
));
4343 static DisasJumpType
op_spt(DisasContext
*s
, DisasOps
*o
)
4345 gen_helper_spt(cpu_env
, o
->in2
);
4349 static DisasJumpType
op_stfl(DisasContext
*s
, DisasOps
*o
)
4351 gen_helper_stfl(cpu_env
);
4355 static DisasJumpType
op_stpt(DisasContext
*s
, DisasOps
*o
)
4357 gen_helper_stpt(o
->out
, cpu_env
);
4361 static DisasJumpType
op_stsi(DisasContext
*s
, DisasOps
*o
)
4363 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
4368 static DisasJumpType
op_spx(DisasContext
*s
, DisasOps
*o
)
4370 gen_helper_spx(cpu_env
, o
->in2
);
4374 static DisasJumpType
op_xsch(DisasContext
*s
, DisasOps
*o
)
4376 gen_helper_xsch(cpu_env
, regs
[1]);
4381 static DisasJumpType
op_csch(DisasContext
*s
, DisasOps
*o
)
4383 gen_helper_csch(cpu_env
, regs
[1]);
4388 static DisasJumpType
op_hsch(DisasContext
*s
, DisasOps
*o
)
4390 gen_helper_hsch(cpu_env
, regs
[1]);
4395 static DisasJumpType
op_msch(DisasContext
*s
, DisasOps
*o
)
4397 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
4402 static DisasJumpType
op_rchp(DisasContext
*s
, DisasOps
*o
)
4404 gen_helper_rchp(cpu_env
, regs
[1]);
4409 static DisasJumpType
op_rsch(DisasContext
*s
, DisasOps
*o
)
4411 gen_helper_rsch(cpu_env
, regs
[1]);
4416 static DisasJumpType
op_sal(DisasContext
*s
, DisasOps
*o
)
4418 gen_helper_sal(cpu_env
, regs
[1]);
4422 static DisasJumpType
op_schm(DisasContext
*s
, DisasOps
*o
)
4424 gen_helper_schm(cpu_env
, regs
[1], regs
[2], o
->in2
);
4428 static DisasJumpType
op_siga(DisasContext
*s
, DisasOps
*o
)
4430 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4431 gen_op_movi_cc(s
, 3);
4435 static DisasJumpType
op_stcps(DisasContext
*s
, DisasOps
*o
)
4437 /* The instruction is suppressed if not provided. */
4441 static DisasJumpType
op_ssch(DisasContext
*s
, DisasOps
*o
)
4443 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
4448 static DisasJumpType
op_stsch(DisasContext
*s
, DisasOps
*o
)
4450 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
4455 static DisasJumpType
op_stcrw(DisasContext
*s
, DisasOps
*o
)
4457 gen_helper_stcrw(cpu_env
, o
->in2
);
4462 static DisasJumpType
op_tpi(DisasContext
*s
, DisasOps
*o
)
4464 gen_helper_tpi(cc_op
, cpu_env
, o
->addr1
);
4469 static DisasJumpType
op_tsch(DisasContext
*s
, DisasOps
*o
)
4471 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
4476 static DisasJumpType
op_chsc(DisasContext
*s
, DisasOps
*o
)
4478 gen_helper_chsc(cpu_env
, o
->in2
);
4483 static DisasJumpType
op_stpx(DisasContext
*s
, DisasOps
*o
)
4485 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
4486 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
4490 static DisasJumpType
op_stnosm(DisasContext
*s
, DisasOps
*o
)
4492 uint64_t i2
= get_field(s
, i2
);
4495 /* It is important to do what the instruction name says: STORE THEN.
4496 If we let the output hook perform the store then if we fault and
4497 restart, we'll have the wrong SYSTEM MASK in place. */
4498 t
= tcg_temp_new_i64();
4499 tcg_gen_shri_i64(t
, psw_mask
, 56);
4500 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
4501 tcg_temp_free_i64(t
);
4503 if (s
->fields
.op
== 0xac) {
4504 tcg_gen_andi_i64(psw_mask
, psw_mask
,
4505 (i2
<< 56) | 0x00ffffffffffffffull
);
4507 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
4510 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4511 return DISAS_PC_STALE_NOCHAIN
;
4514 static DisasJumpType
op_stura(DisasContext
*s
, DisasOps
*o
)
4516 tcg_gen_qemu_st_tl(o
->in1
, o
->in2
, MMU_REAL_IDX
, s
->insn
->data
);
4518 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
4520 gen_helper_per_store_real(cpu_env
);
4526 static DisasJumpType
op_stfle(DisasContext
*s
, DisasOps
*o
)
4528 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
4533 static DisasJumpType
op_st8(DisasContext
*s
, DisasOps
*o
)
4535 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
4539 static DisasJumpType
op_st16(DisasContext
*s
, DisasOps
*o
)
4541 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
4545 static DisasJumpType
op_st32(DisasContext
*s
, DisasOps
*o
)
4547 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
4551 static DisasJumpType
op_st64(DisasContext
*s
, DisasOps
*o
)
4553 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
4557 static DisasJumpType
op_stam(DisasContext
*s
, DisasOps
*o
)
4559 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4560 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
4561 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
4562 tcg_temp_free_i32(r1
);
4563 tcg_temp_free_i32(r3
);
4567 static DisasJumpType
op_stcm(DisasContext
*s
, DisasOps
*o
)
4569 int m3
= get_field(s
, m3
);
4570 int pos
, base
= s
->insn
->data
;
4571 TCGv_i64 tmp
= tcg_temp_new_i64();
4573 pos
= base
+ ctz32(m3
) * 8;
4576 /* Effectively a 32-bit store. */
4577 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4578 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
4584 /* Effectively a 16-bit store. */
4585 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4586 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
4593 /* Effectively an 8-bit store. */
4594 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4595 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4599 /* This is going to be a sequence of shifts and stores. */
4600 pos
= base
+ 32 - 8;
4603 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4604 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4605 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
4607 m3
= (m3
<< 1) & 0xf;
4612 tcg_temp_free_i64(tmp
);
4616 static DisasJumpType
op_stm(DisasContext
*s
, DisasOps
*o
)
4618 int r1
= get_field(s
, r1
);
4619 int r3
= get_field(s
, r3
);
4620 int size
= s
->insn
->data
;
4621 TCGv_i64 tsize
= tcg_const_i64(size
);
4625 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
4627 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
4632 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
4636 tcg_temp_free_i64(tsize
);
4640 static DisasJumpType
op_stmh(DisasContext
*s
, DisasOps
*o
)
4642 int r1
= get_field(s
, r1
);
4643 int r3
= get_field(s
, r3
);
4644 TCGv_i64 t
= tcg_temp_new_i64();
4645 TCGv_i64 t4
= tcg_const_i64(4);
4646 TCGv_i64 t32
= tcg_const_i64(32);
4649 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
4650 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
4654 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
4658 tcg_temp_free_i64(t
);
4659 tcg_temp_free_i64(t4
);
4660 tcg_temp_free_i64(t32
);
4664 static DisasJumpType
op_stpq(DisasContext
*s
, DisasOps
*o
)
4666 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
4667 gen_helper_stpq(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4668 } else if (HAVE_ATOMIC128
) {
4669 gen_helper_stpq_parallel(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4671 gen_helper_exit_atomic(cpu_env
);
4672 return DISAS_NORETURN
;
4677 static DisasJumpType
op_srst(DisasContext
*s
, DisasOps
*o
)
4679 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4680 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
4682 gen_helper_srst(cpu_env
, r1
, r2
);
4684 tcg_temp_free_i32(r1
);
4685 tcg_temp_free_i32(r2
);
4690 static DisasJumpType
op_srstu(DisasContext
*s
, DisasOps
*o
)
4692 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4693 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
4695 gen_helper_srstu(cpu_env
, r1
, r2
);
4697 tcg_temp_free_i32(r1
);
4698 tcg_temp_free_i32(r2
);
4703 static DisasJumpType
op_sub(DisasContext
*s
, DisasOps
*o
)
4705 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4709 static DisasJumpType
op_subu64(DisasContext
*s
, DisasOps
*o
)
4711 tcg_gen_movi_i64(cc_src
, 0);
4712 tcg_gen_sub2_i64(o
->out
, cc_src
, o
->in1
, cc_src
, o
->in2
, cc_src
);
4716 /* Compute borrow (0, -1) into cc_src. */
4717 static void compute_borrow(DisasContext
*s
)
4721 /* The borrow value is already in cc_src (0,-1). */
4727 /* The carry flag is the msb of CC; compute into cc_src. */
4728 tcg_gen_extu_i32_i64(cc_src
, cc_op
);
4729 tcg_gen_shri_i64(cc_src
, cc_src
, 1);
4732 /* Convert carry (1,0) to borrow (0,-1). */
4733 tcg_gen_subi_i64(cc_src
, cc_src
, 1);
4738 static DisasJumpType
op_subb32(DisasContext
*s
, DisasOps
*o
)
4742 /* Borrow is {0, -1}, so add to subtract. */
4743 tcg_gen_add_i64(o
->out
, o
->in1
, cc_src
);
4744 tcg_gen_sub_i64(o
->out
, o
->out
, o
->in2
);
4748 static DisasJumpType
op_subb64(DisasContext
*s
, DisasOps
*o
)
4753 * Borrow is {0, -1}, so add to subtract; replicate the
4754 * borrow input to produce 128-bit -1 for the addition.
4756 TCGv_i64 zero
= tcg_const_i64(0);
4757 tcg_gen_add2_i64(o
->out
, cc_src
, o
->in1
, zero
, cc_src
, cc_src
);
4758 tcg_gen_sub2_i64(o
->out
, cc_src
, o
->out
, cc_src
, o
->in2
, zero
);
4759 tcg_temp_free_i64(zero
);
4764 static DisasJumpType
op_svc(DisasContext
*s
, DisasOps
*o
)
4771 t
= tcg_const_i32(get_field(s
, i1
) & 0xff);
4772 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4773 tcg_temp_free_i32(t
);
4775 t
= tcg_const_i32(s
->ilen
);
4776 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4777 tcg_temp_free_i32(t
);
4779 gen_exception(EXCP_SVC
);
4780 return DISAS_NORETURN
;
4783 static DisasJumpType
op_tam(DisasContext
*s
, DisasOps
*o
)
4787 cc
|= (s
->base
.tb
->flags
& FLAG_MASK_64
) ? 2 : 0;
4788 cc
|= (s
->base
.tb
->flags
& FLAG_MASK_32
) ? 1 : 0;
4789 gen_op_movi_cc(s
, cc
);
4793 static DisasJumpType
op_tceb(DisasContext
*s
, DisasOps
*o
)
4795 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4800 static DisasJumpType
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4802 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4807 static DisasJumpType
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4809 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4814 #ifndef CONFIG_USER_ONLY
4816 static DisasJumpType
op_testblock(DisasContext
*s
, DisasOps
*o
)
4818 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4823 static DisasJumpType
op_tprot(DisasContext
*s
, DisasOps
*o
)
4825 gen_helper_tprot(cc_op
, cpu_env
, o
->addr1
, o
->in2
);
4832 static DisasJumpType
op_tp(DisasContext
*s
, DisasOps
*o
)
4834 TCGv_i32 l1
= tcg_const_i32(get_field(s
, l1
) + 1);
4835 gen_helper_tp(cc_op
, cpu_env
, o
->addr1
, l1
);
4836 tcg_temp_free_i32(l1
);
4841 static DisasJumpType
op_tr(DisasContext
*s
, DisasOps
*o
)
4843 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
4844 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4845 tcg_temp_free_i32(l
);
4850 static DisasJumpType
op_tre(DisasContext
*s
, DisasOps
*o
)
4852 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4853 return_low128(o
->out2
);
4858 static DisasJumpType
op_trt(DisasContext
*s
, DisasOps
*o
)
4860 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
4861 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4862 tcg_temp_free_i32(l
);
4867 static DisasJumpType
op_trtr(DisasContext
*s
, DisasOps
*o
)
4869 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
4870 gen_helper_trtr(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4871 tcg_temp_free_i32(l
);
4876 static DisasJumpType
op_trXX(DisasContext
*s
, DisasOps
*o
)
4878 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4879 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
4880 TCGv_i32 sizes
= tcg_const_i32(s
->insn
->opc
& 3);
4881 TCGv_i32 tst
= tcg_temp_new_i32();
4882 int m3
= get_field(s
, m3
);
4884 if (!s390_has_feat(S390_FEAT_ETF2_ENH
)) {
4888 tcg_gen_movi_i32(tst
, -1);
4890 tcg_gen_extrl_i64_i32(tst
, regs
[0]);
4891 if (s
->insn
->opc
& 3) {
4892 tcg_gen_ext8u_i32(tst
, tst
);
4894 tcg_gen_ext16u_i32(tst
, tst
);
4897 gen_helper_trXX(cc_op
, cpu_env
, r1
, r2
, tst
, sizes
);
4899 tcg_temp_free_i32(r1
);
4900 tcg_temp_free_i32(r2
);
4901 tcg_temp_free_i32(sizes
);
4902 tcg_temp_free_i32(tst
);
4907 static DisasJumpType
op_ts(DisasContext
*s
, DisasOps
*o
)
4909 TCGv_i32 t1
= tcg_const_i32(0xff);
4910 tcg_gen_atomic_xchg_i32(t1
, o
->in2
, t1
, get_mem_index(s
), MO_UB
);
4911 tcg_gen_extract_i32(cc_op
, t1
, 7, 1);
4912 tcg_temp_free_i32(t1
);
4917 static DisasJumpType
op_unpk(DisasContext
*s
, DisasOps
*o
)
4919 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
4920 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4921 tcg_temp_free_i32(l
);
4925 static DisasJumpType
op_unpka(DisasContext
*s
, DisasOps
*o
)
4927 int l1
= get_field(s
, l1
) + 1;
4930 /* The length must not exceed 32 bytes. */
4932 gen_program_exception(s
, PGM_SPECIFICATION
);
4933 return DISAS_NORETURN
;
4935 l
= tcg_const_i32(l1
);
4936 gen_helper_unpka(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4937 tcg_temp_free_i32(l
);
4942 static DisasJumpType
op_unpku(DisasContext
*s
, DisasOps
*o
)
4944 int l1
= get_field(s
, l1
) + 1;
4947 /* The length must be even and should not exceed 64 bytes. */
4948 if ((l1
& 1) || (l1
> 64)) {
4949 gen_program_exception(s
, PGM_SPECIFICATION
);
4950 return DISAS_NORETURN
;
4952 l
= tcg_const_i32(l1
);
4953 gen_helper_unpku(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4954 tcg_temp_free_i32(l
);
4960 static DisasJumpType
op_xc(DisasContext
*s
, DisasOps
*o
)
4962 int d1
= get_field(s
, d1
);
4963 int d2
= get_field(s
, d2
);
4964 int b1
= get_field(s
, b1
);
4965 int b2
= get_field(s
, b2
);
4966 int l
= get_field(s
, l1
);
4969 o
->addr1
= get_address(s
, 0, b1
, d1
);
4971 /* If the addresses are identical, this is a store/memset of zero. */
4972 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4973 o
->in2
= tcg_const_i64(0);
4977 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4980 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4984 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4987 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4991 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4994 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4998 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
5000 gen_op_movi_cc(s
, 0);
5004 /* But in general we'll defer to a helper. */
5005 o
->in2
= get_address(s
, 0, b2
, d2
);
5006 t32
= tcg_const_i32(l
);
5007 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
5008 tcg_temp_free_i32(t32
);
5013 static DisasJumpType
op_xor(DisasContext
*s
, DisasOps
*o
)
5015 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
5019 static DisasJumpType
op_xori(DisasContext
*s
, DisasOps
*o
)
5021 int shift
= s
->insn
->data
& 0xff;
5022 int size
= s
->insn
->data
>> 8;
5023 uint64_t mask
= ((1ull << size
) - 1) << shift
;
5026 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
5027 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
5029 /* Produce the CC from only the bits manipulated. */
5030 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
5031 set_cc_nz_u64(s
, cc_dst
);
5035 static DisasJumpType
op_xi(DisasContext
*s
, DisasOps
*o
)
5037 o
->in1
= tcg_temp_new_i64();
5039 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
5040 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
5042 /* Perform the atomic operation in memory. */
5043 tcg_gen_atomic_fetch_xor_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
5047 /* Recompute also for atomic case: needed for setting CC. */
5048 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
5050 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
5051 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
5056 static DisasJumpType
op_zero(DisasContext
*s
, DisasOps
*o
)
5058 o
->out
= tcg_const_i64(0);
5062 static DisasJumpType
op_zero2(DisasContext
*s
, DisasOps
*o
)
5064 o
->out
= tcg_const_i64(0);
5070 #ifndef CONFIG_USER_ONLY
5071 static DisasJumpType
op_clp(DisasContext
*s
, DisasOps
*o
)
5073 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
5075 gen_helper_clp(cpu_env
, r2
);
5076 tcg_temp_free_i32(r2
);
5081 static DisasJumpType
op_pcilg(DisasContext
*s
, DisasOps
*o
)
5083 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5084 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
5086 gen_helper_pcilg(cpu_env
, r1
, r2
);
5087 tcg_temp_free_i32(r1
);
5088 tcg_temp_free_i32(r2
);
5093 static DisasJumpType
op_pcistg(DisasContext
*s
, DisasOps
*o
)
5095 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5096 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
5098 gen_helper_pcistg(cpu_env
, r1
, r2
);
5099 tcg_temp_free_i32(r1
);
5100 tcg_temp_free_i32(r2
);
5105 static DisasJumpType
op_stpcifc(DisasContext
*s
, DisasOps
*o
)
5107 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5108 TCGv_i32 ar
= tcg_const_i32(get_field(s
, b2
));
5110 gen_helper_stpcifc(cpu_env
, r1
, o
->addr1
, ar
);
5111 tcg_temp_free_i32(ar
);
5112 tcg_temp_free_i32(r1
);
5117 static DisasJumpType
op_sic(DisasContext
*s
, DisasOps
*o
)
5119 gen_helper_sic(cpu_env
, o
->in1
, o
->in2
);
5123 static DisasJumpType
op_rpcit(DisasContext
*s
, DisasOps
*o
)
5125 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5126 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
5128 gen_helper_rpcit(cpu_env
, r1
, r2
);
5129 tcg_temp_free_i32(r1
);
5130 tcg_temp_free_i32(r2
);
5135 static DisasJumpType
op_pcistb(DisasContext
*s
, DisasOps
*o
)
5137 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5138 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
5139 TCGv_i32 ar
= tcg_const_i32(get_field(s
, b2
));
5141 gen_helper_pcistb(cpu_env
, r1
, r3
, o
->addr1
, ar
);
5142 tcg_temp_free_i32(ar
);
5143 tcg_temp_free_i32(r1
);
5144 tcg_temp_free_i32(r3
);
5149 static DisasJumpType
op_mpcifc(DisasContext
*s
, DisasOps
*o
)
5151 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5152 TCGv_i32 ar
= tcg_const_i32(get_field(s
, b2
));
5154 gen_helper_mpcifc(cpu_env
, r1
, o
->addr1
, ar
);
5155 tcg_temp_free_i32(ar
);
5156 tcg_temp_free_i32(r1
);
5162 #include "translate_vx.c.inc"
5164 /* ====================================================================== */
5165 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5166 the original inputs), update the various cc data structures in order to
5167 be able to compute the new condition code. */
5169 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
5171 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
5174 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
5176 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
5179 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
5181 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
5184 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
5186 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
5189 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
5191 tcg_gen_shri_i64(cc_src
, o
->out
, 32);
5192 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
5193 gen_op_update2_cc_i64(s
, CC_OP_ADDU
, cc_src
, cc_dst
);
5196 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
5198 gen_op_update2_cc_i64(s
, CC_OP_ADDU
, cc_src
, o
->out
);
5201 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
5203 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
5206 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
5208 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
5211 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
5213 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
5216 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
5218 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
5221 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
5223 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
5226 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
5228 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
5231 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
5233 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
5236 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
5238 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
5241 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
5243 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
5246 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
5248 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
5251 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
5253 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
5256 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
5258 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
5259 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
5262 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
5264 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
5267 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
5269 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
5272 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
5274 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
5277 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
5279 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
5282 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
5284 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
5287 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
5289 tcg_gen_sari_i64(cc_src
, o
->out
, 32);
5290 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
5291 gen_op_update2_cc_i64(s
, CC_OP_SUBU
, cc_src
, cc_dst
);
5294 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
5296 gen_op_update2_cc_i64(s
, CC_OP_SUBU
, cc_src
, o
->out
);
5299 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
5301 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
5304 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
5306 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
5309 static void cout_muls32(DisasContext
*s
, DisasOps
*o
)
5311 gen_op_update1_cc_i64(s
, CC_OP_MULS_32
, o
->out
);
5314 static void cout_muls64(DisasContext
*s
, DisasOps
*o
)
5316 /* out contains "high" part, out2 contains "low" part of 128 bit result */
5317 gen_op_update2_cc_i64(s
, CC_OP_MULS_64
, o
->out
, o
->out2
);
5320 /* ====================================================================== */
5321 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5322 with the TCG register to which we will write. Used in combination with
5323 the "wout" generators, in some cases we need a new temporary, and in
5324 some cases we can write to a TCG global. */
5326 static void prep_new(DisasContext
*s
, DisasOps
*o
)
5328 o
->out
= tcg_temp_new_i64();
5330 #define SPEC_prep_new 0
5332 static void prep_new_P(DisasContext
*s
, DisasOps
*o
)
5334 o
->out
= tcg_temp_new_i64();
5335 o
->out2
= tcg_temp_new_i64();
5337 #define SPEC_prep_new_P 0
5339 static void prep_r1(DisasContext
*s
, DisasOps
*o
)
5341 o
->out
= regs
[get_field(s
, r1
)];
5344 #define SPEC_prep_r1 0
5346 static void prep_r1_P(DisasContext
*s
, DisasOps
*o
)
5348 int r1
= get_field(s
, r1
);
5350 o
->out2
= regs
[r1
+ 1];
5351 o
->g_out
= o
->g_out2
= true;
5353 #define SPEC_prep_r1_P SPEC_r1_even
5355 /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5356 static void prep_x1(DisasContext
*s
, DisasOps
*o
)
5358 o
->out
= load_freg(get_field(s
, r1
));
5359 o
->out2
= load_freg(get_field(s
, r1
) + 2);
5361 #define SPEC_prep_x1 SPEC_r1_f128
5363 /* ====================================================================== */
5364 /* The "Write OUTput" generators. These generally perform some non-trivial
5365 copy of data to TCG globals, or to main memory. The trivial cases are
5366 generally handled by having a "prep" generator install the TCG global
5367 as the destination of the operation. */
5369 static void wout_r1(DisasContext
*s
, DisasOps
*o
)
5371 store_reg(get_field(s
, r1
), o
->out
);
5373 #define SPEC_wout_r1 0
5375 static void wout_out2_r1(DisasContext
*s
, DisasOps
*o
)
5377 store_reg(get_field(s
, r1
), o
->out2
);
5379 #define SPEC_wout_out2_r1 0
5381 static void wout_r1_8(DisasContext
*s
, DisasOps
*o
)
5383 int r1
= get_field(s
, r1
);
5384 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
5386 #define SPEC_wout_r1_8 0
5388 static void wout_r1_16(DisasContext
*s
, DisasOps
*o
)
5390 int r1
= get_field(s
, r1
);
5391 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
5393 #define SPEC_wout_r1_16 0
5395 static void wout_r1_32(DisasContext
*s
, DisasOps
*o
)
5397 store_reg32_i64(get_field(s
, r1
), o
->out
);
5399 #define SPEC_wout_r1_32 0
5401 static void wout_r1_32h(DisasContext
*s
, DisasOps
*o
)
5403 store_reg32h_i64(get_field(s
, r1
), o
->out
);
5405 #define SPEC_wout_r1_32h 0
5407 static void wout_r1_P32(DisasContext
*s
, DisasOps
*o
)
5409 int r1
= get_field(s
, r1
);
5410 store_reg32_i64(r1
, o
->out
);
5411 store_reg32_i64(r1
+ 1, o
->out2
);
5413 #define SPEC_wout_r1_P32 SPEC_r1_even
5415 static void wout_r1_D32(DisasContext
*s
, DisasOps
*o
)
5417 int r1
= get_field(s
, r1
);
5418 TCGv_i64 t
= tcg_temp_new_i64();
5419 store_reg32_i64(r1
+ 1, o
->out
);
5420 tcg_gen_shri_i64(t
, o
->out
, 32);
5421 store_reg32_i64(r1
, t
);
5422 tcg_temp_free_i64(t
);
5424 #define SPEC_wout_r1_D32 SPEC_r1_even
5426 static void wout_r3_P32(DisasContext
*s
, DisasOps
*o
)
5428 int r3
= get_field(s
, r3
);
5429 store_reg32_i64(r3
, o
->out
);
5430 store_reg32_i64(r3
+ 1, o
->out2
);
5432 #define SPEC_wout_r3_P32 SPEC_r3_even
5434 static void wout_r3_P64(DisasContext
*s
, DisasOps
*o
)
5436 int r3
= get_field(s
, r3
);
5437 store_reg(r3
, o
->out
);
5438 store_reg(r3
+ 1, o
->out2
);
5440 #define SPEC_wout_r3_P64 SPEC_r3_even
5442 static void wout_e1(DisasContext
*s
, DisasOps
*o
)
5444 store_freg32_i64(get_field(s
, r1
), o
->out
);
5446 #define SPEC_wout_e1 0
5448 static void wout_f1(DisasContext
*s
, DisasOps
*o
)
5450 store_freg(get_field(s
, r1
), o
->out
);
5452 #define SPEC_wout_f1 0
5454 static void wout_x1(DisasContext
*s
, DisasOps
*o
)
5456 int f1
= get_field(s
, r1
);
5457 store_freg(f1
, o
->out
);
5458 store_freg(f1
+ 2, o
->out2
);
5460 #define SPEC_wout_x1 SPEC_r1_f128
5462 static void wout_cond_r1r2_32(DisasContext
*s
, DisasOps
*o
)
5464 if (get_field(s
, r1
) != get_field(s
, r2
)) {
5465 store_reg32_i64(get_field(s
, r1
), o
->out
);
5468 #define SPEC_wout_cond_r1r2_32 0
5470 static void wout_cond_e1e2(DisasContext
*s
, DisasOps
*o
)
5472 if (get_field(s
, r1
) != get_field(s
, r2
)) {
5473 store_freg32_i64(get_field(s
, r1
), o
->out
);
5476 #define SPEC_wout_cond_e1e2 0
5478 static void wout_m1_8(DisasContext
*s
, DisasOps
*o
)
5480 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
5482 #define SPEC_wout_m1_8 0
5484 static void wout_m1_16(DisasContext
*s
, DisasOps
*o
)
5486 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
5488 #define SPEC_wout_m1_16 0
5490 #ifndef CONFIG_USER_ONLY
5491 static void wout_m1_16a(DisasContext
*s
, DisasOps
*o
)
5493 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEUW
| MO_ALIGN
);
5495 #define SPEC_wout_m1_16a 0
5498 static void wout_m1_32(DisasContext
*s
, DisasOps
*o
)
5500 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
5502 #define SPEC_wout_m1_32 0
5504 #ifndef CONFIG_USER_ONLY
5505 static void wout_m1_32a(DisasContext
*s
, DisasOps
*o
)
5507 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEUL
| MO_ALIGN
);
5509 #define SPEC_wout_m1_32a 0
5512 static void wout_m1_64(DisasContext
*s
, DisasOps
*o
)
5514 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
5516 #define SPEC_wout_m1_64 0
5518 #ifndef CONFIG_USER_ONLY
5519 static void wout_m1_64a(DisasContext
*s
, DisasOps
*o
)
5521 tcg_gen_qemu_st_i64(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEUQ
| MO_ALIGN
);
5523 #define SPEC_wout_m1_64a 0
5526 static void wout_m2_32(DisasContext
*s
, DisasOps
*o
)
5528 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
5530 #define SPEC_wout_m2_32 0
5532 static void wout_in2_r1(DisasContext
*s
, DisasOps
*o
)
5534 store_reg(get_field(s
, r1
), o
->in2
);
5536 #define SPEC_wout_in2_r1 0
5538 static void wout_in2_r1_32(DisasContext
*s
, DisasOps
*o
)
5540 store_reg32_i64(get_field(s
, r1
), o
->in2
);
5542 #define SPEC_wout_in2_r1_32 0
5544 /* ====================================================================== */
5545 /* The "INput 1" generators. These load the first operand to an insn. */
5547 static void in1_r1(DisasContext
*s
, DisasOps
*o
)
5549 o
->in1
= load_reg(get_field(s
, r1
));
5551 #define SPEC_in1_r1 0
5553 static void in1_r1_o(DisasContext
*s
, DisasOps
*o
)
5555 o
->in1
= regs
[get_field(s
, r1
)];
5558 #define SPEC_in1_r1_o 0
5560 static void in1_r1_32s(DisasContext
*s
, DisasOps
*o
)
5562 o
->in1
= tcg_temp_new_i64();
5563 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(s
, r1
)]);
5565 #define SPEC_in1_r1_32s 0
5567 static void in1_r1_32u(DisasContext
*s
, DisasOps
*o
)
5569 o
->in1
= tcg_temp_new_i64();
5570 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(s
, r1
)]);
5572 #define SPEC_in1_r1_32u 0
5574 static void in1_r1_sr32(DisasContext
*s
, DisasOps
*o
)
5576 o
->in1
= tcg_temp_new_i64();
5577 tcg_gen_shri_i64(o
->in1
, regs
[get_field(s
, r1
)], 32);
5579 #define SPEC_in1_r1_sr32 0
5581 static void in1_r1p1(DisasContext
*s
, DisasOps
*o
)
5583 o
->in1
= load_reg(get_field(s
, r1
) + 1);
5585 #define SPEC_in1_r1p1 SPEC_r1_even
5587 static void in1_r1p1_o(DisasContext
*s
, DisasOps
*o
)
5589 o
->in1
= regs
[get_field(s
, r1
) + 1];
5592 #define SPEC_in1_r1p1_o SPEC_r1_even
5594 static void in1_r1p1_32s(DisasContext
*s
, DisasOps
*o
)
5596 o
->in1
= tcg_temp_new_i64();
5597 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(s
, r1
) + 1]);
5599 #define SPEC_in1_r1p1_32s SPEC_r1_even
5601 static void in1_r1p1_32u(DisasContext
*s
, DisasOps
*o
)
5603 o
->in1
= tcg_temp_new_i64();
5604 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(s
, r1
) + 1]);
5606 #define SPEC_in1_r1p1_32u SPEC_r1_even
5608 static void in1_r1_D32(DisasContext
*s
, DisasOps
*o
)
5610 int r1
= get_field(s
, r1
);
5611 o
->in1
= tcg_temp_new_i64();
5612 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
5614 #define SPEC_in1_r1_D32 SPEC_r1_even
5616 static void in1_r2(DisasContext
*s
, DisasOps
*o
)
5618 o
->in1
= load_reg(get_field(s
, r2
));
5620 #define SPEC_in1_r2 0
5622 static void in1_r2_sr32(DisasContext
*s
, DisasOps
*o
)
5624 o
->in1
= tcg_temp_new_i64();
5625 tcg_gen_shri_i64(o
->in1
, regs
[get_field(s
, r2
)], 32);
5627 #define SPEC_in1_r2_sr32 0
5629 static void in1_r2_32u(DisasContext
*s
, DisasOps
*o
)
5631 o
->in1
= tcg_temp_new_i64();
5632 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(s
, r2
)]);
5634 #define SPEC_in1_r2_32u 0
5636 static void in1_r3(DisasContext
*s
, DisasOps
*o
)
5638 o
->in1
= load_reg(get_field(s
, r3
));
5640 #define SPEC_in1_r3 0
5642 static void in1_r3_o(DisasContext
*s
, DisasOps
*o
)
5644 o
->in1
= regs
[get_field(s
, r3
)];
5647 #define SPEC_in1_r3_o 0
5649 static void in1_r3_32s(DisasContext
*s
, DisasOps
*o
)
5651 o
->in1
= tcg_temp_new_i64();
5652 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(s
, r3
)]);
5654 #define SPEC_in1_r3_32s 0
5656 static void in1_r3_32u(DisasContext
*s
, DisasOps
*o
)
5658 o
->in1
= tcg_temp_new_i64();
5659 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(s
, r3
)]);
5661 #define SPEC_in1_r3_32u 0
5663 static void in1_r3_D32(DisasContext
*s
, DisasOps
*o
)
5665 int r3
= get_field(s
, r3
);
5666 o
->in1
= tcg_temp_new_i64();
5667 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
5669 #define SPEC_in1_r3_D32 SPEC_r3_even
5671 static void in1_e1(DisasContext
*s
, DisasOps
*o
)
5673 o
->in1
= load_freg32_i64(get_field(s
, r1
));
5675 #define SPEC_in1_e1 0
5677 static void in1_f1(DisasContext
*s
, DisasOps
*o
)
5679 o
->in1
= load_freg(get_field(s
, r1
));
5681 #define SPEC_in1_f1 0
5683 /* Load the high double word of an extended (128-bit) format FP number */
5684 static void in1_x2h(DisasContext
*s
, DisasOps
*o
)
5686 o
->in1
= load_freg(get_field(s
, r2
));
5688 #define SPEC_in1_x2h SPEC_r2_f128
5690 static void in1_f3(DisasContext
*s
, DisasOps
*o
)
5692 o
->in1
= load_freg(get_field(s
, r3
));
5694 #define SPEC_in1_f3 0
5696 static void in1_la1(DisasContext
*s
, DisasOps
*o
)
5698 o
->addr1
= get_address(s
, 0, get_field(s
, b1
), get_field(s
, d1
));
5700 #define SPEC_in1_la1 0
5702 static void in1_la2(DisasContext
*s
, DisasOps
*o
)
5704 int x2
= have_field(s
, x2
) ? get_field(s
, x2
) : 0;
5705 o
->addr1
= get_address(s
, x2
, get_field(s
, b2
), get_field(s
, d2
));
5707 #define SPEC_in1_la2 0
5709 static void in1_m1_8u(DisasContext
*s
, DisasOps
*o
)
5712 o
->in1
= tcg_temp_new_i64();
5713 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
5715 #define SPEC_in1_m1_8u 0
5717 static void in1_m1_16s(DisasContext
*s
, DisasOps
*o
)
5720 o
->in1
= tcg_temp_new_i64();
5721 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
5723 #define SPEC_in1_m1_16s 0
5725 static void in1_m1_16u(DisasContext
*s
, DisasOps
*o
)
5728 o
->in1
= tcg_temp_new_i64();
5729 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
5731 #define SPEC_in1_m1_16u 0
5733 static void in1_m1_32s(DisasContext
*s
, DisasOps
*o
)
5736 o
->in1
= tcg_temp_new_i64();
5737 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
5739 #define SPEC_in1_m1_32s 0
5741 static void in1_m1_32u(DisasContext
*s
, DisasOps
*o
)
5744 o
->in1
= tcg_temp_new_i64();
5745 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
5747 #define SPEC_in1_m1_32u 0
5749 static void in1_m1_64(DisasContext
*s
, DisasOps
*o
)
5752 o
->in1
= tcg_temp_new_i64();
5753 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
5755 #define SPEC_in1_m1_64 0
5757 /* ====================================================================== */
5758 /* The "INput 2" generators. These load the second operand to an insn. */
5760 static void in2_r1_o(DisasContext
*s
, DisasOps
*o
)
5762 o
->in2
= regs
[get_field(s
, r1
)];
5765 #define SPEC_in2_r1_o 0
5767 static void in2_r1_16u(DisasContext
*s
, DisasOps
*o
)
5769 o
->in2
= tcg_temp_new_i64();
5770 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(s
, r1
)]);
5772 #define SPEC_in2_r1_16u 0
5774 static void in2_r1_32u(DisasContext
*s
, DisasOps
*o
)
5776 o
->in2
= tcg_temp_new_i64();
5777 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(s
, r1
)]);
5779 #define SPEC_in2_r1_32u 0
5781 static void in2_r1_D32(DisasContext
*s
, DisasOps
*o
)
5783 int r1
= get_field(s
, r1
);
5784 o
->in2
= tcg_temp_new_i64();
5785 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
5787 #define SPEC_in2_r1_D32 SPEC_r1_even
5789 static void in2_r2(DisasContext
*s
, DisasOps
*o
)
5791 o
->in2
= load_reg(get_field(s
, r2
));
5793 #define SPEC_in2_r2 0
5795 static void in2_r2_o(DisasContext
*s
, DisasOps
*o
)
5797 o
->in2
= regs
[get_field(s
, r2
)];
5800 #define SPEC_in2_r2_o 0
5802 static void in2_r2_nz(DisasContext
*s
, DisasOps
*o
)
5804 int r2
= get_field(s
, r2
);
5806 o
->in2
= load_reg(r2
);
5809 #define SPEC_in2_r2_nz 0
5811 static void in2_r2_8s(DisasContext
*s
, DisasOps
*o
)
5813 o
->in2
= tcg_temp_new_i64();
5814 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5816 #define SPEC_in2_r2_8s 0
5818 static void in2_r2_8u(DisasContext
*s
, DisasOps
*o
)
5820 o
->in2
= tcg_temp_new_i64();
5821 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5823 #define SPEC_in2_r2_8u 0
5825 static void in2_r2_16s(DisasContext
*s
, DisasOps
*o
)
5827 o
->in2
= tcg_temp_new_i64();
5828 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5830 #define SPEC_in2_r2_16s 0
5832 static void in2_r2_16u(DisasContext
*s
, DisasOps
*o
)
5834 o
->in2
= tcg_temp_new_i64();
5835 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5837 #define SPEC_in2_r2_16u 0
5839 static void in2_r3(DisasContext
*s
, DisasOps
*o
)
5841 o
->in2
= load_reg(get_field(s
, r3
));
5843 #define SPEC_in2_r3 0
5845 static void in2_r3_sr32(DisasContext
*s
, DisasOps
*o
)
5847 o
->in2
= tcg_temp_new_i64();
5848 tcg_gen_shri_i64(o
->in2
, regs
[get_field(s
, r3
)], 32);
5850 #define SPEC_in2_r3_sr32 0
5852 static void in2_r3_32u(DisasContext
*s
, DisasOps
*o
)
5854 o
->in2
= tcg_temp_new_i64();
5855 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(s
, r3
)]);
5857 #define SPEC_in2_r3_32u 0
5859 static void in2_r2_32s(DisasContext
*s
, DisasOps
*o
)
5861 o
->in2
= tcg_temp_new_i64();
5862 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5864 #define SPEC_in2_r2_32s 0
5866 static void in2_r2_32u(DisasContext
*s
, DisasOps
*o
)
5868 o
->in2
= tcg_temp_new_i64();
5869 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5871 #define SPEC_in2_r2_32u 0
5873 static void in2_r2_sr32(DisasContext
*s
, DisasOps
*o
)
5875 o
->in2
= tcg_temp_new_i64();
5876 tcg_gen_shri_i64(o
->in2
, regs
[get_field(s
, r2
)], 32);
5878 #define SPEC_in2_r2_sr32 0
5880 static void in2_e2(DisasContext
*s
, DisasOps
*o
)
5882 o
->in2
= load_freg32_i64(get_field(s
, r2
));
5884 #define SPEC_in2_e2 0
5886 static void in2_f2(DisasContext
*s
, DisasOps
*o
)
5888 o
->in2
= load_freg(get_field(s
, r2
));
5890 #define SPEC_in2_f2 0
5892 /* Load the low double word of an extended (128-bit) format FP number */
5893 static void in2_x2l(DisasContext
*s
, DisasOps
*o
)
5895 o
->in2
= load_freg(get_field(s
, r2
) + 2);
5897 #define SPEC_in2_x2l SPEC_r2_f128
5899 static void in2_ra2(DisasContext
*s
, DisasOps
*o
)
5901 int r2
= get_field(s
, r2
);
5903 /* Note: *don't* treat !r2 as 0, use the reg value. */
5904 o
->in2
= tcg_temp_new_i64();
5905 gen_addi_and_wrap_i64(s
, o
->in2
, regs
[r2
], 0);
5907 #define SPEC_in2_ra2 0
5909 static void in2_a2(DisasContext
*s
, DisasOps
*o
)
5911 int x2
= have_field(s
, x2
) ? get_field(s
, x2
) : 0;
5912 o
->in2
= get_address(s
, x2
, get_field(s
, b2
), get_field(s
, d2
));
5914 #define SPEC_in2_a2 0
5916 static void in2_ri2(DisasContext
*s
, DisasOps
*o
)
5918 o
->in2
= tcg_const_i64(s
->base
.pc_next
+ (int64_t)get_field(s
, i2
) * 2);
5920 #define SPEC_in2_ri2 0
5922 static void in2_sh(DisasContext
*s
, DisasOps
*o
)
5924 int b2
= get_field(s
, b2
);
5925 int d2
= get_field(s
, d2
);
5928 o
->in2
= tcg_const_i64(d2
& 0x3f);
5930 o
->in2
= get_address(s
, 0, b2
, d2
);
5931 tcg_gen_andi_i64(o
->in2
, o
->in2
, 0x3f);
5934 #define SPEC_in2_sh 0
5936 static void in2_m2_8u(DisasContext
*s
, DisasOps
*o
)
5939 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
5941 #define SPEC_in2_m2_8u 0
5943 static void in2_m2_16s(DisasContext
*s
, DisasOps
*o
)
5946 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
5948 #define SPEC_in2_m2_16s 0
5950 static void in2_m2_16u(DisasContext
*s
, DisasOps
*o
)
5953 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5955 #define SPEC_in2_m2_16u 0
5957 static void in2_m2_32s(DisasContext
*s
, DisasOps
*o
)
5960 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5962 #define SPEC_in2_m2_32s 0
5964 static void in2_m2_32u(DisasContext
*s
, DisasOps
*o
)
5967 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5969 #define SPEC_in2_m2_32u 0
5971 #ifndef CONFIG_USER_ONLY
5972 static void in2_m2_32ua(DisasContext
*s
, DisasOps
*o
)
5975 tcg_gen_qemu_ld_tl(o
->in2
, o
->in2
, get_mem_index(s
), MO_TEUL
| MO_ALIGN
);
5977 #define SPEC_in2_m2_32ua 0
5980 static void in2_m2_64(DisasContext
*s
, DisasOps
*o
)
5983 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5985 #define SPEC_in2_m2_64 0
5987 static void in2_m2_64w(DisasContext
*s
, DisasOps
*o
)
5990 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5991 gen_addi_and_wrap_i64(s
, o
->in2
, o
->in2
, 0);
5993 #define SPEC_in2_m2_64w 0
5995 #ifndef CONFIG_USER_ONLY
5996 static void in2_m2_64a(DisasContext
*s
, DisasOps
*o
)
5999 tcg_gen_qemu_ld_i64(o
->in2
, o
->in2
, get_mem_index(s
), MO_TEUQ
| MO_ALIGN
);
6001 #define SPEC_in2_m2_64a 0
6004 static void in2_mri2_16u(DisasContext
*s
, DisasOps
*o
)
6007 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
6009 #define SPEC_in2_mri2_16u 0
6011 static void in2_mri2_32s(DisasContext
*s
, DisasOps
*o
)
6014 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
6016 #define SPEC_in2_mri2_32s 0
6018 static void in2_mri2_32u(DisasContext
*s
, DisasOps
*o
)
6021 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
6023 #define SPEC_in2_mri2_32u 0
6025 static void in2_mri2_64(DisasContext
*s
, DisasOps
*o
)
6028 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
6030 #define SPEC_in2_mri2_64 0
6032 static void in2_i2(DisasContext
*s
, DisasOps
*o
)
6034 o
->in2
= tcg_const_i64(get_field(s
, i2
));
6036 #define SPEC_in2_i2 0
6038 static void in2_i2_8u(DisasContext
*s
, DisasOps
*o
)
6040 o
->in2
= tcg_const_i64((uint8_t)get_field(s
, i2
));
6042 #define SPEC_in2_i2_8u 0
6044 static void in2_i2_16u(DisasContext
*s
, DisasOps
*o
)
6046 o
->in2
= tcg_const_i64((uint16_t)get_field(s
, i2
));
6048 #define SPEC_in2_i2_16u 0
6050 static void in2_i2_32u(DisasContext
*s
, DisasOps
*o
)
6052 o
->in2
= tcg_const_i64((uint32_t)get_field(s
, i2
));
6054 #define SPEC_in2_i2_32u 0
6056 static void in2_i2_16u_shl(DisasContext
*s
, DisasOps
*o
)
6058 uint64_t i2
= (uint16_t)get_field(s
, i2
);
6059 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
6061 #define SPEC_in2_i2_16u_shl 0
6063 static void in2_i2_32u_shl(DisasContext
*s
, DisasOps
*o
)
6065 uint64_t i2
= (uint32_t)get_field(s
, i2
);
6066 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
6068 #define SPEC_in2_i2_32u_shl 0
6070 #ifndef CONFIG_USER_ONLY
6071 static void in2_insn(DisasContext
*s
, DisasOps
*o
)
6073 o
->in2
= tcg_const_i64(s
->fields
.raw_insn
);
6075 #define SPEC_in2_insn 0
6078 /* ====================================================================== */
6080 /* Find opc within the table of insns. This is formulated as a switch
6081 statement so that (1) we get compile-time notice of cut-paste errors
6082 for duplicated opcodes, and (2) the compiler generates the binary
6083 search tree, rather than us having to post-process the table. */
6085 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6086 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6088 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6089 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6091 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6092 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6094 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6096 enum DisasInsnEnum
{
6097 #include "insn-data.def"
6101 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
6106 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
6108 .help_in1 = in1_##I1, \
6109 .help_in2 = in2_##I2, \
6110 .help_prep = prep_##P, \
6111 .help_wout = wout_##W, \
6112 .help_cout = cout_##CC, \
6113 .help_op = op_##OP, \
6117 /* Allow 0 to be used for NULL in the table below. */
6125 #define SPEC_in1_0 0
6126 #define SPEC_in2_0 0
6127 #define SPEC_prep_0 0
6128 #define SPEC_wout_0 0
6130 /* Give smaller names to the various facilities. */
6131 #define FAC_Z S390_FEAT_ZARCH
6132 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6133 #define FAC_DFP S390_FEAT_DFP
6134 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6135 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
6136 #define FAC_EE S390_FEAT_EXECUTE_EXT
6137 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
6138 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
6139 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6140 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6141 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6142 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
6143 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
6144 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6145 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6146 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
6147 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
6148 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
6149 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
6150 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
6151 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
6152 #define FAC_SFLE S390_FEAT_STFLE
6153 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6154 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6155 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6156 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
6157 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
6158 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
6159 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
6160 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6161 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
6162 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
6163 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6164 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6165 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6166 #define FAC_MSA8 S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6167 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
6168 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
6169 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
6170 #define FAC_V S390_FEAT_VECTOR /* vector facility */
6171 #define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6172 #define FAC_MIE2 S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6174 static const DisasInsn insn_info
[] = {
6175 #include "insn-data.def"
6179 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6180 case OPC: return &insn_info[insn_ ## NM];
6182 static const DisasInsn
*lookup_opc(uint16_t opc
)
6185 #include "insn-data.def"
6196 /* Extract a field from the insn. The INSN should be left-aligned in
6197 the uint64_t so that we can more easily utilize the big-bit-endian
6198 definitions we extract from the Principals of Operation. */
/*
 * Extract one operand field F from INSN into the compact field array O.
 * The insn is left-aligned in the uint64_t so that the big-bit-endian
 * bit numbering of the Principles of Operation applies directly.
 * NOTE(review): several interior lines (local declarations, the signed
 * sign-extension step, break statements and the inner switch labels)
 * were lost in this file's mangled extraction and are restored from
 * upstream QEMU translate.c -- verify against upstream.
 */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* Zero-size entries are unused slots in the format table. */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6257 /* Lookup the insn at the current PC, extracting the operands into O and
6258 returning the info struct for the insn. Returns NULL for invalid insn. */
/* Lookup the insn at the current PC, extracting the operands into S->fields
   and returning the info struct for the insn.  Returns NULL for invalid
   insn.
   NOTE(review): local declarations, the ilen switch skeleton, the
   s->ilen / s->fields.op / s->insn assignments and the final return were
   lost in this file's mangled extraction; they are restored here (each is
   required by visible readers elsewhere in this file) -- verify against
   upstream QEMU translate.c. */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths. */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE. */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
/*
 * True when REG is an "additional" FP register, i.e. not one of the four
 * basic FP registers 0, 2, 4, 6 available without the AFP facility.
 */
static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}
/*
 * True when REG may be the first register of a 128-bit FP register pair.
 */
static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}
/*
 * Translate a single guest instruction: decode it, apply facility and
 * specification checks, drive the in1/in2/prep/op/wout/cout helpers, and
 * free any temporaries they created.
 * NOTE(review): the DisasOps initializer, dxc handling, goto-out error
 * paths, out: label and final return were lost in this file's mangled
 * extraction and are restored from upstream QEMU translate.c -- verify
 * against upstream.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN. */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* privileged instruction */
    if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
        gen_program_exception(s, PGM_PRIVILEGED);
        ret = DISAS_NORETURN;
        goto out;
    }

    /* if AFP is not enabled, instructions and registers are forbidden */
    if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
        uint8_t dxc = 0;

        if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
            dxc = 1;
        }
        if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
            dxc = 1;
        }
        if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
            dxc = 1;
        }
        if (insn->flags & IF_BFP) {
            dxc = 2;
        }
        if (insn->flags & IF_DFP) {
            dxc = 3;
        }
        if (insn->flags & IF_VEC) {
            dxc = 0xfe;
        }
        if (dxc) {
            gen_data_exception(dxc);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* if vector instructions not enabled, executing them is forbidden */
    if (insn->flags & IF_VEC) {
        if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
            gen_data_exception(0xfe);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* input/output is the special case for icount mode */
    if (unlikely(insn->flags & IF_IO)) {
        icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
        if (icount) {
            gen_io_start();
        }
    }

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction. */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers. */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_PC_STALE;
    }

out:
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction. */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6533 static void s390x_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
6535 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6538 if (!(dc
->base
.tb
->flags
& FLAG_MASK_64
)) {
6539 dc
->base
.pc_first
&= 0x7fffffff;
6540 dc
->base
.pc_next
= dc
->base
.pc_first
;
6543 dc
->cc_op
= CC_OP_DYNAMIC
;
6544 dc
->ex_value
= dc
->base
.tb
->cs_base
;
6547 static void s390x_tr_tb_start(DisasContextBase
*db
, CPUState
*cs
)
6551 static void s390x_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
6553 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6555 /* Delay the set of ilen until we've read the insn. */
6556 tcg_gen_insn_start(dc
->base
.pc_next
, dc
->cc_op
, 0);
6557 dc
->insn_start
= tcg_last_op();
/*
 * TranslatorOps hook: translate one instruction, and force the TB to end
 * if we crossed a page boundary or are translating an EXECUTE target.
 */
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        uint64_t page_start;

        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
6576 static void s390x_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
6578 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6580 switch (dc
->base
.is_jmp
) {
6582 case DISAS_NORETURN
:
6584 case DISAS_TOO_MANY
:
6585 case DISAS_PC_STALE
:
6586 case DISAS_PC_STALE_NOCHAIN
:
6587 update_psw_addr(dc
);
6589 case DISAS_PC_UPDATED
:
6590 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6591 cc op type is in env */
6594 case DISAS_PC_CC_UPDATED
:
6595 /* Exit the TB, either by raising a debug exception or by return. */
6596 if ((dc
->base
.tb
->flags
& FLAG_MASK_PER
) ||
6597 dc
->base
.is_jmp
== DISAS_PC_STALE_NOCHAIN
) {
6598 tcg_gen_exit_tb(NULL
, 0);
6600 tcg_gen_lookup_and_goto_ptr();
6604 g_assert_not_reached();
/*
 * TranslatorOps hook: log the guest code for this TB.  An EXECUTE target
 * lives in host memory, so only its raw value can be printed.
 */
static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately log_target_disas can't use host memory. */
        qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
    } else {
        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
    }
}
6621 static const TranslatorOps s390x_tr_ops
= {
6622 .init_disas_context
= s390x_tr_init_disas_context
,
6623 .tb_start
= s390x_tr_tb_start
,
6624 .insn_start
= s390x_tr_insn_start
,
6625 .translate_insn
= s390x_tr_translate_insn
,
6626 .tb_stop
= s390x_tr_tb_stop
,
6627 .disas_log
= s390x_tr_disas_log
,
6630 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int max_insns
)
6634 translator_loop(&s390x_tr_ops
, &dc
.base
, cs
, tb
, max_insns
);
6637 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
6640 int cc_op
= data
[1];
6642 env
->psw
.addr
= data
[0];
6644 /* Update the CC opcode if it is not already up-to-date. */
6645 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {
6650 env
->int_pgm_ilen
= data
[2];