 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
31 #include "qemu/osdep.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
46 #include "exec/translator.h"
48 #include "qemu/atomic128.h"
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;
/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */
enum DisasFieldIndexO {

enum DisasFieldIndexC {

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};
struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
};
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
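/*
 * Worked example (editor's sketch, not from the original source): in
 * 31-bit mode the link information is the next-instruction address with
 * the addressing-mode bit set on top, e.g. pc 0x00400100 yields
 * 0x80400100 in the low half of the output register.
 */
static inline uint64_t example_link_info_31bit(uint64_t pc)
{
    return 0x80000000ull | (pc & 0x7fffffffull);
}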
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
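/*
 * Illustrative check (editor's sketch, not from the original source):
 * what the little-endian fixup above yields for 4-byte elements; compare
 * with the LE "W" row of the diagram.
 */
static inline void example_vec_offsets_le(void)
{
    /* bytes = 4, so offs = enr * 4 and then offs ^= (8 - 4) */
    g_assert(((0 * 4) ^ 4) == 4);   /* enr 0 -> byte offset 4  */
    g_assert(((1 * 4) ^ 4) == 0);   /* enr 1 -> byte offset 0  */
    g_assert(((2 * 4) ^ 4) == 12);  /* enr 2 -> byte offset 12 */
    g_assert(((3 * 4) ^ 4) == 8);   /* enr 3 -> byte offset 8  */
}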
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
#endif
}
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Update the PSW and save off cc.  */
    update_psw_addr(s);
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}
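/*
 * Sketch (editor's example, not from the original source): the same
 * wrapping applied to a host value.  0x7fffffff is the 31-bit
 * addressing-mode mask and 0x00ffffff the 24-bit one; 64-bit mode
 * leaves the address untouched.
 */
static inline uint64_t example_wrap_address(uint64_t addr, bool amode31)
{
    return addr & (amode31 ? 0x7fffffffull : 0x00ffffffull);
}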
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
           (tb_cflags(s->base.tb) & CF_LAST_IO) ||
           (s->base.tb->flags & FLAG_MASK_PER);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
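/*
 * Worked example (editor's sketch, not from the original source):
 * decoding a branch mask against ltgt_cond[] after a signed comparison.
 * Mask bit 8 selects CC0 (equal) and bit 4 selects CC1 (low), so mask 12
 * is "branch if low or equal":
 */
static inline TCGCond example_mask_low_or_equal(void)
{
    return ltgt_cond[8 | 4];    /* TCG_COND_LE */
}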
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}
/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
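/*
 * Usage sketch (editor's example, not from the original source): decoder
 * helpers read optional fields through the macros above, e.g. an optional
 * immediate.  have_field() tests the availability bitmap by original
 * index; get_field() reads the value from its compact slot.
 */
static inline int example_get_i2_or_zero(const DisasContext *s)
{
    return have_field(s, i2) ? get_field(s, i2) : 0;
}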
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
#define F0(N)                         { { } },
#define F1(N, X1)                     { { X1 } },
#define F2(N, X1, X2)                 { { X1, X2 } },
#define F3(N, X1, X2, X3)             { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)         { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5)     { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};
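/*
 * Expansion sketch (editor's illustration, not from the original source):
 * assuming insn-format.def contains, say, F2(RR_a, R(1, 8), R(2, 12)),
 * the enum above gains FMT_RR_a and format_info[] gains
 *
 *     { { {  8, 4, 0, FLD_C_r1, FLD_O_r1 },
 *         { 12, 4, 0, FLD_C_r2, FLD_O_r2 } } },
 *
 * i.e. two 4-bit register fields located at instruction bits 8 and 12.
 */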
/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
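/*
 * Sketch (editor's example, not from the original source): how a
 * SPEC_r1_even constraint is checked conceptually before an instruction
 * that pairs r1 with r1+1; a violation raises a specification exception,
 * as op_clcl() does below.
 */
static inline bool example_spec_r1_even_ok(const DisasContext *s)
{
    return (get_field(s, r1) & 1) == 0;
}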
/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4
/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */
struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasOps *o, int mask)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* The borrow value is in cc_src (-1,0); convert to carry (1,0). */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
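/*
 * Layout sketch (editor's example, not from the original source): the
 * 24-bit-mode link information assembled above, computed on the host for
 * clarity.  ILC lands in bits 30-31, CC in 28-29, the program mask in
 * 24-27, and the return address in the low 24 bits.
 */
static inline uint32_t example_link_info_24bit(uint32_t ilen, uint32_t cc,
                                               uint32_t progmask,
                                               uint32_t next_pc)
{
    return ((ilen / 2) << 30) | (cc << 28) | (progmask << 24)
           | (next_pc & 0x00ffffff);
}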
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2));
}
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
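/*
 * Encoding note (editor's sketch, not from the original source): the two
 * serializing no-op forms special-cased in op_bc() above.
 */
static inline bool example_bcr_is_serializing(int m1, int r2)
{
    /* BCR 14,0 and BCR 15,0 never branch but act as memory barriers */
    return r2 == 0 && (m1 == 14 || m1 == 15);
}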
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}
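/*
 * Illustrative check (editor's sketch, not from the original source):
 * the m3/m4 packing used above.  deposit32(m3, 4, 4, m4) keeps m3 in
 * bits 0-3 and places m4 in bits 4-7, so helpers can unpack both fields
 * again with extract32().
 */
static inline uint32_t example_pack_m34(uint8_t m3, uint8_t m4)
{
    return deposit32(m3, 4, 4, m4);
}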
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);
    return DISAS_NEXT;
}
op_clc(DisasContext
*s
, DisasOps
*o
)
2078 int l
= get_field(s
, l1
);
2083 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
2084 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
2087 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
2088 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
2091 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
2092 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
2095 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
2096 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
2099 vl
= tcg_const_i32(l
);
2100 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
2101 tcg_temp_free_i32(vl
);
2105 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
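/*
 * Semantics sketch (editor's example, not from the original source): the
 * CC produced by the setcond above, evaluated on host values.
 */
static inline uint32_t example_cs_cc(uint64_t expected, uint64_t actual)
{
    /* CC 0: swap performed (values equal); CC 1: compare failed */
    return expected != actual;
}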
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    set_cc_static(s);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return DISAS_NEXT;
}
#endif
op_divs32(DisasContext
*s
, DisasOps
*o
)
2417 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2418 return_low128(o
->out
);
2422 static DisasJumpType
op_divu32(DisasContext
*s
, DisasOps
*o
)
2424 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2425 return_low128(o
->out
);
2429 static DisasJumpType
op_divs64(DisasContext
*s
, DisasOps
*o
)
2431 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2432 return_low128(o
->out
);
2436 static DisasJumpType
op_divu64(DisasContext
*s
, DisasOps
*o
)
2438 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2439 return_low128(o
->out
);
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}

static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;
}
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
    return_low128(o->out2);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
op_flogr(DisasContext
*s
, DisasOps
*o
)
2571 /* We'll use the original input for cc computation, since we get to
2572 compare that against 0, which ought to be better than comparing
2573 the real output against 64. It also lets cc_dst be a convenient
2574 temporary during our computation. */
2575 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2577 /* R1 = IN ? CLZ(IN) : 64. */
2578 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2580 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2581 value by 64, which is undefined. But since the shift is 64 iff the
2582 input is zero, we still get the correct result after and'ing. */
2583 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2584 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2585 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
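/*
 * Worked example (editor's sketch, not from the original source): FLOGR
 * on an input with a single bit set, mirroring the TCG ops above on host
 * values.
 */
static inline void example_flogr_check(void)
{
    uint64_t in = 0x0000100000000000ull;
    uint64_t r1 = clz64(in);                                /* 19 */
    uint64_t r1p1 = in & ~(0x8000000000000000ull >> r1);    /* 0 */

    g_assert(r1 == 19 && r1p1 == 0);
}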
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
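
/*
 * Illustration (annotation): ICM with m3 == 0xa (binary 1010) takes
 * the default path above; it loads two bytes from successive
 * addresses and deposits them at bit offsets base + 24 and base + 8,
 * while ccm accumulates 0xff at exactly those positions so the CC is
 * computed from the inserted bytes only.
 */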
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}

static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
    int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
    int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMA:
        if (r3 == r1 || r3 == r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMCTR:
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    }

    t_r1 = tcg_const_i32(r1);
    t_r2 = tcg_const_i32(r2);
    t_r3 = tcg_const_i32(r3);
    type = tcg_const_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r2);
    tcg_temp_free_i32(t_r3);
    tcg_temp_free_i32(type);
    return DISAS_NEXT;
}
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the operation for the computation of CC.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the operation for the computation of CC.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the operation for the computation of CC.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
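
/*
 * Note on the four LOAD AND <op> handlers above (annotation): the
 * atomic fetch-op returns the *old* storage contents into in2, which
 * is the architected R1 result, so the new value that was stored must
 * be recomputed into out purely so the CC machinery can see it.
 */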
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}

static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}

static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUL | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}

static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
#endif
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    return DISAS_NEXT;
}
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    return DISAS_NEXT;
}
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free_i64(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free_i64(t1);

    return DISAS_NEXT;
}
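
/*
 * Rationale for the "page fault can't occur" claims above (annotation,
 * leaving aside address wraparound corner cases): a LOAD MULTIPLE
 * spans at most 16 * 8 = 128 contiguous bytes and thus at most two
 * pages, and the explicit loads of the first and last registers have
 * already probed both pages, so the remaining loads cannot raise a
 * new fault and the architected all-or-nothing update is preserved.
 */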
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    } else if (HAVE_ATOMIC128) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return_low128(o->out2);
    return DISAS_NEXT;
}
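
/*
 * The serial / parallel / exit_atomic triple above is the usual TCG
 * pattern for 16-byte atomics (annotation): a non-parallel context
 * may use the plain helper, a parallel context uses the helper built
 * on host 128-bit atomics when HAVE_ATOMIC128 says they exist, and
 * otherwise EXCP_ATOMIC punts back to the main loop to replay the
 * insn with all other CPUs stopped.
 */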
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}

static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
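
/*
 * Worked example (annotation): with m3 == 0 the block size is 64, so
 * for addr1 == 0x1007 the ori/neg pair computes 64 - 7 = 57 bytes to
 * the block boundary; the umin caps the result at 16, and CC_OP_LCBB
 * then reports CC 0 because all 16 bytes fit within the block.
 */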
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
#if !defined(CONFIG_USER_ONLY)
    TCGv_i32 i2;
#endif
    const uint16_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xff00) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    i2 = tcg_const_i32(monitor_class);
    gen_helper_monitor_call(cpu_env, o->addr1, i2);
    tcg_temp_free_i32(i2);
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}

static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return DISAS_NEXT;
}

static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);
    return DISAS_NEXT;
}

static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return DISAS_NEXT;
}
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;

    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
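
/*
 * Note (annotation): S390_FEAT_INTERLOCKED_ACCESS_2 is the
 * interlocked-access facility 2, under which OI and friends must
 * update storage as an interlocked update; hence the atomic fetch-or
 * on that path, with the or redone on the old value only to feed the
 * CC computation.  Without the facility a plain load/modify/store is
 * architecturally sufficient.
 */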
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and must not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }

    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
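
/*
 * Worked example (annotation): RISBG with i3 = 40, i4 = 47, i5 = 0
 * selects the byte in PoO bit positions 40-47, so
 * mask = (-1ull >> 40) & (-1ull << 16) = 0x0000000000ff0000,
 * len = 8 and pos = 16; since ~mask == imask the deposit path is
 * taken with rot = (0 - 16) & 63 = 48, rotating the selected byte
 * down to the lsb before depositing it back at pos.
 */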
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate. */
    switch (s->fields.op2) {
    case 0x54: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        g_assert_not_reached();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return DISAS_NEXT;
}

static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return DISAS_PC_STALE;
}
#endif

static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return DISAS_PC_STALE;
}
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}

static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
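
/*
 * Example of the sign handling (annotation): a 32-bit SLA of
 * 0x80000001 by 1 shifts to 0x00000002, keeps the original sign bit
 * from in1 and ors it back in, producing 0x80000002; CC_OP_SLA_32
 * independently notices that a bit unlike the sign was shifted out
 * and yields the overflow CC.
 */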
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored.  Bit 29 is set to zero.  */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}

static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored.  */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}

static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored.  */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}

static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16-bit value stored in a uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
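
/*
 * Annotation on the layout built above: the first doubleword stored
 * is a zero epoch byte followed by TOD-clock bits 0-55; the second
 * carries TOD bits 56-63 in its top byte, the always-one marker bit
 * from the 0x10000 or, and the TOD programmable register in its low
 * 16 bits.
 */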
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    gen_helper_sck(cc_op, cpu_env, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}

static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
op_stidp(DisasContext
*s
, DisasOps
*o
)
4365 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpuid
));
4369 static DisasJumpType
op_spt(DisasContext
*s
, DisasOps
*o
)
4371 gen_helper_spt(cpu_env
, o
->in2
);
4375 static DisasJumpType
op_stfl(DisasContext
*s
, DisasOps
*o
)
4377 gen_helper_stfl(cpu_env
);
4381 static DisasJumpType
op_stpt(DisasContext
*s
, DisasOps
*o
)
4383 gen_helper_stpt(o
->out
, cpu_env
);
4387 static DisasJumpType
op_stsi(DisasContext
*s
, DisasOps
*o
)
4389 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
4394 static DisasJumpType
op_spx(DisasContext
*s
, DisasOps
*o
)
4396 gen_helper_spx(cpu_env
, o
->in2
);
4400 static DisasJumpType
op_xsch(DisasContext
*s
, DisasOps
*o
)
4402 gen_helper_xsch(cpu_env
, regs
[1]);
4407 static DisasJumpType
op_csch(DisasContext
*s
, DisasOps
*o
)
4409 gen_helper_csch(cpu_env
, regs
[1]);
4414 static DisasJumpType
op_hsch(DisasContext
*s
, DisasOps
*o
)
4416 gen_helper_hsch(cpu_env
, regs
[1]);
4421 static DisasJumpType
op_msch(DisasContext
*s
, DisasOps
*o
)
4423 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
4428 static DisasJumpType
op_rchp(DisasContext
*s
, DisasOps
*o
)
4430 gen_helper_rchp(cpu_env
, regs
[1]);
4435 static DisasJumpType
op_rsch(DisasContext
*s
, DisasOps
*o
)
4437 gen_helper_rsch(cpu_env
, regs
[1]);
4442 static DisasJumpType
op_sal(DisasContext
*s
, DisasOps
*o
)
4444 gen_helper_sal(cpu_env
, regs
[1]);
4448 static DisasJumpType
op_schm(DisasContext
*s
, DisasOps
*o
)
4450 gen_helper_schm(cpu_env
, regs
[1], regs
[2], o
->in2
);
4454 static DisasJumpType
op_siga(DisasContext
*s
, DisasOps
*o
)
4456 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4457 gen_op_movi_cc(s
, 3);
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}

static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields.op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        update_psw_addr(s);
        gen_helper_per_store_real(cpu_env);
    }
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return DISAS_NEXT;
}

static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        /* Store the *high* half of the register, i.e. shift right,
           since qemu_st32 stores the low 32 bits of its operand.  */
        tcg_gen_shr_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return DISAS_NEXT;
}
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}

static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute borrow (0, -1) into cc_src. */
static void compute_borrow(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}

static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}
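
/*
 * Worked example (annotation): with an incoming borrow
 * (cc_src == -1), computing 0 - 0 first forms {0:in1} + {-1:-1},
 * i.e. {-1:-1}, and then subtracts {0:0}, leaving out == -1 and
 * cc_src == -1; the borrow thus propagates out of the 128-bit
 * intermediate, and CC_OP_SUBU reads it directly from cc_src.
 */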
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}

static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return DISAS_NEXT;
}

static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY

static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#endif

static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and must not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero. */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper. */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}
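
/*
 * The b1 == b2 && d1 == d2 fast path above exploits x ^ x == 0
 * (annotation): "XC X(L),X" is the idiomatic s390x memset-to-zero,
 * so short lengths are expanded inline into the widest available
 * stores instead of taking the helper call.
 */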
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}

static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif
5188 #include "translate_vx.c.inc"
5190 /* ====================================================================== */
5191 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5192 the original inputs), update the various cc data structures in order to
5193 be able to compute the new condition code. */
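
/*
 * Illustrative note (not in the original source): the condition code is
 * computed lazily.  A cout generator only records an operation type plus
 * its operands in cc_op/cc_src/cc_dst/cc_vr -- the 1/2/3 in the helper
 * names below is the number of operands recorded.  The actual 0-3 CC value
 * is derived later, and only if some instruction consumes it.
 */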
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}

static void cout_muls32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
}

static void cout_muls64(DisasContext *s, DisasOps *o)
{
    /* out contains "high" part, out2 contains "low" part of 128 bit result */
    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
}

/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */
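
/*
 * Illustrative note (not in the original source): a common pairing is
 * prep_new + wout_r1 -- the op writes into a fresh temporary, and only
 * after it completes is the temporary copied into the architectural
 * register, so an op that raises an exception mid-way leaves the guest
 * register untouched.
 */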
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
static void prep_x1(DisasContext *s, DisasOps *o)
{
    o->out = load_freg(get_field(s, r1));
    o->out2 = load_freg(get_field(s, r1) + 2);
}
#define SPEC_prep_x1 SPEC_r1_f128

/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */
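
/*
 * Illustrative note (not in the original source): e.g. wout_r1_32 below
 * deposits only the low 32 bits of the result into r1 and preserves the
 * high half of the 64-bit register, matching 32-bit instruction semantics.
 */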
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even
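
/*
 * Illustrative note (not in the original source): wout_r1_D32 spreads a
 * 64-bit result across an even/odd register pair -- e.g. for a 32x32
 * multiply with r1 = 4, the high word lands in r4 and the low word in r5.
 * SPEC_r1_even makes the decoder reject an odd r1 with a specification
 * exception before the op ever runs.
 */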
static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0

/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
    o->g_in1 = true;
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */

static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}
#define SPEC_in2_ri2 0
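
/*
 * Illustrative note (not in the original source): in2_ri2 forms the
 * PC-relative address used by the relative-immediate formats.  With
 * pc_next = 0x1000 and i2 = 0x80, the operand is 0x1000 + 2 * 0x80 =
 * 0x1100, since relative offsets are counted in halfwords.
 */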
static void in2_sh32(DisasContext *s, DisasOps *o)
{
    help_l2_shift(s, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasOps *o)
{
    help_l2_shift(s, o, 63);
}
#define SPEC_in2_sh64 0

static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
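
/*
 * Illustrative note (not in the original source): the *_shl variants place
 * a zero-extended immediate at the bit position named by insn->data.  E.g.
 * an entry with data = 16 turns i2 = 0x1234 into the operand 0x12340000,
 * which is how a 16-bit immediate can target any halfword of a register.
 */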
#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif

/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};
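
/*
 * Illustrative note (not in the original source): each table line in
 * insn-data.def is expanded three times via E.  For XR (opcode 0x17, padded
 * to 0x1700) the first expansion yields the enumerator insn_XR above, the
 * second its DisasInsn descriptor below, and the third the line
 * "case 0x1700: return &insn_info[insn_XR];" inside lookup_opc().
 */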
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

/* Give smaller names to the various facilities.  */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
#define FAC_VE          S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C

/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
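
/*
 * Illustrative note (not in the original source): for the long-displacement
 * formats the 20-bit displacement arrives as a 12-bit DL field followed by
 * an 8-bit DH field, so case 2 above rebuilds the signed value as
 * ((int8_t)dh << 12) | dl.  E.g. dl = 0xfff, dh = 0xff yields -1.
 */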
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}

static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}

static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}
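
/*
 * Illustrative note (not in the original source): 128-bit FP values live in
 * the register pairs (0,2), (1,3), (4,6), (5,7), (8,10), (9,11), (12,14)
 * and (13,15), so a valid first register is exactly one with bit 0x2
 * clear -- hence the single-bit test above.
 */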
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Emit insn_start now that we know the ILEN.  */
    tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        return DISAS_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            return DISAS_NORETURN;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                return DISAS_NORETURN;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                return DISAS_NORETURN;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
            if (icount) {
                gen_io_start();
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
    }

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers.  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_PC_STALE;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
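
/*
 * Illustrative note (not in the original source): the generator pipeline
 * above always runs in the order in1 -> in2 -> prep -> op -> wout -> cout.
 * For a register-memory XOR, say, that means: load the register operand
 * (in1), load the memory operand (in2), allocate the output temporary
 * (prep), XOR (op), write back to the register (wout), and record the
 * operands for lazy CC computation (cout).
 */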
static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->do_debug = dc->base.singlestep_enabled;
}

static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
}

static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /*
     * Emit an insn_start to accompany the breakpoint exception.
     * The ILEN value is a dummy, since this does not result in
     * an s390x exception, but an internal qemu exception which
     * brings us back to interact with the gdbstub.
     */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2);

    dc->base.is_jmp = DISAS_PC_STALE;
    dc->do_debug = true;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size does the right thing.  */
    dc->base.pc_next += 2;
    return true;
}

static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        uint64_t page_start;

        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(dc) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately log_target_disas can't use host memory.  */
        qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
    } else {
        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
    }
}

static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .breakpoint_check   = s390x_tr_breakpoint_check,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
}

void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC opcode if it is not already up-to-date.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }

    /* Record ILEN.  */
    env->int_pgm_ilen = data[2];
}
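
/*
 * Illustrative note (not in the original source): data[] here mirrors the
 * three values recorded per instruction by tcg_gen_insn_start() in
 * translate_one() -- data[0] is the PC, data[1] the current CC_OP and
 * data[2] the instruction length -- which is what lets an exception unwind
 * back to a precise guest state.
 */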