/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool do_debug;
};
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
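
/* For example, a link taken at pc 0x1234 in 31-bit mode (FLAG_MASK_32
   set, FLAG_MASK_64 clear) deposits 0x80001234 into the low 32 bits of
   OUT, the high bit recording the addressing mode; the upper half of
   the register is preserved. */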
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, TCGMemOp size)
{
    const uint8_t es = 1 << size;
    int offs = enr * es;

    g_assert(reg < 32);
    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(size <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - es);
#endif
    return offs + offsetof(CPUS390XState, vregs[reg][0].d);
}
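
/* Illustration: enr = 1 with size = MO_32 yields offs = 4; on a
   little-endian host the "offs ^= (8 - es)" step turns that into 0,
   matching the W row of the little-endian table above. */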
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr is always up to date now */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
static int get_mem_index(DisasContext *s)
{
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
}
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->base.tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
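
/* E.g. for b2 = 5, x2 = 0, d2 = -4 this emits a single addi of -4 to r5;
   in 24/31-bit mode (need_31) the sum is then wrapped to 31 bits by the
   final andi with 0x7fffffff. */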
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
            (s->base.tb->flags & FLAG_MASK_PER);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
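
/* The effect is that a TB may only chain directly to destinations on the
   same guest page as its own start or as the next instruction; anything
   else goes back through the main loop so the link can be invalidated
   safely when pages are flushed. */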
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
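
/* The four mask bits select CCs left to right: 8 -> CC 0, 4 -> CC 1,
   2 -> CC 2, 1 -> CC 3.  E.g. for a comparison, mask 8 | 4 ("branch on
   CC 0 or 1", i.e. equal or low) picks the EQ | LT row: TCG_COND_LE. */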
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}
/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
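
/* Typical use, as seen throughout the ops below:
 *     bool is_imm = have_field(s->fields, i2);
 *     int imm = is_imm ? get_field(s->fields, i2) : 0;
 * where the field name is token-pasted onto both index enums.  */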
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4
/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return DISAS_NEXT;
}
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}
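
/* (Mask 3 selects CC 2 and CC 3 - exactly the CC values whose msb, the
   carry bit, is set.) */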
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
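
/* E.g. with shift = 16 and size = 16, only bits 16-31 are and-ed with
   the immediate (the ori fills every other bit of in2 with ones), and
   the CC is derived from just those 16 result bits. */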
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
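
/* The resulting 24-bit-mode link word packs, from the top of the low 32
   bits: the ILC (s->ilen / 2) in bits 31-30, the CC in bits 29-28, the
   program mask in bits 27-24, and the 24-bit return address below; the
   high half of R1 is preserved. */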
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2));
}
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    TCGMemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    set_cc_static(s);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;
}
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
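
/* E.g. for input 0x0000100000000001: R1 = 19 (leading zeros), and R1+1
   = 0x1, the input with its leftmost one bit cleared. */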
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
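
/* E.g. ICM (base 0) with m3 = 0x6 takes the 16-bit path: ctz32(0x6) = 1,
   so the two loaded bytes are deposited at bits 8-23 and ccm becomes
   0xffff00. */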
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
#endif
op_msa(DisasContext
*s
, DisasOps
*o
)
2558 int r1
= have_field(s
->fields
, r1
) ? get_field(s
->fields
, r1
) : 0;
2559 int r2
= have_field(s
->fields
, r2
) ? get_field(s
->fields
, r2
) : 0;
2560 int r3
= have_field(s
->fields
, r3
) ? get_field(s
->fields
, r3
) : 0;
2561 TCGv_i32 t_r1
, t_r2
, t_r3
, type
;
2563 switch (s
->insn
->data
) {
2564 case S390_FEAT_TYPE_KMCTR
:
2565 if (r3
& 1 || !r3
) {
2566 gen_program_exception(s
, PGM_SPECIFICATION
);
2567 return DISAS_NORETURN
;
2570 case S390_FEAT_TYPE_PPNO
:
2571 case S390_FEAT_TYPE_KMF
:
2572 case S390_FEAT_TYPE_KMC
:
2573 case S390_FEAT_TYPE_KMO
:
2574 case S390_FEAT_TYPE_KM
:
2575 if (r1
& 1 || !r1
) {
2576 gen_program_exception(s
, PGM_SPECIFICATION
);
2577 return DISAS_NORETURN
;
2580 case S390_FEAT_TYPE_KMAC
:
2581 case S390_FEAT_TYPE_KIMD
:
2582 case S390_FEAT_TYPE_KLMD
:
2583 if (r2
& 1 || !r2
) {
2584 gen_program_exception(s
, PGM_SPECIFICATION
);
2585 return DISAS_NORETURN
;
2588 case S390_FEAT_TYPE_PCKMO
:
2589 case S390_FEAT_TYPE_PCC
:
2592 g_assert_not_reached();
2595 t_r1
= tcg_const_i32(r1
);
2596 t_r2
= tcg_const_i32(r2
);
2597 t_r3
= tcg_const_i32(r3
);
2598 type
= tcg_const_i32(s
->insn
->data
);
2599 gen_helper_msa(cc_op
, cpu_env
, t_r1
, t_r2
, t_r3
, type
);
2601 tcg_temp_free_i32(t_r1
);
2602 tcg_temp_free_i32(t_r2
);
2603 tcg_temp_free_i32(t_r3
);
2604 tcg_temp_free_i32(type
);
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
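/*
 * Design note (illustrative): the atomic fetch op leaves the old memory
 * value in o->in2; the interlocked update has already happened.  The
 * plain arithmetic op that follows does not touch memory again -- it
 * only reconstructs old OP operand so the CC helpers can see in1, in2
 * and out.  Sketch on host values:
 *
 *   uint64_t old = atomic_fetch_add(&mem, addend);  // memory updated here
 *   uint64_t cc_result = old + addend;              // recomputed for CC only
 */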
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}

static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUL | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}

static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
#endif
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}

static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free_i64(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
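/*
 * Restartability note (illustrative): the first and last registers are
 * loaded into temporaries before any guest register is written.  If the
 * span crosses a page boundary and the far page faults, nothing has been
 * committed yet, so the instruction restarts cleanly; once both probe
 * loads succeed, every remaining access lies between the two probed
 * addresses and cannot fault.
 */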
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    TCGMemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    } else if (HAVE_ATOMIC128) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return_low128(o->out2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    gen_helper_lura(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lurag(DisasContext *s, DisasOps *o)
{
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}

static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return DISAS_NEXT;
}
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return DISAS_NEXT;
}

static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return DISAS_NEXT;
}
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    return_low128(o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;

    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
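/*
 * Encoding note (illustrative): insn->data packs the affected field as
 * (size << 8) | shift.  E.g. an OILH-style placement of a 16-bit
 * immediate at bit 16 uses data = (16 << 8) | 16, giving
 * mask = ((1ull << 16) - 1) << 16 = 0xffff0000, so the CC is derived
 * from exactly that halfword of the result.
 */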
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound.  */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields->op2 == 0x5d) {
        pos += 32;
    }

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
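/*
 * Worked example (illustrative): RISBG with i3 = 40, i4 = 47, i5 = 0 and
 * pmask = ~0 selects big-endian bits 40..47.  Then
 *   mask = (~0ull >> 40) ^ (~0ull >> 47 >> 1) = 0x0000000000ff0000,
 *   len  = 47 - 40 + 1 = 8,  pos = 63 - 47 = 16,
 * so the deposit path inserts 8 bits at position 16.  If the zero flag
 * in i4 is set, imask becomes 0 and all other result bits are cleared.
 */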
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        g_assert_not_reached();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return DISAS_NEXT;
}

static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_PC_STALE;
}
#endif

static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_PC_STALE;
}
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}

static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
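/*
 * Worked example (illustrative): for the 64-bit form, sign = 1ull << 63.
 * Shifting 0x8000000000000001 left by 1 gives 0x0000000000000002;
 * masking with ~sign and or'ing back the saved sign bit yields
 * 0x8000000000000002 -- the sign bit survives the shift, exactly as
 * SLA/SLAG require.
 */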
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        g_assert_not_reached();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC.  */
    if (b2 == 0) {
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status.  */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}

static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s->fields, b1);
    int d1 = get_field(s->fields, d1);
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = get_address(s, 0, r3, 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* The 16-bit TOD programmable register is stored in a uint32_t,
       with only the valid bits set.  */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
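/*
 * Layout sketch (illustrative) of the 16 bytes stored above:
 *
 *   first doubleword:  c1 >> 8
 *       the 64-bit clock shifted into bits 0..103 of the extended
 *       TOD value (high 8 clock bits drop into the next word)
 *   second doubleword: (c1 << 56) | 0x10000 | todpr
 *       the low 8 clock bits in the top byte, the always-nonzero
 *       marker bit, and the TOD programmable register in the low
 *       16 bits.
 */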
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    gen_helper_sck(cc_op, cpu_env, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}

static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}

static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}

static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}

static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return DISAS_NEXT;
}

static DisasJumpType op_sturg(DisasContext *s, DisasOps *o)
{
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
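/*
 * Worked example (illustrative): STCM with m3 = 0x5 (bytes 1 and 3 of a
 * word) takes the default path.  pos starts at base + 24 and steps down
 * by 8 each iteration while m3 is shifted left; stores happen only when
 * bit 3 of m3 is set, so the two byte stores use pos = base + 16 and
 * pos = base + 0, matching bytes 1 and 3 of the register word.
 */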
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return DISAS_NEXT;
}

static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return DISAS_NEXT;
}
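/*
 * Note (illustrative): the "(r1 + 1) & 15" stepping means LM/STM-style
 * register ranges wrap around: r1 = 14, r3 = 1 names the four registers
 * r14, r15, r0, r1, and the loops above terminate because the running
 * index eventually equals r3 modulo 16.
 */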
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}

static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return DISAS_NEXT;
}
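/*
 * Worked example (illustrative): after a logical subtract, CC=0 or CC=1
 * means "no carry", i.e. a borrow occurred, so the mask 8 | 4 selects
 * exactly those condition codes.  setcond then yields borrow = 1
 * precisely when the previous subtraction borrowed, and the final sub
 * folds it in:
 *
 *   out = in1 - in2 - borrow;
 */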
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}

static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return DISAS_NEXT;
}
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s->fields, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}
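/*
 * Rationale (illustrative): XC of an operand with itself always yields
 * zeros, so "XC x(len,b), x(b)" is the classic s390 idiom for clearing
 * up to 256 bytes of storage.  The inline path above recognizes the
 * identical base/displacement case and, for len + 1 <= 32, emits at
 * most a handful of zero stores instead of calling the helper.
 */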
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}

static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif
/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */
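
/*
 * Sketch of the two flavors (illustrative, not from the original source):
 * prep_new() allocates a fresh temporary that translate_one() frees after
 * the insn, while prep_r1() aliases the TCG global for r1 so the op writes
 * the architectural register directly:
 *
 *     o->out = tcg_temp_new_i64();        // prep_new: freed afterwards
 *     o->out = regs[get_field(f, r1)];    // prep_r1:  g_out set, not freed
 */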
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = load_freg(get_field(f, r1));
    o->out2 = load_freg(get_field(f, r1) + 2);
}
#define SPEC_prep_x1 SPEC_r1_f128
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */
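
/*
 * Illustrative example (not from the original source): a 32-bit result
 * produced in a 64-bit temporary is committed by wout_r1_32(), which
 * stores only the low 32 bits and leaves the high half of the register
 * untouched -- the kind of non-trivial copy a "prep" alias alone could
 * not express.
 */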
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */
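
/*
 * Naming convention (summary, not in the original): the suffix encodes
 * the source and conversion -- in1_r1 copies register r1, in1_r1_32s
 * sign-extends its low 32 bits, in1_r1_sr32 takes the high 32 bits, and
 * the _o variants alias the TCG global directly and set g_in1 so the
 * temporary-freeing code in translate_one() skips them.
 */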
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg(get_field(f, r1));
}
#define SPEC_in1_f1 0

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg(get_field(f, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg(get_field(f, r3));
}
#define SPEC_in1_f3 0

static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */
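
/*
 * As with in1 above (summary, not in the original): besides register
 * copies, in2 generators also produce effective addresses (in2_a2),
 * immediates (in2_i2 and friends), and memory loads (in2_m2_*), where
 * the m2 variants first compute the address into o->in2 and then reuse
 * it as the destination of the load.
 */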
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg(get_field(f, r2));
}
#define SPEC_in2_f2 0

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg(get_field(f, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

/* Give smaller names to the various facilities. */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C
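
/*
 * Expansion sketch (entry quoted from memory of insn-data.def; treat it
 * as illustrative only): a line such as
 *
 *     C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
 *
 * expands three times: to "insn_AR," in the enum above, to a DisasInsn
 * initializer wiring in1_r1/in2_r2/prep_new/wout_r1_32/op_add/cout_adds32
 * into insn_info[], and to "case 0x1a00: return &insn_info[insn_AR];"
 * inside lookup_opc().
 */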
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
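
/*
 * Worked example (illustrative, not from the original source): for a
 * 4-bit field at big-endian bit 8 (f->beg = 8, f->size = 4),
 * "(insn << 8) >> (64 - 4)" first discards the 8 bits above the field,
 * then shifts the 4 field bits down to bit 0, zero-extended; signedness
 * is patched up afterwards in the switch below.
 */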
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
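    /*
     * Example (illustrative): AGR encodes as 0xb908xxxx, so op is 0xb9
     * and "(insn << 8) >> 56" extracts the secondary opcode 0x08 from
     * bits 8-15 of the left-aligned insn; lookup_opc() is then called
     * with 0xb908.
     */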
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}

static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return DISAS_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            return DISAS_NORETURN;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(&f, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(&f, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(&f, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (dxc) {
                gen_data_exception(dxc);
                return DISAS_NORETURN;
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(&f, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(&f, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(&f, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(&f, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(&f, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    o.out = NULL;
    o.out2 = NULL;
    o.in1 = NULL;
    o.in2 = NULL;
    o.addr1 = NULL;

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &f, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers.  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->do_debug = dc->base.singlestep_enabled;
}
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}
static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = DISAS_PC_STALE;
    dc->do_debug = true;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size does the right thing.  */
    dc->base.pc_next += 2;
    return true;
}
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        uint64_t page_start;

        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
*dcbase
, CPUState
*cs
)
6284 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6286 switch (dc
->base
.is_jmp
) {
6288 case DISAS_NORETURN
:
6290 case DISAS_TOO_MANY
:
6291 case DISAS_PC_STALE
:
6292 case DISAS_PC_STALE_NOCHAIN
:
6293 update_psw_addr(dc
);
6295 case DISAS_PC_UPDATED
:
6296 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6297 cc op type is in env */
6300 case DISAS_PC_CC_UPDATED
:
6301 /* Exit the TB, either by raising a debug exception or by return. */
6303 gen_exception(EXCP_DEBUG
);
6304 } else if (use_exit_tb(dc
) ||
6305 dc
->base
.is_jmp
== DISAS_PC_STALE_NOCHAIN
) {
6306 tcg_gen_exit_tb(NULL
, 0);
6308 tcg_gen_lookup_and_goto_ptr();
6312 g_assert_not_reached();
static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately log_target_disas can't use host memory.  */
        qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
    } else {
        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
    }
}
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .breakpoint_check   = s390x_tr_breakpoint_check,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb);
}
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC opcode if it is not already up-to-date.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}