4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
40 #include "exec/gen-icount.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
44 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext
;
50 typedef struct DisasInsn DisasInsn
;
51 typedef struct DisasFields DisasFields
;
54 struct TranslationBlock
*tb
;
55 const DisasInsn
*insn
;
61 bool singlestep_enabled
;
64 /* Information carried about a condition to be evaluated. */
71 struct { TCGv_i64 a
, b
; } s64
;
72 struct { TCGv_i32 a
, b
; } s32
;
76 /* is_jmp field values */
77 #define DISAS_EXCP DISAS_TARGET_0
79 #ifdef DEBUG_INLINE_BRANCHES
80 static uint64_t inline_branch_hit
[CC_OP_MAX
];
81 static uint64_t inline_branch_miss
[CC_OP_MAX
];
84 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
86 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
87 if (s
->tb
->flags
& FLAG_MASK_32
) {
88 return pc
| 0x80000000;
94 static TCGv_i64 psw_addr
;
95 static TCGv_i64 psw_mask
;
98 static TCGv_i32 cc_op
;
99 static TCGv_i64 cc_src
;
100 static TCGv_i64 cc_dst
;
101 static TCGv_i64 cc_vr
;
103 static char cpu_reg_names
[32][4];
104 static TCGv_i64 regs
[16];
105 static TCGv_i64 fregs
[16];
107 void s390x_translate_init(void)
111 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
112 offsetof(CPUS390XState
, psw
.addr
),
114 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
115 offsetof(CPUS390XState
, psw
.mask
),
117 gbea
= tcg_global_mem_new_i64(cpu_env
,
118 offsetof(CPUS390XState
, gbea
),
121 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
123 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
125 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
127 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
130 for (i
= 0; i
< 16; i
++) {
131 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
132 regs
[i
] = tcg_global_mem_new(cpu_env
,
133 offsetof(CPUS390XState
, regs
[i
]),
137 for (i
= 0; i
< 16; i
++) {
138 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
139 fregs
[i
] = tcg_global_mem_new(cpu_env
,
140 offsetof(CPUS390XState
, vregs
[i
][0].d
),
141 cpu_reg_names
[i
+ 16]);
145 static TCGv_i64
load_reg(int reg
)
147 TCGv_i64 r
= tcg_temp_new_i64();
148 tcg_gen_mov_i64(r
, regs
[reg
]);
152 static TCGv_i64
load_freg32_i64(int reg
)
154 TCGv_i64 r
= tcg_temp_new_i64();
155 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
159 static void store_reg(int reg
, TCGv_i64 v
)
161 tcg_gen_mov_i64(regs
[reg
], v
);
164 static void store_freg(int reg
, TCGv_i64 v
)
166 tcg_gen_mov_i64(fregs
[reg
], v
);
169 static void store_reg32_i64(int reg
, TCGv_i64 v
)
171 /* 32 bit register writes keep the upper half */
172 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
175 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
177 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
180 static void store_freg32_i64(int reg
, TCGv_i64 v
)
182 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
185 static void return_low128(TCGv_i64 dest
)
187 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
190 static void update_psw_addr(DisasContext
*s
)
193 tcg_gen_movi_i64(psw_addr
, s
->pc
);
196 static void per_branch(DisasContext
*s
, bool to_next
)
198 #ifndef CONFIG_USER_ONLY
199 tcg_gen_movi_i64(gbea
, s
->pc
);
201 if (s
->tb
->flags
& FLAG_MASK_PER
) {
202 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->next_pc
) : psw_addr
;
203 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
205 tcg_temp_free_i64(next_pc
);
211 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
212 TCGv_i64 arg1
, TCGv_i64 arg2
)
214 #ifndef CONFIG_USER_ONLY
215 if (s
->tb
->flags
& FLAG_MASK_PER
) {
216 TCGLabel
*lab
= gen_new_label();
217 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
219 tcg_gen_movi_i64(gbea
, s
->pc
);
220 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
224 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
225 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
226 tcg_temp_free_i64(pc
);
231 static void per_breaking_event(DisasContext
*s
)
233 tcg_gen_movi_i64(gbea
, s
->pc
);
236 static void update_cc_op(DisasContext
*s
)
238 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
239 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
243 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
245 return (uint64_t)cpu_lduw_code(env
, pc
);
248 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
250 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
253 static int get_mem_index(DisasContext
*s
)
255 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
256 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
258 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
260 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
268 static void gen_exception(int excp
)
270 TCGv_i32 tmp
= tcg_const_i32(excp
);
271 gen_helper_exception(cpu_env
, tmp
);
272 tcg_temp_free_i32(tmp
);
275 static void gen_program_exception(DisasContext
*s
, int code
)
279 /* Remember what pgm exeption this was. */
280 tmp
= tcg_const_i32(code
);
281 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
282 tcg_temp_free_i32(tmp
);
284 tmp
= tcg_const_i32(s
->ilen
);
285 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
286 tcg_temp_free_i32(tmp
);
294 /* Trigger exception. */
295 gen_exception(EXCP_PGM
);
298 static inline void gen_illegal_opcode(DisasContext
*s
)
300 gen_program_exception(s
, PGM_OPERATION
);
303 static inline void gen_trap(DisasContext
*s
)
307 /* Set DXC to 0xff. */
308 t
= tcg_temp_new_i32();
309 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
310 tcg_gen_ori_i32(t
, t
, 0xff00);
311 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
312 tcg_temp_free_i32(t
);
314 gen_program_exception(s
, PGM_DATA
);
317 #ifndef CONFIG_USER_ONLY
318 static void check_privileged(DisasContext
*s
)
320 if (s
->tb
->flags
& FLAG_MASK_PSTATE
) {
321 gen_program_exception(s
, PGM_PRIVILEGED
);
326 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
328 TCGv_i64 tmp
= tcg_temp_new_i64();
329 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
331 /* Note that d2 is limited to 20 bits, signed. If we crop negative
332 displacements early we create larger immedate addends. */
334 /* Note that addi optimizes the imm==0 case. */
336 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
337 tcg_gen_addi_i64(tmp
, tmp
, d2
);
339 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
341 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
347 tcg_gen_movi_i64(tmp
, d2
);
350 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
356 static inline bool live_cc_data(DisasContext
*s
)
358 return (s
->cc_op
!= CC_OP_DYNAMIC
359 && s
->cc_op
!= CC_OP_STATIC
363 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
365 if (live_cc_data(s
)) {
366 tcg_gen_discard_i64(cc_src
);
367 tcg_gen_discard_i64(cc_dst
);
368 tcg_gen_discard_i64(cc_vr
);
370 s
->cc_op
= CC_OP_CONST0
+ val
;
373 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
375 if (live_cc_data(s
)) {
376 tcg_gen_discard_i64(cc_src
);
377 tcg_gen_discard_i64(cc_vr
);
379 tcg_gen_mov_i64(cc_dst
, dst
);
383 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
386 if (live_cc_data(s
)) {
387 tcg_gen_discard_i64(cc_vr
);
389 tcg_gen_mov_i64(cc_src
, src
);
390 tcg_gen_mov_i64(cc_dst
, dst
);
394 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
395 TCGv_i64 dst
, TCGv_i64 vr
)
397 tcg_gen_mov_i64(cc_src
, src
);
398 tcg_gen_mov_i64(cc_dst
, dst
);
399 tcg_gen_mov_i64(cc_vr
, vr
);
403 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
405 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
408 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
410 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
413 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
415 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
418 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
420 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
423 /* CC value is in env->cc_op */
424 static void set_cc_static(DisasContext
*s
)
426 if (live_cc_data(s
)) {
427 tcg_gen_discard_i64(cc_src
);
428 tcg_gen_discard_i64(cc_dst
);
429 tcg_gen_discard_i64(cc_vr
);
431 s
->cc_op
= CC_OP_STATIC
;
434 /* calculates cc into cc_op */
435 static void gen_op_calc_cc(DisasContext
*s
)
437 TCGv_i32 local_cc_op
;
440 TCGV_UNUSED_I32(local_cc_op
);
441 TCGV_UNUSED_I64(dummy
);
444 dummy
= tcg_const_i64(0);
458 local_cc_op
= tcg_const_i32(s
->cc_op
);
474 /* s->cc_op is the cc value */
475 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
478 /* env->cc_op already is the cc value */
493 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
498 case CC_OP_LTUGTU_32
:
499 case CC_OP_LTUGTU_64
:
506 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
521 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
524 /* unknown operation - assume 3 arguments and cc_op in env */
525 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
531 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
532 tcg_temp_free_i32(local_cc_op
);
534 if (!TCGV_IS_UNUSED_I64(dummy
)) {
535 tcg_temp_free_i64(dummy
);
538 /* We now have cc in cc_op as constant */
542 static bool use_exit_tb(DisasContext
*s
)
544 return (s
->singlestep_enabled
||
545 (tb_cflags(s
->tb
) & CF_LAST_IO
) ||
546 (s
->tb
->flags
& FLAG_MASK_PER
));
549 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
551 if (unlikely(use_exit_tb(s
))) {
554 #ifndef CONFIG_USER_ONLY
555 return (dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
556 (dest
& TARGET_PAGE_MASK
) == (s
->pc
& TARGET_PAGE_MASK
);
562 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
564 #ifdef DEBUG_INLINE_BRANCHES
565 inline_branch_miss
[cc_op
]++;
569 static void account_inline_branch(DisasContext
*s
, int cc_op
)
571 #ifdef DEBUG_INLINE_BRANCHES
572 inline_branch_hit
[cc_op
]++;
576 /* Table of mask values to comparison codes, given a comparison as input.
577 For such, CC=3 should not be possible. */
578 static const TCGCond ltgt_cond
[16] = {
579 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
580 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
581 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
582 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
583 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
584 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
585 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
586 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
589 /* Table of mask values to comparison codes, given a logic op as input.
590 For such, only CC=0 and CC=1 should be possible. */
591 static const TCGCond nz_cond
[16] = {
592 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
593 TCG_COND_NEVER
, TCG_COND_NEVER
,
594 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
595 TCG_COND_NE
, TCG_COND_NE
,
596 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
597 TCG_COND_EQ
, TCG_COND_EQ
,
598 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
599 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
602 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
603 details required to generate a TCG comparison. */
604 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
607 enum cc_op old_cc_op
= s
->cc_op
;
609 if (mask
== 15 || mask
== 0) {
610 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
613 c
->g1
= c
->g2
= true;
618 /* Find the TCG condition for the mask + cc op. */
624 cond
= ltgt_cond
[mask
];
625 if (cond
== TCG_COND_NEVER
) {
628 account_inline_branch(s
, old_cc_op
);
631 case CC_OP_LTUGTU_32
:
632 case CC_OP_LTUGTU_64
:
633 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
634 if (cond
== TCG_COND_NEVER
) {
637 account_inline_branch(s
, old_cc_op
);
641 cond
= nz_cond
[mask
];
642 if (cond
== TCG_COND_NEVER
) {
645 account_inline_branch(s
, old_cc_op
);
660 account_inline_branch(s
, old_cc_op
);
675 account_inline_branch(s
, old_cc_op
);
679 switch (mask
& 0xa) {
680 case 8: /* src == 0 -> no one bit found */
683 case 2: /* src != 0 -> one bit found */
689 account_inline_branch(s
, old_cc_op
);
695 case 8 | 2: /* vr == 0 */
698 case 4 | 1: /* vr != 0 */
701 case 8 | 4: /* no carry -> vr >= src */
704 case 2 | 1: /* carry -> vr < src */
710 account_inline_branch(s
, old_cc_op
);
715 /* Note that CC=0 is impossible; treat it as dont-care. */
717 case 2: /* zero -> op1 == op2 */
720 case 4 | 1: /* !zero -> op1 != op2 */
723 case 4: /* borrow (!carry) -> op1 < op2 */
726 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
732 account_inline_branch(s
, old_cc_op
);
737 /* Calculate cc value. */
742 /* Jump based on CC. We'll load up the real cond below;
743 the assignment here merely avoids a compiler warning. */
744 account_noninline_branch(s
, old_cc_op
);
745 old_cc_op
= CC_OP_STATIC
;
746 cond
= TCG_COND_NEVER
;
750 /* Load up the arguments of the comparison. */
752 c
->g1
= c
->g2
= false;
756 c
->u
.s32
.a
= tcg_temp_new_i32();
757 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
758 c
->u
.s32
.b
= tcg_const_i32(0);
761 case CC_OP_LTUGTU_32
:
764 c
->u
.s32
.a
= tcg_temp_new_i32();
765 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
766 c
->u
.s32
.b
= tcg_temp_new_i32();
767 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
774 c
->u
.s64
.b
= tcg_const_i64(0);
778 case CC_OP_LTUGTU_64
:
782 c
->g1
= c
->g2
= true;
788 c
->u
.s64
.a
= tcg_temp_new_i64();
789 c
->u
.s64
.b
= tcg_const_i64(0);
790 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
795 c
->u
.s32
.a
= tcg_temp_new_i32();
796 c
->u
.s32
.b
= tcg_temp_new_i32();
797 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
798 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
799 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
801 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
808 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
809 c
->u
.s64
.b
= tcg_const_i64(0);
821 case 0x8 | 0x4 | 0x2: /* cc != 3 */
823 c
->u
.s32
.b
= tcg_const_i32(3);
825 case 0x8 | 0x4 | 0x1: /* cc != 2 */
827 c
->u
.s32
.b
= tcg_const_i32(2);
829 case 0x8 | 0x2 | 0x1: /* cc != 1 */
831 c
->u
.s32
.b
= tcg_const_i32(1);
833 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
836 c
->u
.s32
.a
= tcg_temp_new_i32();
837 c
->u
.s32
.b
= tcg_const_i32(0);
838 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
840 case 0x8 | 0x4: /* cc < 2 */
842 c
->u
.s32
.b
= tcg_const_i32(2);
844 case 0x8: /* cc == 0 */
846 c
->u
.s32
.b
= tcg_const_i32(0);
848 case 0x4 | 0x2 | 0x1: /* cc != 0 */
850 c
->u
.s32
.b
= tcg_const_i32(0);
852 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
855 c
->u
.s32
.a
= tcg_temp_new_i32();
856 c
->u
.s32
.b
= tcg_const_i32(0);
857 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
859 case 0x4: /* cc == 1 */
861 c
->u
.s32
.b
= tcg_const_i32(1);
863 case 0x2 | 0x1: /* cc > 1 */
865 c
->u
.s32
.b
= tcg_const_i32(1);
867 case 0x2: /* cc == 2 */
869 c
->u
.s32
.b
= tcg_const_i32(2);
871 case 0x1: /* cc == 3 */
873 c
->u
.s32
.b
= tcg_const_i32(3);
876 /* CC is masked by something else: (8 >> cc) & mask. */
879 c
->u
.s32
.a
= tcg_const_i32(8);
880 c
->u
.s32
.b
= tcg_const_i32(0);
881 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
882 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
893 static void free_compare(DisasCompare
*c
)
897 tcg_temp_free_i64(c
->u
.s64
.a
);
899 tcg_temp_free_i32(c
->u
.s32
.a
);
904 tcg_temp_free_i64(c
->u
.s64
.b
);
906 tcg_temp_free_i32(c
->u
.s32
.b
);
911 /* ====================================================================== */
912 /* Define the insn format enumeration. */
913 #define F0(N) FMT_##N,
914 #define F1(N, X1) F0(N)
915 #define F2(N, X1, X2) F0(N)
916 #define F3(N, X1, X2, X3) F0(N)
917 #define F4(N, X1, X2, X3, X4) F0(N)
918 #define F5(N, X1, X2, X3, X4, X5) F0(N)
921 #include "insn-format.def"
931 /* Define a structure to hold the decoded fields. We'll store each inside
932 an array indexed by an enum. In order to conserve memory, we'll arrange
933 for fields that do not exist at the same time to overlap, thus the "C"
934 for compact. For checking purposes there is an "O" for original index
935 as well that will be applied to availability bitmaps. */
937 enum DisasFieldIndexO
{
960 enum DisasFieldIndexC
{
995 unsigned presentC
:16;
996 unsigned int presentO
;
1000 /* This is the way fields are to be accessed out of DisasFields. */
1001 #define have_field(S, F) have_field1((S), FLD_O_##F)
1002 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1004 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1006 return (f
->presentO
>> c
) & 1;
1009 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1010 enum DisasFieldIndexC c
)
1012 assert(have_field1(f
, o
));
1016 /* Describe the layout of each field in each format. */
1017 typedef struct DisasField
{
1019 unsigned int size
:8;
1020 unsigned int type
:2;
1021 unsigned int indexC
:6;
1022 enum DisasFieldIndexO indexO
:8;
1025 typedef struct DisasFormatInfo
{
1026 DisasField op
[NUM_C_FIELD
];
1029 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1030 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1031 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1032 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1033 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1034 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1035 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1036 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1037 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1038 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1039 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1040 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1041 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1042 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1044 #define F0(N) { { } },
1045 #define F1(N, X1) { { X1 } },
1046 #define F2(N, X1, X2) { { X1, X2 } },
1047 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1048 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1049 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1051 static const DisasFormatInfo format_info
[] = {
1052 #include "insn-format.def"
1070 /* Generally, we'll extract operands into this structures, operate upon
1071 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1072 of routines below for more details. */
1074 bool g_out
, g_out2
, g_in1
, g_in2
;
1075 TCGv_i64 out
, out2
, in1
, in2
;
1079 /* Instructions can place constraints on their operands, raising specification
1080 exceptions if they are violated. To make this easy to automate, each "in1",
1081 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1082 of the following, or 0. To make this easy to document, we'll put the
1083 SPEC_<name> defines next to <name>. */
1085 #define SPEC_r1_even 1
1086 #define SPEC_r2_even 2
1087 #define SPEC_r3_even 4
1088 #define SPEC_r1_f128 8
1089 #define SPEC_r2_f128 16
1091 /* Return values from translate_one, indicating the state of the TB. */
1093 /* Continue the TB. */
1095 /* We have emitted one or more goto_tb. No fixup required. */
1097 /* We are not using a goto_tb (for whatever reason), but have updated
1098 the PC (for whatever reason), so there's no need to do it again on
1101 /* We have updated the PC and CC values. */
1103 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1104 updated the PC for the next instruction to be executed. */
1106 /* We are exiting the TB to the main loop. */
1107 EXIT_PC_STALE_NOCHAIN
,
1108 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1109 No following code will be executed. */
1121 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1122 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1123 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1124 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1125 void (*help_cout
)(DisasContext
*, DisasOps
*);
1126 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1131 /* ====================================================================== */
1132 /* Miscellaneous helpers, used by several operations. */
1134 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1135 DisasOps
*o
, int mask
)
1137 int b2
= get_field(f
, b2
);
1138 int d2
= get_field(f
, d2
);
1141 o
->in2
= tcg_const_i64(d2
& mask
);
1143 o
->in2
= get_address(s
, 0, b2
, d2
);
1144 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1148 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1150 if (dest
== s
->next_pc
) {
1151 per_branch(s
, true);
1154 if (use_goto_tb(s
, dest
)) {
1156 per_breaking_event(s
);
1158 tcg_gen_movi_i64(psw_addr
, dest
);
1159 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1160 return EXIT_GOTO_TB
;
1162 tcg_gen_movi_i64(psw_addr
, dest
);
1163 per_branch(s
, false);
1164 return EXIT_PC_UPDATED
;
1168 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1169 bool is_imm
, int imm
, TCGv_i64 cdest
)
1172 uint64_t dest
= s
->pc
+ 2 * imm
;
1175 /* Take care of the special cases first. */
1176 if (c
->cond
== TCG_COND_NEVER
) {
1181 if (dest
== s
->next_pc
) {
1182 /* Branch to next. */
1183 per_branch(s
, true);
1187 if (c
->cond
== TCG_COND_ALWAYS
) {
1188 ret
= help_goto_direct(s
, dest
);
1192 if (TCGV_IS_UNUSED_I64(cdest
)) {
1193 /* E.g. bcr %r0 -> no branch. */
1197 if (c
->cond
== TCG_COND_ALWAYS
) {
1198 tcg_gen_mov_i64(psw_addr
, cdest
);
1199 per_branch(s
, false);
1200 ret
= EXIT_PC_UPDATED
;
1205 if (use_goto_tb(s
, s
->next_pc
)) {
1206 if (is_imm
&& use_goto_tb(s
, dest
)) {
1207 /* Both exits can use goto_tb. */
1210 lab
= gen_new_label();
1212 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1214 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1217 /* Branch not taken. */
1219 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1220 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1224 per_breaking_event(s
);
1226 tcg_gen_movi_i64(psw_addr
, dest
);
1227 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1231 /* Fallthru can use goto_tb, but taken branch cannot. */
1232 /* Store taken branch destination before the brcond. This
1233 avoids having to allocate a new local temp to hold it.
1234 We'll overwrite this in the not taken case anyway. */
1236 tcg_gen_mov_i64(psw_addr
, cdest
);
1239 lab
= gen_new_label();
1241 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1243 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1246 /* Branch not taken. */
1249 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1250 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1254 tcg_gen_movi_i64(psw_addr
, dest
);
1256 per_breaking_event(s
);
1257 ret
= EXIT_PC_UPDATED
;
1260 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1261 Most commonly we're single-stepping or some other condition that
1262 disables all use of goto_tb. Just update the PC and exit. */
1264 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1266 cdest
= tcg_const_i64(dest
);
1270 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1272 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1274 TCGv_i32 t0
= tcg_temp_new_i32();
1275 TCGv_i64 t1
= tcg_temp_new_i64();
1276 TCGv_i64 z
= tcg_const_i64(0);
1277 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1278 tcg_gen_extu_i32_i64(t1
, t0
);
1279 tcg_temp_free_i32(t0
);
1280 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1281 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1282 tcg_temp_free_i64(t1
);
1283 tcg_temp_free_i64(z
);
1287 tcg_temp_free_i64(cdest
);
1289 tcg_temp_free_i64(next
);
1291 ret
= EXIT_PC_UPDATED
;
1299 /* ====================================================================== */
1300 /* The operations. These perform the bulk of the work for any insn,
1301 usually after the operands have been loaded and output initialized. */
1303 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1306 z
= tcg_const_i64(0);
1307 n
= tcg_temp_new_i64();
1308 tcg_gen_neg_i64(n
, o
->in2
);
1309 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1310 tcg_temp_free_i64(n
);
1311 tcg_temp_free_i64(z
);
1315 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1317 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1321 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1323 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1327 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1329 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1330 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1334 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1336 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1340 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1345 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1347 /* The carry flag is the msb of CC, therefore the branch mask that would
1348 create that comparison is 3. Feeding the generated comparison to
1349 setcond produces the carry flag that we desire. */
1350 disas_jcc(s
, &cmp
, 3);
1351 carry
= tcg_temp_new_i64();
1353 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1355 TCGv_i32 t
= tcg_temp_new_i32();
1356 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1357 tcg_gen_extu_i32_i64(carry
, t
);
1358 tcg_temp_free_i32(t
);
1362 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1363 tcg_temp_free_i64(carry
);
1367 static ExitStatus
op_asi(DisasContext
*s
, DisasOps
*o
)
1369 o
->in1
= tcg_temp_new_i64();
1371 if (!s390_has_feat(S390_FEAT_STFLE_45
)) {
1372 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1374 /* Perform the atomic addition in memory. */
1375 tcg_gen_atomic_fetch_add_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1379 /* Recompute also for atomic case: needed for setting CC. */
1380 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1382 if (!s390_has_feat(S390_FEAT_STFLE_45
)) {
1383 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1388 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1390 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1394 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1396 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1400 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1402 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1403 return_low128(o
->out2
);
1407 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1409 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1413 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1415 int shift
= s
->insn
->data
& 0xff;
1416 int size
= s
->insn
->data
>> 8;
1417 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1420 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1421 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1422 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1424 /* Produce the CC from only the bits manipulated. */
1425 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1426 set_cc_nz_u64(s
, cc_dst
);
1430 static ExitStatus
op_ni(DisasContext
*s
, DisasOps
*o
)
1432 o
->in1
= tcg_temp_new_i64();
1434 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1435 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1437 /* Perform the atomic operation in memory. */
1438 tcg_gen_atomic_fetch_and_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1442 /* Recompute also for atomic case: needed for setting CC. */
1443 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1445 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1446 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1451 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1453 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1454 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1455 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1456 per_branch(s
, false);
1457 return EXIT_PC_UPDATED
;
1463 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1465 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1466 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1469 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1471 int m1
= get_field(s
->fields
, m1
);
1472 bool is_imm
= have_field(s
->fields
, i2
);
1473 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1476 /* BCR with R2 = 0 causes no branching */
1477 if (have_field(s
->fields
, r2
) && get_field(s
->fields
, r2
) == 0) {
1479 /* Perform serialization */
1480 /* FIXME: check for fast-BCR-serialization facility */
1481 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1484 /* Perform serialization */
1485 /* FIXME: perform checkpoint-synchronisation */
1486 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1491 disas_jcc(s
, &c
, m1
);
1492 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1495 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1497 int r1
= get_field(s
->fields
, r1
);
1498 bool is_imm
= have_field(s
->fields
, i2
);
1499 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1503 c
.cond
= TCG_COND_NE
;
1508 t
= tcg_temp_new_i64();
1509 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1510 store_reg32_i64(r1
, t
);
1511 c
.u
.s32
.a
= tcg_temp_new_i32();
1512 c
.u
.s32
.b
= tcg_const_i32(0);
1513 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1514 tcg_temp_free_i64(t
);
1516 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1519 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1521 int r1
= get_field(s
->fields
, r1
);
1522 int imm
= get_field(s
->fields
, i2
);
1526 c
.cond
= TCG_COND_NE
;
1531 t
= tcg_temp_new_i64();
1532 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1533 tcg_gen_subi_i64(t
, t
, 1);
1534 store_reg32h_i64(r1
, t
);
1535 c
.u
.s32
.a
= tcg_temp_new_i32();
1536 c
.u
.s32
.b
= tcg_const_i32(0);
1537 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1538 tcg_temp_free_i64(t
);
1540 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1543 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1545 int r1
= get_field(s
->fields
, r1
);
1546 bool is_imm
= have_field(s
->fields
, i2
);
1547 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1550 c
.cond
= TCG_COND_NE
;
1555 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1556 c
.u
.s64
.a
= regs
[r1
];
1557 c
.u
.s64
.b
= tcg_const_i64(0);
1559 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1562 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1564 int r1
= get_field(s
->fields
, r1
);
1565 int r3
= get_field(s
->fields
, r3
);
1566 bool is_imm
= have_field(s
->fields
, i2
);
1567 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1571 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1576 t
= tcg_temp_new_i64();
1577 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1578 c
.u
.s32
.a
= tcg_temp_new_i32();
1579 c
.u
.s32
.b
= tcg_temp_new_i32();
1580 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1581 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1582 store_reg32_i64(r1
, t
);
1583 tcg_temp_free_i64(t
);
1585 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1588 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1590 int r1
= get_field(s
->fields
, r1
);
1591 int r3
= get_field(s
->fields
, r3
);
1592 bool is_imm
= have_field(s
->fields
, i2
);
1593 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1596 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1599 if (r1
== (r3
| 1)) {
1600 c
.u
.s64
.b
= load_reg(r3
| 1);
1603 c
.u
.s64
.b
= regs
[r3
| 1];
1607 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1608 c
.u
.s64
.a
= regs
[r1
];
1611 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1614 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1616 int imm
, m3
= get_field(s
->fields
, m3
);
1620 c
.cond
= ltgt_cond
[m3
];
1621 if (s
->insn
->data
) {
1622 c
.cond
= tcg_unsigned_cond(c
.cond
);
1624 c
.is_64
= c
.g1
= c
.g2
= true;
1628 is_imm
= have_field(s
->fields
, i4
);
1630 imm
= get_field(s
->fields
, i4
);
1633 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1634 get_field(s
->fields
, d4
));
1637 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1640 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1642 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1647 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1649 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1654 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1656 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1661 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1663 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1664 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1665 tcg_temp_free_i32(m3
);
1666 gen_set_cc_nz_f32(s
, o
->in2
);
1670 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1672 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1673 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1674 tcg_temp_free_i32(m3
);
1675 gen_set_cc_nz_f64(s
, o
->in2
);
1679 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1681 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1682 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1683 tcg_temp_free_i32(m3
);
1684 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1688 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1690 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1691 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1692 tcg_temp_free_i32(m3
);
1693 gen_set_cc_nz_f32(s
, o
->in2
);
1697 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1699 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1700 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1701 tcg_temp_free_i32(m3
);
1702 gen_set_cc_nz_f64(s
, o
->in2
);
1706 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1708 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1709 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1710 tcg_temp_free_i32(m3
);
1711 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1715 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1717 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1718 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1719 tcg_temp_free_i32(m3
);
1720 gen_set_cc_nz_f32(s
, o
->in2
);
1724 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1726 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1727 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1728 tcg_temp_free_i32(m3
);
1729 gen_set_cc_nz_f64(s
, o
->in2
);
1733 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1735 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1736 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1737 tcg_temp_free_i32(m3
);
1738 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1742 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1744 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1745 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1746 tcg_temp_free_i32(m3
);
1747 gen_set_cc_nz_f32(s
, o
->in2
);
1751 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1753 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1754 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1755 tcg_temp_free_i32(m3
);
1756 gen_set_cc_nz_f64(s
, o
->in2
);
1760 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1762 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1763 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1764 tcg_temp_free_i32(m3
);
1765 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1769 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1771 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1772 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1773 tcg_temp_free_i32(m3
);
1777 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1779 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1780 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1781 tcg_temp_free_i32(m3
);
1785 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1787 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1788 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1789 tcg_temp_free_i32(m3
);
1790 return_low128(o
->out2
);
1794 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1796 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1797 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1798 tcg_temp_free_i32(m3
);
1802 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1804 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1805 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1806 tcg_temp_free_i32(m3
);
1810 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1812 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1813 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1814 tcg_temp_free_i32(m3
);
1815 return_low128(o
->out2
);
1819 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1821 int r2
= get_field(s
->fields
, r2
);
1822 TCGv_i64 len
= tcg_temp_new_i64();
1824 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1826 return_low128(o
->out
);
1828 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1829 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1830 tcg_temp_free_i64(len
);
1835 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1837 int l
= get_field(s
->fields
, l1
);
1842 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1843 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1846 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1847 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1850 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1851 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1854 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1855 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1858 vl
= tcg_const_i32(l
);
1859 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1860 tcg_temp_free_i32(vl
);
1864 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1868 static ExitStatus
op_clcl(DisasContext
*s
, DisasOps
*o
)
1870 int r1
= get_field(s
->fields
, r1
);
1871 int r2
= get_field(s
->fields
, r2
);
1874 /* r1 and r2 must be even. */
1875 if (r1
& 1 || r2
& 1) {
1876 gen_program_exception(s
, PGM_SPECIFICATION
);
1877 return EXIT_NORETURN
;
1880 t1
= tcg_const_i32(r1
);
1881 t2
= tcg_const_i32(r2
);
1882 gen_helper_clcl(cc_op
, cpu_env
, t1
, t2
);
1883 tcg_temp_free_i32(t1
);
1884 tcg_temp_free_i32(t2
);
1889 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1891 int r1
= get_field(s
->fields
, r1
);
1892 int r3
= get_field(s
->fields
, r3
);
1895 /* r1 and r3 must be even. */
1896 if (r1
& 1 || r3
& 1) {
1897 gen_program_exception(s
, PGM_SPECIFICATION
);
1898 return EXIT_NORETURN
;
1901 t1
= tcg_const_i32(r1
);
1902 t3
= tcg_const_i32(r3
);
1903 gen_helper_clcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1904 tcg_temp_free_i32(t1
);
1905 tcg_temp_free_i32(t3
);
1910 static ExitStatus
op_clclu(DisasContext
*s
, DisasOps
*o
)
1912 int r1
= get_field(s
->fields
, r1
);
1913 int r3
= get_field(s
->fields
, r3
);
1916 /* r1 and r3 must be even. */
1917 if (r1
& 1 || r3
& 1) {
1918 gen_program_exception(s
, PGM_SPECIFICATION
);
1919 return EXIT_NORETURN
;
1922 t1
= tcg_const_i32(r1
);
1923 t3
= tcg_const_i32(r3
);
1924 gen_helper_clclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1925 tcg_temp_free_i32(t1
);
1926 tcg_temp_free_i32(t3
);
1931 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1933 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1934 TCGv_i32 t1
= tcg_temp_new_i32();
1935 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1936 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1938 tcg_temp_free_i32(t1
);
1939 tcg_temp_free_i32(m3
);
1943 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1945 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1947 return_low128(o
->in2
);
1951 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1953 TCGv_i64 t
= tcg_temp_new_i64();
1954 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1955 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1956 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1957 tcg_temp_free_i64(t
);
1961 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1963 int d2
= get_field(s
->fields
, d2
);
1964 int b2
= get_field(s
->fields
, b2
);
1967 /* Note that in1 = R3 (new value) and
1968 in2 = (zero-extended) R1 (expected value). */
1970 addr
= get_address(s
, 0, b2
, d2
);
1971 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
1972 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
1973 tcg_temp_free_i64(addr
);
1975 /* Are the memory and expected values (un)equal? Note that this setcond
1976 produces the output CC value, thus the NE sense of the test. */
1977 cc
= tcg_temp_new_i64();
1978 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1979 tcg_gen_extrl_i64_i32(cc_op
, cc
);
1980 tcg_temp_free_i64(cc
);
1986 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1988 int r1
= get_field(s
->fields
, r1
);
1989 int r3
= get_field(s
->fields
, r3
);
1990 int d2
= get_field(s
->fields
, d2
);
1991 int b2
= get_field(s
->fields
, b2
);
1993 TCGv_i32 t_r1
, t_r3
;
1995 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1996 addr
= get_address(s
, 0, b2
, d2
);
1997 t_r1
= tcg_const_i32(r1
);
1998 t_r3
= tcg_const_i32(r3
);
1999 if (tb_cflags(s
->tb
) & CF_PARALLEL
) {
2000 gen_helper_cdsg_parallel(cpu_env
, addr
, t_r1
, t_r3
);
2002 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
2004 tcg_temp_free_i64(addr
);
2005 tcg_temp_free_i32(t_r1
);
2006 tcg_temp_free_i32(t_r3
);
2012 static ExitStatus
op_csst(DisasContext
*s
, DisasOps
*o
)
2014 int r3
= get_field(s
->fields
, r3
);
2015 TCGv_i32 t_r3
= tcg_const_i32(r3
);
2017 if (tb_cflags(s
->tb
) & CF_PARALLEL
) {
2018 gen_helper_csst_parallel(cc_op
, cpu_env
, t_r3
, o
->in1
, o
->in2
);
2020 gen_helper_csst(cc_op
, cpu_env
, t_r3
, o
->in1
, o
->in2
);
2022 tcg_temp_free_i32(t_r3
);
2028 #ifndef CONFIG_USER_ONLY
2029 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
2031 TCGMemOp mop
= s
->insn
->data
;
2032 TCGv_i64 addr
, old
, cc
;
2033 TCGLabel
*lab
= gen_new_label();
2035 /* Note that in1 = R1 (zero-extended expected value),
2036 out = R1 (original reg), out2 = R1+1 (new value). */
2038 check_privileged(s
);
2039 addr
= tcg_temp_new_i64();
2040 old
= tcg_temp_new_i64();
2041 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2042 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2043 get_mem_index(s
), mop
| MO_ALIGN
);
2044 tcg_temp_free_i64(addr
);
2046 /* Are the memory and expected values (un)equal? */
2047 cc
= tcg_temp_new_i64();
2048 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2049 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2051 /* Write back the output now, so that it happens before the
2052 following branch, so that we don't need local temps. */
2053 if ((mop
& MO_SIZE
) == MO_32
) {
2054 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2056 tcg_gen_mov_i64(o
->out
, old
);
2058 tcg_temp_free_i64(old
);
2060 /* If the comparison was equal, and the LSB of R2 was set,
2061 then we need to flush the TLB (for all cpus). */
2062 tcg_gen_xori_i64(cc
, cc
, 1);
2063 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2064 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2065 tcg_temp_free_i64(cc
);
2067 gen_helper_purge(cpu_env
);
2074 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2076 TCGv_i64 t1
= tcg_temp_new_i64();
2077 TCGv_i32 t2
= tcg_temp_new_i32();
2078 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2079 gen_helper_cvd(t1
, t2
);
2080 tcg_temp_free_i32(t2
);
2081 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2082 tcg_temp_free_i64(t1
);
2086 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2088 int m3
= get_field(s
->fields
, m3
);
2089 TCGLabel
*lab
= gen_new_label();
2092 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2093 if (s
->insn
->data
) {
2094 c
= tcg_unsigned_cond(c
);
2096 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2105 static ExitStatus
op_cuXX(DisasContext
*s
, DisasOps
*o
)
2107 int m3
= get_field(s
->fields
, m3
);
2108 int r1
= get_field(s
->fields
, r1
);
2109 int r2
= get_field(s
->fields
, r2
);
2110 TCGv_i32 tr1
, tr2
, chk
;
2112 /* R1 and R2 must both be even. */
2113 if ((r1
| r2
) & 1) {
2114 gen_program_exception(s
, PGM_SPECIFICATION
);
2115 return EXIT_NORETURN
;
2117 if (!s390_has_feat(S390_FEAT_ETF3_ENH
)) {
2121 tr1
= tcg_const_i32(r1
);
2122 tr2
= tcg_const_i32(r2
);
2123 chk
= tcg_const_i32(m3
);
2125 switch (s
->insn
->data
) {
2127 gen_helper_cu12(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2130 gen_helper_cu14(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2133 gen_helper_cu21(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2136 gen_helper_cu24(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2139 gen_helper_cu41(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2142 gen_helper_cu42(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2145 g_assert_not_reached();
2148 tcg_temp_free_i32(tr1
);
2149 tcg_temp_free_i32(tr2
);
2150 tcg_temp_free_i32(chk
);
2155 #ifndef CONFIG_USER_ONLY
2156 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2158 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2159 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2160 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2162 check_privileged(s
);
2163 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2165 tcg_temp_free_i32(func_code
);
2166 tcg_temp_free_i32(r3
);
2167 tcg_temp_free_i32(r1
);
2172 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2174 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2175 return_low128(o
->out
);
2179 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2181 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2182 return_low128(o
->out
);
2186 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2188 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2189 return_low128(o
->out
);
2193 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2195 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2196 return_low128(o
->out
);
2200 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2202 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2206 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2208 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2212 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2214 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2215 return_low128(o
->out2
);
2219 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2221 int r2
= get_field(s
->fields
, r2
);
2222 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2226 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2228 /* No cache information provided. */
2229 tcg_gen_movi_i64(o
->out
, -1);
2233 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2235 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2239 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2241 int r1
= get_field(s
->fields
, r1
);
2242 int r2
= get_field(s
->fields
, r2
);
2243 TCGv_i64 t
= tcg_temp_new_i64();
2245 /* Note the "subsequently" in the PoO, which implies a defined result
2246 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2247 tcg_gen_shri_i64(t
, psw_mask
, 32);
2248 store_reg32_i64(r1
, t
);
2250 store_reg32_i64(r2
, psw_mask
);
2253 tcg_temp_free_i64(t
);
2257 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2259 int r1
= get_field(s
->fields
, r1
);
2263 /* Nested EXECUTE is not allowed. */
2264 if (unlikely(s
->ex_value
)) {
2265 gen_program_exception(s
, PGM_EXECUTE
);
2266 return EXIT_NORETURN
;
2273 v1
= tcg_const_i64(0);
2278 ilen
= tcg_const_i32(s
->ilen
);
2279 gen_helper_ex(cpu_env
, ilen
, v1
, o
->in2
);
2280 tcg_temp_free_i32(ilen
);
2283 tcg_temp_free_i64(v1
);
2286 return EXIT_PC_CC_UPDATED
;
2289 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2291 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2292 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2293 tcg_temp_free_i32(m3
);
2297 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2299 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2300 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2301 tcg_temp_free_i32(m3
);
2305 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2307 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2308 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2309 return_low128(o
->out2
);
2310 tcg_temp_free_i32(m3
);
2314 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2316 /* We'll use the original input for cc computation, since we get to
2317 compare that against 0, which ought to be better than comparing
2318 the real output against 64. It also lets cc_dst be a convenient
2319 temporary during our computation. */
2320 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2322 /* R1 = IN ? CLZ(IN) : 64. */
2323 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2325 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2326 value by 64, which is undefined. But since the shift is 64 iff the
2327 input is zero, we still get the correct result after and'ing. */
2328 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2329 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2330 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2334 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2336 int m3
= get_field(s
->fields
, m3
);
2337 int pos
, len
, base
= s
->insn
->data
;
2338 TCGv_i64 tmp
= tcg_temp_new_i64();
2343 /* Effectively a 32-bit load. */
2344 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2351 /* Effectively a 16-bit load. */
2352 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2360 /* Effectively an 8-bit load. */
2361 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2366 pos
= base
+ ctz32(m3
) * 8;
2367 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2368 ccm
= ((1ull << len
) - 1) << pos
;
2372 /* This is going to be a sequence of loads and inserts. */
2373 pos
= base
+ 32 - 8;
2377 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2378 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2379 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2382 m3
= (m3
<< 1) & 0xf;
2388 tcg_gen_movi_i64(tmp
, ccm
);
2389 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2390 tcg_temp_free_i64(tmp
);
2394 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2396 int shift
= s
->insn
->data
& 0xff;
2397 int size
= s
->insn
->data
>> 8;
2398 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2402 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2407 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2409 t1
= tcg_temp_new_i64();
2410 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2411 tcg_gen_shri_i64(t1
, t1
, 36);
2412 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2414 tcg_gen_extu_i32_i64(t1
, cc_op
);
2415 tcg_gen_shli_i64(t1
, t1
, 28);
2416 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2417 tcg_temp_free_i64(t1
);
2421 #ifndef CONFIG_USER_ONLY
2422 static ExitStatus
op_idte(DisasContext
*s
, DisasOps
*o
)
2426 check_privileged(s
);
2427 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2428 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2430 m4
= tcg_const_i32(0);
2432 gen_helper_idte(cpu_env
, o
->in1
, o
->in2
, m4
);
2433 tcg_temp_free_i32(m4
);
2437 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2441 check_privileged(s
);
2442 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2443 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2445 m4
= tcg_const_i32(0);
2447 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
, m4
);
2448 tcg_temp_free_i32(m4
);
2452 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2454 check_privileged(s
);
2455 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2460 static ExitStatus
op_msa(DisasContext
*s
, DisasOps
*o
)
2462 int r1
= have_field(s
->fields
, r1
) ? get_field(s
->fields
, r1
) : 0;
2463 int r2
= have_field(s
->fields
, r2
) ? get_field(s
->fields
, r2
) : 0;
2464 int r3
= have_field(s
->fields
, r3
) ? get_field(s
->fields
, r3
) : 0;
2465 TCGv_i32 t_r1
, t_r2
, t_r3
, type
;
2467 switch (s
->insn
->data
) {
2468 case S390_FEAT_TYPE_KMCTR
:
2469 if (r3
& 1 || !r3
) {
2470 gen_program_exception(s
, PGM_SPECIFICATION
);
2471 return EXIT_NORETURN
;
2474 case S390_FEAT_TYPE_PPNO
:
2475 case S390_FEAT_TYPE_KMF
:
2476 case S390_FEAT_TYPE_KMC
:
2477 case S390_FEAT_TYPE_KMO
:
2478 case S390_FEAT_TYPE_KM
:
2479 if (r1
& 1 || !r1
) {
2480 gen_program_exception(s
, PGM_SPECIFICATION
);
2481 return EXIT_NORETURN
;
2484 case S390_FEAT_TYPE_KMAC
:
2485 case S390_FEAT_TYPE_KIMD
:
2486 case S390_FEAT_TYPE_KLMD
:
2487 if (r2
& 1 || !r2
) {
2488 gen_program_exception(s
, PGM_SPECIFICATION
);
2489 return EXIT_NORETURN
;
2492 case S390_FEAT_TYPE_PCKMO
:
2493 case S390_FEAT_TYPE_PCC
:
2496 g_assert_not_reached();
2499 t_r1
= tcg_const_i32(r1
);
2500 t_r2
= tcg_const_i32(r2
);
2501 t_r3
= tcg_const_i32(r3
);
2502 type
= tcg_const_i32(s
->insn
->data
);
2503 gen_helper_msa(cc_op
, cpu_env
, t_r1
, t_r2
, t_r3
, type
);
2505 tcg_temp_free_i32(t_r1
);
2506 tcg_temp_free_i32(t_r2
);
2507 tcg_temp_free_i32(t_r3
);
2508 tcg_temp_free_i32(type
);
2512 static ExitStatus
op_keb(DisasContext
*s
, DisasOps
*o
)
2514 gen_helper_keb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2519 static ExitStatus
op_kdb(DisasContext
*s
, DisasOps
*o
)
2521 gen_helper_kdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2526 static ExitStatus
op_kxb(DisasContext
*s
, DisasOps
*o
)
2528 gen_helper_kxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2533 static ExitStatus
op_laa(DisasContext
*s
, DisasOps
*o
)
2535 /* The real output is indeed the original value in memory;
2536 recompute the addition for the computation of CC. */
2537 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2538 s
->insn
->data
| MO_ALIGN
);
2539 /* However, we need to recompute the addition for setting CC. */
2540 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2544 static ExitStatus
op_lan(DisasContext
*s
, DisasOps
*o
)
2546 /* The real output is indeed the original value in memory;
2547 recompute the addition for the computation of CC. */
2548 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2549 s
->insn
->data
| MO_ALIGN
);
2550 /* However, we need to recompute the operation for setting CC. */
2551 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2555 static ExitStatus
op_lao(DisasContext
*s
, DisasOps
*o
)
2557 /* The real output is indeed the original value in memory;
2558 recompute the addition for the computation of CC. */
2559 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2560 s
->insn
->data
| MO_ALIGN
);
2561 /* However, we need to recompute the operation for setting CC. */
2562 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2566 static ExitStatus
op_lax(DisasContext
*s
, DisasOps
*o
)
2568 /* The real output is indeed the original value in memory;
2569 recompute the addition for the computation of CC. */
2570 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2571 s
->insn
->data
| MO_ALIGN
);
2572 /* However, we need to recompute the operation for setting CC. */
2573 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2577 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2579 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2583 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2585 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2589 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2591 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2595 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2597 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2601 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2603 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2604 return_low128(o
->out2
);
2608 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2610 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2611 return_low128(o
->out2
);
2615 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2617 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2621 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2623 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2627 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2629 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2633 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2635 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2639 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2641 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2645 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2647 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2651 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2653 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2657 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2659 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2663 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2665 TCGLabel
*lab
= gen_new_label();
2666 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2667 /* The value is stored even in case of trap. */
2668 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2674 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2676 TCGLabel
*lab
= gen_new_label();
2677 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2678 /* The value is stored even in case of trap. */
2679 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2685 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2687 TCGLabel
*lab
= gen_new_label();
2688 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2689 /* The value is stored even in case of trap. */
2690 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2696 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2698 TCGLabel
*lab
= gen_new_label();
2699 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2700 /* The value is stored even in case of trap. */
2701 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2707 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2709 TCGLabel
*lab
= gen_new_label();
2710 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2711 /* The value is stored even in case of trap. */
2712 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2718 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2722 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2725 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2729 TCGv_i32 t32
= tcg_temp_new_i32();
2732 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2735 t
= tcg_temp_new_i64();
2736 tcg_gen_extu_i32_i64(t
, t32
);
2737 tcg_temp_free_i32(t32
);
2739 z
= tcg_const_i64(0);
2740 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2741 tcg_temp_free_i64(t
);
2742 tcg_temp_free_i64(z
);
2748 #ifndef CONFIG_USER_ONLY
2749 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2751 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2752 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2753 check_privileged(s
);
2754 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2755 tcg_temp_free_i32(r1
);
2756 tcg_temp_free_i32(r3
);
2757 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2758 return EXIT_PC_STALE_NOCHAIN
;
2761 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2763 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2764 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2765 check_privileged(s
);
2766 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2767 tcg_temp_free_i32(r1
);
2768 tcg_temp_free_i32(r3
);
2769 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2770 return EXIT_PC_STALE_NOCHAIN
;
2773 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2775 check_privileged(s
);
2776 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2781 static ExitStatus
op_lpp(DisasContext
*s
, DisasOps
*o
)
2783 check_privileged(s
);
2785 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
2789 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2793 check_privileged(s
);
2794 per_breaking_event(s
);
2796 t1
= tcg_temp_new_i64();
2797 t2
= tcg_temp_new_i64();
2798 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2799 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2800 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2801 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2802 tcg_gen_shli_i64(t1
, t1
, 32);
2803 gen_helper_load_psw(cpu_env
, t1
, t2
);
2804 tcg_temp_free_i64(t1
);
2805 tcg_temp_free_i64(t2
);
2806 return EXIT_NORETURN
;
2809 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2813 check_privileged(s
);
2814 per_breaking_event(s
);
2816 t1
= tcg_temp_new_i64();
2817 t2
= tcg_temp_new_i64();
2818 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2819 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2820 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2821 gen_helper_load_psw(cpu_env
, t1
, t2
);
2822 tcg_temp_free_i64(t1
);
2823 tcg_temp_free_i64(t2
);
2824 return EXIT_NORETURN
;
2828 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2830 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2831 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2832 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2833 tcg_temp_free_i32(r1
);
2834 tcg_temp_free_i32(r3
);
/* LOAD MULTIPLE (32-bit): load registers r1..r3 (wrapping mod 16) from
   consecutive words.  First and last are loaded up front so that any page
   fault happens before any register is modified. */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LOAD MULTIPLE HIGH: like op_lm32, but the loaded words go into the high
   halves of the 64-bit registers (store_reg32h_i64). */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LOAD MULTIPLE (64-bit): load registers r1..r3 (wrapping mod 16) from
   consecutive doublewords; faults are triggered before any register writes. */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LOAD PAIR DISJOINT: load two operands "interlocked".  In a parallel
   context we punt to the single-stepped EXCP_ATOMIC slow path. */
static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    TCGMemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step. */
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return EXIT_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* LOAD PAIR FROM QUADWORD: 16-byte load; use the parallel (atomic) helper
   when translating for a parallel context. */
static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
{
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    }
    return_low128(o->out2);
    return NO_EXIT;
}
3008 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS (32-bit): privileged load bypassing translation. */
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* LOAD USING REAL ADDRESS (64-bit): privileged load bypassing translation. */
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* LOAD AND ZERO RIGHTMOST BYTE: copy in2 with the low 8 bits cleared. */
static ExitStatus op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return NO_EXIT;
}
/* Generic register move: steal in2 as the output to avoid a copy. */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
/* Move with update of access register 1 per the current address-space
   control (used by MVCDK/MVCSK-style insns).  The ar1 value written depends
   on the ASC mode in the TB flags. */
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env,
                              offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}
/* Move a 128-bit pair: steal both inputs as the outputs to avoid copies. */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
/* MOVE (MVC): memory-to-memory move of l1+1 bytes via helper. */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* MOVE INVERSE: byte-reversed memory-to-memory move via helper. */
static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* MOVE LONG: helper-based; r1/r2 designate even-odd register pairs and
   must therefore be even, else a specification exception is raised. */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return NO_EXIT;
}
/* MOVE LONG EXTENDED: helper-based; r1/r3 must be even (register pairs). */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
/* MOVE LONG UNICODE: helper-based; r1/r3 must be even (register pairs). */
static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
/* MOVE WITH OPTIONAL SPECIFICATIONS: fully helper-based; sets the CC. */
static ExitStatus op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
3173 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-address-space move; length in reg l1. */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* MOVE TO SECONDARY: privileged cross-address-space move; length in reg l1. */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* MOVE NUMERICS: move the low (digit) nibbles of l1+1 bytes via helper. */
static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* MOVE WITH OFFSET: nibble-offset move via helper. */
static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* MOVE PAGE: helper-based page copy; r0 carries the option bits; sets CC. */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* MOVE STRING: helper copies up to the terminator in r0; updates both
   address registers and sets CC 1 (complete). */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    return_low128(o->in2);
    gen_op_movi_cc(s, 1);
    return NO_EXIT;
}
/* MOVE ZONES: move the high (zone) nibbles of l1+1 bytes via helper. */
static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* Integer multiply, low 64 bits of the product. */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* Unsigned 64x64 -> 128-bit multiply; high half in out, low half in out2. */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* MULTIPLY (short BFP) via softfloat helper. */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* MULTIPLY (short to long BFP) via softfloat helper. */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* MULTIPLY (long BFP) via softfloat helper. */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* MULTIPLY (extended BFP): 128-bit operands; low half returned via
   return_low128. */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* MULTIPLY (long to extended BFP): produces a 128-bit result. */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f[r3]. */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}
/* MULTIPLY AND ADD (long BFP): out = in1 * in2 + f[r3]. */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
/* MULTIPLY AND SUBTRACT (short BFP): out = in1 * in2 - f[r3]. */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}
/* MULTIPLY AND SUBTRACT (long BFP): out = in1 * in2 - f[r3]. */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
/* LOAD NEGATIVE: out = -|in2|, via a conditional move on the sign. */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    /* If in2 >= 0 select the negation, else in2 is already negative. */
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}
/* LOAD NEGATIVE (short BFP): force the sign bit (bit 31) on. */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
/* LOAD NEGATIVE (long BFP): force the sign bit (bit 63) on. */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
/* LOAD NEGATIVE (extended BFP): set sign in the high half, copy the low. */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* AND (character): memory-to-memory AND of l1+1 bytes; helper sets the CC. */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* LOAD COMPLEMENT: two's-complement negate. */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}
/* LOAD COMPLEMENT (short BFP): flip the sign bit. */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
/* LOAD COMPLEMENT (long BFP): flip the sign bit. */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
/* LOAD COMPLEMENT (extended BFP): flip sign in the high half, copy the low. */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* OR (character): memory-to-memory OR of l1+1 bytes; helper sets the CC. */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* Bitwise OR. */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* OR IMMEDIATE into a sub-field of the register: insn->data encodes the
   field's shift (low byte) and width (high byte); CC reflects only the
   manipulated bits. */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3402 static ExitStatus
op_oi(DisasContext
*s
, DisasOps
*o
)
3404 o
->in1
= tcg_temp_new_i64();
3406 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
3407 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
3409 /* Perform the atomic operation in memory. */
3410 tcg_gen_atomic_fetch_or_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
3414 /* Recompute also for atomic case: needed for setting CC. */
3415 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3417 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
3418 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
/* PACK: convert zoned decimal to packed decimal via helper. */
static ExitStatus op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* PACK ASCII: helper-based; the source length l2+1 may not exceed 32 bytes. */
static ExitStatus op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes. */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* PACK UNICODE: helper-based; the source length l2+1 must be even and may
   not exceed 64 bytes. */
static ExitStatus op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes. */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* POPULATION COUNT via helper. */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
3469 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; flushes translation state via helper. */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
3478 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3480 int i3
= get_field(s
->fields
, i3
);
3481 int i4
= get_field(s
->fields
, i4
);
3482 int i5
= get_field(s
->fields
, i5
);
3483 int do_zero
= i4
& 0x80;
3484 uint64_t mask
, imask
, pmask
;
3487 /* Adjust the arguments for the specific insn. */
3488 switch (s
->fields
->op2
) {
3489 case 0x55: /* risbg */
3490 case 0x59: /* risbgn */
3495 case 0x5d: /* risbhg */
3498 pmask
= 0xffffffff00000000ull
;
3500 case 0x51: /* risblg */
3503 pmask
= 0x00000000ffffffffull
;
3506 g_assert_not_reached();
3509 /* MASK is the set of bits to be inserted from R2.
3510 Take care for I3/I4 wraparound. */
3513 mask
^= pmask
>> i4
>> 1;
3515 mask
|= ~(pmask
>> i4
>> 1);
3519 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3520 insns, we need to keep the other half of the register. */
3521 imask
= ~mask
| ~pmask
;
3529 if (s
->fields
->op2
== 0x5d) {
3533 /* In some cases we can implement this with extract. */
3534 if (imask
== 0 && pos
== 0 && len
> 0 && len
<= rot
) {
3535 tcg_gen_extract_i64(o
->out
, o
->in2
, 64 - rot
, len
);
3539 /* In some cases we can implement this with deposit. */
3540 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3541 /* Note that we rotate the bits to be inserted to the lsb, not to
3542 the position as described in the PoO. */
3543 rot
= (rot
- pos
) & 63;
3548 /* Rotate the input as necessary. */
3549 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3551 /* Insert the selected bits into the output. */
3554 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3556 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3558 } else if (imask
== 0) {
3559 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3561 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3562 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3563 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3568 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3570 int i3
= get_field(s
->fields
, i3
);
3571 int i4
= get_field(s
->fields
, i4
);
3572 int i5
= get_field(s
->fields
, i5
);
3575 /* If this is a test-only form, arrange to discard the result. */
3577 o
->out
= tcg_temp_new_i64();
3585 /* MASK is the set of bits to be operated on from R2.
3586 Take care for I3/I4 wraparound. */
3589 mask
^= ~0ull >> i4
>> 1;
3591 mask
|= ~(~0ull >> i4
>> 1);
3594 /* Rotate the input as necessary. */
3595 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3598 switch (s
->fields
->op2
) {
3599 case 0x55: /* AND */
3600 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3601 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3604 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3605 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3607 case 0x57: /* XOR */
3608 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3609 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3616 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3617 set_cc_nz_u64(s
, cc_dst
);
/* Byte-swap the low 16 bits. */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}
/* Byte-swap the low 32 bits. */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}
/* Byte-swap all 64 bits. */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
/* ROTATE LEFT (32-bit): rotate in 32-bit width, zero-extend into out. */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}
/* ROTATE LEFT (64-bit). */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3660 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; helper sets the CC. */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* SET ADDRESS SPACE CONTROL FAST: privileged; the ASC mode changes, so the
   TB must end. */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
3678 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3680 int sam
= s
->insn
->data
;
3696 /* Bizarre but true, we check the address of the current insn for the
3697 specification exception, not the next to be executed. Thus the PoO
3698 documents that Bad Things Happen two bytes before the end. */
3699 if (s
->pc
& ~mask
) {
3700 gen_program_exception(s
, PGM_SPECIFICATION
);
3701 return EXIT_NORETURN
;
3705 tsam
= tcg_const_i64(sam
);
3706 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3707 tcg_temp_free_i64(tsam
);
3709 /* Always exit the TB, since we (may have) changed execution mode. */
3710 return EXIT_PC_STALE
;
/* SET ACCESS REGISTER: store the low 32 bits of in2 into aregs[r1]. */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
/* SUBTRACT (short BFP) via softfloat helper. */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* SUBTRACT (long BFP) via softfloat helper. */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* SUBTRACT (extended BFP): 128-bit result, low half via return_low128. */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* SQUARE ROOT (short BFP) via softfloat helper. */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* SQUARE ROOT (long BFP) via softfloat helper. */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* SQUARE ROOT (extended BFP): 128-bit result, low half via return_low128. */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
3758 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged; helper sets the CC. */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}
/* SIGNAL PROCESSOR: privileged inter-CPU order; helper sets the CC. */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3780 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3787 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3789 /* We want to store when the condition is fulfilled, so branch
3790 out when it's not */
3791 c
.cond
= tcg_invert_cond(c
.cond
);
3793 lab
= gen_new_label();
3795 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3797 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3801 r1
= get_field(s
->fields
, r1
);
3802 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3803 switch (s
->insn
->data
) {
3805 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3808 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3810 case 2: /* STOCFH */
3811 h
= tcg_temp_new_i64();
3812 tcg_gen_shri_i64(h
, regs
[r1
], 32);
3813 tcg_gen_qemu_st32(h
, a
, get_mem_index(s
));
3814 tcg_temp_free_i64(h
);
3817 g_assert_not_reached();
3819 tcg_temp_free_i64(a
);
/* SHIFT LEFT SINGLE (arithmetic): CC is computed by a dedicated cc_op;
   insn->data selects 32- vs 64-bit semantics (position of the sign bit). */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
/* SHIFT LEFT SINGLE LOGICAL. */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SHIFT RIGHT SINGLE (arithmetic). */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SHIFT RIGHT SINGLE LOGICAL. */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SET FPC: install a new floating-point control register via helper. */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}
/* SET FPC AND SIGNAL: helper may raise a simulated IEEE exception. */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
3869 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3871 int b2
= get_field(s
->fields
, b2
);
3872 int d2
= get_field(s
->fields
, d2
);
3873 TCGv_i64 t1
= tcg_temp_new_i64();
3874 TCGv_i64 t2
= tcg_temp_new_i64();
3877 switch (s
->fields
->op2
) {
3878 case 0x99: /* SRNM */
3881 case 0xb8: /* SRNMB */
3884 case 0xb9: /* SRNMT */
3890 mask
= (1 << len
) - 1;
3892 /* Insert the value into the appropriate field of the FPC. */
3894 tcg_gen_movi_i64(t1
, d2
& mask
);
3896 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3897 tcg_gen_andi_i64(t1
, t1
, mask
);
3899 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3900 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3901 tcg_temp_free_i64(t1
);
3903 /* Then install the new FPC to set the rounding mode in fpu_status. */
3904 gen_helper_sfpc(cpu_env
, t2
);
3905 tcg_temp_free_i64(t2
);
/* SET PROGRAM MASK: CC comes from bits 2-3 of the register; the program
   mask is deposited into the PSW mask. */
static ExitStatus op_spm(DisasContext *s, DisasOps *o)
{
    /* Extract the condition code from bits 28-29 of the low word. */
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return NO_EXIT;
}
/* EXTRACT CPU TIME: store (first operand - CPU timer) in GR0, the second
   operand in GR1, and load the third operand doubleword into r3. */
static ExitStatus op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s->fields, b1);
    int d1 = get_field(s->fields, d1);
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = get_address(s, 0, r3, 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3950 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; key is bits 4-7 of the operand. */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return NO_EXIT;
}
/* SET STORAGE KEY EXTENDED: privileged; helper-based. */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* SET SYSTEM MASK: privileged; replace PSW mask bits 0-7. */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    return EXIT_PC_STALE_NOCHAIN;
}
/* STORE CPU ADDRESS: privileged; reads the core id from the CPU state. */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return NO_EXIT;
}
/* STORE CLOCK: helper reads the TOD clock; CC is always 0. */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* STORE CLOCK EXTENDED: build the 16-byte extended TOD value (clock shifted
   into a 104-bit field, plus the TOD programmable register) and store it. */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16 bit value store in an uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* SET CLOCK COMPARATOR: privileged; helper-based. */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}
/* SET CLOCK PROGRAMMABLE FIELD: privileged; operand in GR0. */
static ExitStatus op_sckpf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckpf(cpu_env, regs[0]);
    return NO_EXIT;
}
/* STORE CLOCK COMPARATOR: privileged; helper-based. */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
/* STORE CONTROL (64-bit): privileged; store control registers r1..r3. */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CONTROL (32-bit): privileged; store control registers r1..r3. */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CPU ID: privileged; aligned 8-byte store of the cpuid field. */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s),
                        MO_TEQ | MO_ALIGN);
    return NO_EXIT;
}
/* SET CPU TIMER: privileged; helper-based. */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}
/* STORE FACILITY LIST: privileged; helper writes to low core. */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stfl(cpu_env);
    return NO_EXIT;
}
/* STORE CPU TIMER: privileged; helper-based. */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
/* STORE SYSTEM INFORMATION: privileged; function code in GR0, selector in
   GR1; helper sets the CC. */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
/* SET PREFIX: privileged; helper-based. */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}
/* CANCEL SUBCHANNEL: privileged channel I/O op; subchannel id in GR1. */
static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
/* CLEAR SUBCHANNEL: privileged channel I/O op; subchannel id in GR1. */
static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
/* HALT SUBCHANNEL: privileged channel I/O op; subchannel id in GR1. */
static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
/* MODIFY SUBCHANNEL: privileged; subchannel id in GR1, SCHIB address in in2. */
static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* RESET CHANNEL PATH: privileged; channel path id in GR1. */
static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
/* RESUME SUBCHANNEL: privileged; subchannel id in GR1. */
static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
/* SET ADDRESS LIMIT: privileged; operand in GR1. */
static ExitStatus op_sal(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sal(cpu_env, regs[1]);
    return NO_EXIT;
}
/* SET CHANNEL MONITOR: privileged; operands in GR1/GR2 plus the address. */
static ExitStatus op_schm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return NO_EXIT;
}
/* SIGNAL ADAPTER: privileged; not provided, so report the subchannel as
   not operational. */
static ExitStatus op_siga(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return NO_EXIT;
}
/* STORE CHANNEL PATH STATUS: privileged; suppressed (no-op) here. */
static ExitStatus op_stcps(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* The instruction is suppressed if not provided. */
    return NO_EXIT;
}
/* START SUBCHANNEL: privileged; subchannel id in GR1, ORB address in in2. */
static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* STORE SUBCHANNEL: privileged; subchannel id in GR1, SCHIB address in in2. */
static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* STORE CHANNEL REPORT WORD: privileged; helper sets the CC. */
static ExitStatus op_stcrw(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* TEST SUBCHANNEL: privileged; subchannel id in GR1, IRB address in in2. */
static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* CHANNEL SUBSYSTEM CALL: privileged; command block address in in2. */
static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* STORE PREFIX: privileged; mask to the architected prefix bits. */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
/* STORE THEN {AND,OR} SYSTEM MASK: store the current system mask first
   (so a fault on the store leaves the mask unmodified), then apply the
   immediate; op 0xac selects the AND form. */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    return EXIT_PC_STALE_NOCHAIN;
}
/* STORE USING REAL ADDRESS (32-bit): privileged store bypassing translation. */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
/* STORE USING REAL ADDRESS (64-bit): privileged store bypassing translation. */
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
4269 static ExitStatus
op_stfle(DisasContext
*s
, DisasOps
*o
)
4271 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
4276 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
4278 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
4282 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
4284 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
4288 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
4290 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
4294 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
4296 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
4300 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
4302 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4303 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4304 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
4305 tcg_temp_free_i32(r1
);
4306 tcg_temp_free_i32(r3
);
4310 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
4312 int m3
= get_field(s
->fields
, m3
);
4313 int pos
, base
= s
->insn
->data
;
4314 TCGv_i64 tmp
= tcg_temp_new_i64();
4316 pos
= base
+ ctz32(m3
) * 8;
4319 /* Effectively a 32-bit store. */
4320 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4321 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
4327 /* Effectively a 16-bit store. */
4328 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4329 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
4336 /* Effectively an 8-bit store. */
4337 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4338 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4342 /* This is going to be a sequence of shifts and stores. */
4343 pos
= base
+ 32 - 8;
4346 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4347 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4348 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
4350 m3
= (m3
<< 1) & 0xf;
4355 tcg_temp_free_i64(tmp
);
4359 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
4361 int r1
= get_field(s
->fields
, r1
);
4362 int r3
= get_field(s
->fields
, r3
);
4363 int size
= s
->insn
->data
;
4364 TCGv_i64 tsize
= tcg_const_i64(size
);
4368 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
4370 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
4375 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
4379 tcg_temp_free_i64(tsize
);
4383 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
4385 int r1
= get_field(s
->fields
, r1
);
4386 int r3
= get_field(s
->fields
, r3
);
4387 TCGv_i64 t
= tcg_temp_new_i64();
4388 TCGv_i64 t4
= tcg_const_i64(4);
4389 TCGv_i64 t32
= tcg_const_i64(32);
4392 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
4393 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
4397 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
4401 tcg_temp_free_i64(t
);
4402 tcg_temp_free_i64(t4
);
4403 tcg_temp_free_i64(t32
);
4407 static ExitStatus
op_stpq(DisasContext
*s
, DisasOps
*o
)
4409 if (tb_cflags(s
->tb
) & CF_PARALLEL
) {
4410 gen_helper_stpq_parallel(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4412 gen_helper_stpq(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4417 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
4419 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4420 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4422 gen_helper_srst(cpu_env
, r1
, r2
);
4424 tcg_temp_free_i32(r1
);
4425 tcg_temp_free_i32(r2
);
4430 static ExitStatus
op_srstu(DisasContext
*s
, DisasOps
*o
)
4432 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4433 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4435 gen_helper_srstu(cpu_env
, r1
, r2
);
4437 tcg_temp_free_i32(r1
);
4438 tcg_temp_free_i32(r2
);
4443 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
4445 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4449 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
4454 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4456 /* The !borrow flag is the msb of CC. Since we want the inverse of
4457 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4458 disas_jcc(s
, &cmp
, 8 | 4);
4459 borrow
= tcg_temp_new_i64();
4461 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
4463 TCGv_i32 t
= tcg_temp_new_i32();
4464 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
4465 tcg_gen_extu_i32_i64(borrow
, t
);
4466 tcg_temp_free_i32(t
);
4470 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
4471 tcg_temp_free_i64(borrow
);
4475 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
4482 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
4483 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4484 tcg_temp_free_i32(t
);
4486 t
= tcg_const_i32(s
->ilen
);
4487 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4488 tcg_temp_free_i32(t
);
4490 gen_exception(EXCP_SVC
);
4491 return EXIT_NORETURN
;
4494 static ExitStatus
op_tam(DisasContext
*s
, DisasOps
*o
)
4498 cc
|= (s
->tb
->flags
& FLAG_MASK_64
) ? 2 : 0;
4499 cc
|= (s
->tb
->flags
& FLAG_MASK_32
) ? 1 : 0;
4500 gen_op_movi_cc(s
, cc
);
4504 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
4506 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4511 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4513 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4518 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4520 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4525 #ifndef CONFIG_USER_ONLY
4527 static ExitStatus
op_testblock(DisasContext
*s
, DisasOps
*o
)
4529 check_privileged(s
);
4530 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4535 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
4537 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
4544 static ExitStatus
op_tp(DisasContext
*s
, DisasOps
*o
)
4546 TCGv_i32 l1
= tcg_const_i32(get_field(s
->fields
, l1
) + 1);
4547 gen_helper_tp(cc_op
, cpu_env
, o
->addr1
, l1
);
4548 tcg_temp_free_i32(l1
);
4553 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
4555 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4556 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4557 tcg_temp_free_i32(l
);
4562 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
4564 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4565 return_low128(o
->out2
);
4570 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
4572 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4573 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4574 tcg_temp_free_i32(l
);
4579 static ExitStatus
op_trtr(DisasContext
*s
, DisasOps
*o
)
4581 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4582 gen_helper_trtr(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4583 tcg_temp_free_i32(l
);
4588 static ExitStatus
op_trXX(DisasContext
*s
, DisasOps
*o
)
4590 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4591 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4592 TCGv_i32 sizes
= tcg_const_i32(s
->insn
->opc
& 3);
4593 TCGv_i32 tst
= tcg_temp_new_i32();
4594 int m3
= get_field(s
->fields
, m3
);
4596 if (!s390_has_feat(S390_FEAT_ETF2_ENH
)) {
4600 tcg_gen_movi_i32(tst
, -1);
4602 tcg_gen_extrl_i64_i32(tst
, regs
[0]);
4603 if (s
->insn
->opc
& 3) {
4604 tcg_gen_ext8u_i32(tst
, tst
);
4606 tcg_gen_ext16u_i32(tst
, tst
);
4609 gen_helper_trXX(cc_op
, cpu_env
, r1
, r2
, tst
, sizes
);
4611 tcg_temp_free_i32(r1
);
4612 tcg_temp_free_i32(r2
);
4613 tcg_temp_free_i32(sizes
);
4614 tcg_temp_free_i32(tst
);
4619 static ExitStatus
op_ts(DisasContext
*s
, DisasOps
*o
)
4621 TCGv_i32 t1
= tcg_const_i32(0xff);
4622 tcg_gen_atomic_xchg_i32(t1
, o
->in2
, t1
, get_mem_index(s
), MO_UB
);
4623 tcg_gen_extract_i32(cc_op
, t1
, 7, 1);
4624 tcg_temp_free_i32(t1
);
4629 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
4631 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4632 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4633 tcg_temp_free_i32(l
);
4637 static ExitStatus
op_unpka(DisasContext
*s
, DisasOps
*o
)
4639 int l1
= get_field(s
->fields
, l1
) + 1;
4642 /* The length must not exceed 32 bytes. */
4644 gen_program_exception(s
, PGM_SPECIFICATION
);
4645 return EXIT_NORETURN
;
4647 l
= tcg_const_i32(l1
);
4648 gen_helper_unpka(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4649 tcg_temp_free_i32(l
);
4654 static ExitStatus
op_unpku(DisasContext
*s
, DisasOps
*o
)
4656 int l1
= get_field(s
->fields
, l1
) + 1;
4659 /* The length must be even and should not exceed 64 bytes. */
4660 if ((l1
& 1) || (l1
> 64)) {
4661 gen_program_exception(s
, PGM_SPECIFICATION
);
4662 return EXIT_NORETURN
;
4664 l
= tcg_const_i32(l1
);
4665 gen_helper_unpku(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4666 tcg_temp_free_i32(l
);
4672 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
4674 int d1
= get_field(s
->fields
, d1
);
4675 int d2
= get_field(s
->fields
, d2
);
4676 int b1
= get_field(s
->fields
, b1
);
4677 int b2
= get_field(s
->fields
, b2
);
4678 int l
= get_field(s
->fields
, l1
);
4681 o
->addr1
= get_address(s
, 0, b1
, d1
);
4683 /* If the addresses are identical, this is a store/memset of zero. */
4684 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4685 o
->in2
= tcg_const_i64(0);
4689 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4692 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4696 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4699 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4703 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4706 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4710 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4712 gen_op_movi_cc(s
, 0);
4716 /* But in general we'll defer to a helper. */
4717 o
->in2
= get_address(s
, 0, b2
, d2
);
4718 t32
= tcg_const_i32(l
);
4719 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4720 tcg_temp_free_i32(t32
);
4725 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
4727 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4731 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
4733 int shift
= s
->insn
->data
& 0xff;
4734 int size
= s
->insn
->data
>> 8;
4735 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4738 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4739 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4741 /* Produce the CC from only the bits manipulated. */
4742 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4743 set_cc_nz_u64(s
, cc_dst
);
4747 static ExitStatus
op_xi(DisasContext
*s
, DisasOps
*o
)
4749 o
->in1
= tcg_temp_new_i64();
4751 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
4752 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
4754 /* Perform the atomic operation in memory. */
4755 tcg_gen_atomic_fetch_xor_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
4759 /* Recompute also for atomic case: needed for setting CC. */
4760 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4762 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
4763 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
4768 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
4770 o
->out
= tcg_const_i64(0);
4774 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
4776 o
->out
= tcg_const_i64(0);
4782 /* ====================================================================== */
4783 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4784 the original inputs), update the various cc data structures in order to
4785 be able to compute the new condition code. */
4787 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4789 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4792 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4794 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
4797 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
4799 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
4802 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4804 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4807 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4809 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4812 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4814 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4817 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4819 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4822 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4824 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
4827 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
4829 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
4832 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4834 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4837 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4839 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4842 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4844 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4847 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4849 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4852 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4854 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4857 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4859 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4862 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4864 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4867 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4869 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4872 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4874 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4877 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4879 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
4882 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
4884 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
4885 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
4888 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
4890 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
4893 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
4895 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
4898 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
4900 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
4903 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
4905 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
4908 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
4910 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
4913 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
4915 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
4918 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
4920 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
4923 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
4925 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
4928 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
4930 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
4933 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
4935 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
4938 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
4940 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
4943 /* ====================================================================== */
4944 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4945 with the TCG register to which we will write. Used in combination with
4946 the "wout" generators, in some cases we need a new temporary, and in
4947 some cases we can write to a TCG global. */
4949 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4951 o
->out
= tcg_temp_new_i64();
4953 #define SPEC_prep_new 0
4955 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4957 o
->out
= tcg_temp_new_i64();
4958 o
->out2
= tcg_temp_new_i64();
4960 #define SPEC_prep_new_P 0
4962 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4964 o
->out
= regs
[get_field(f
, r1
)];
4967 #define SPEC_prep_r1 0
4969 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4971 int r1
= get_field(f
, r1
);
4973 o
->out2
= regs
[r1
+ 1];
4974 o
->g_out
= o
->g_out2
= true;
4976 #define SPEC_prep_r1_P SPEC_r1_even
4978 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4980 o
->out
= fregs
[get_field(f
, r1
)];
4983 #define SPEC_prep_f1 0
4985 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4987 int r1
= get_field(f
, r1
);
4989 o
->out2
= fregs
[r1
+ 2];
4990 o
->g_out
= o
->g_out2
= true;
4992 #define SPEC_prep_x1 SPEC_r1_f128
4994 /* ====================================================================== */
4995 /* The "Write OUTput" generators. These generally perform some non-trivial
4996 copy of data to TCG globals, or to main memory. The trivial cases are
4997 generally handled by having a "prep" generator install the TCG global
4998 as the destination of the operation. */
5000 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5002 store_reg(get_field(f
, r1
), o
->out
);
5004 #define SPEC_wout_r1 0
5006 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5008 int r1
= get_field(f
, r1
);
5009 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
5011 #define SPEC_wout_r1_8 0
5013 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5015 int r1
= get_field(f
, r1
);
5016 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
5018 #define SPEC_wout_r1_16 0
5020 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5022 store_reg32_i64(get_field(f
, r1
), o
->out
);
5024 #define SPEC_wout_r1_32 0
5026 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5028 store_reg32h_i64(get_field(f
, r1
), o
->out
);
5030 #define SPEC_wout_r1_32h 0
5032 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5034 int r1
= get_field(f
, r1
);
5035 store_reg32_i64(r1
, o
->out
);
5036 store_reg32_i64(r1
+ 1, o
->out2
);
5038 #define SPEC_wout_r1_P32 SPEC_r1_even
5040 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5042 int r1
= get_field(f
, r1
);
5043 store_reg32_i64(r1
+ 1, o
->out
);
5044 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
5045 store_reg32_i64(r1
, o
->out
);
5047 #define SPEC_wout_r1_D32 SPEC_r1_even
5049 static void wout_r3_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5051 int r3
= get_field(f
, r3
);
5052 store_reg32_i64(r3
, o
->out
);
5053 store_reg32_i64(r3
+ 1, o
->out2
);
5055 #define SPEC_wout_r3_P32 SPEC_r3_even
5057 static void wout_r3_P64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5059 int r3
= get_field(f
, r3
);
5060 store_reg(r3
, o
->out
);
5061 store_reg(r3
+ 1, o
->out2
);
5063 #define SPEC_wout_r3_P64 SPEC_r3_even
5065 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5067 store_freg32_i64(get_field(f
, r1
), o
->out
);
5069 #define SPEC_wout_e1 0
5071 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5073 store_freg(get_field(f
, r1
), o
->out
);
5075 #define SPEC_wout_f1 0
5077 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5079 int f1
= get_field(s
->fields
, r1
);
5080 store_freg(f1
, o
->out
);
5081 store_freg(f1
+ 2, o
->out2
);
5083 #define SPEC_wout_x1 SPEC_r1_f128
5085 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5087 if (get_field(f
, r1
) != get_field(f
, r2
)) {
5088 store_reg32_i64(get_field(f
, r1
), o
->out
);
5091 #define SPEC_wout_cond_r1r2_32 0
5093 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5095 if (get_field(f
, r1
) != get_field(f
, r2
)) {
5096 store_freg32_i64(get_field(f
, r1
), o
->out
);
5099 #define SPEC_wout_cond_e1e2 0
5101 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5103 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
5105 #define SPEC_wout_m1_8 0
5107 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5109 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
5111 #define SPEC_wout_m1_16 0
5113 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5115 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
5117 #define SPEC_wout_m1_32 0
5119 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5121 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
5123 #define SPEC_wout_m1_64 0
5125 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5127 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
5129 #define SPEC_wout_m2_32 0
5131 static void wout_in2_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5133 store_reg(get_field(f
, r1
), o
->in2
);
5135 #define SPEC_wout_in2_r1 0
5137 static void wout_in2_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5139 store_reg32_i64(get_field(f
, r1
), o
->in2
);
5141 #define SPEC_wout_in2_r1_32 0
5143 /* ====================================================================== */
5144 /* The "INput 1" generators. These load the first operand to an insn. */
5146 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5148 o
->in1
= load_reg(get_field(f
, r1
));
5150 #define SPEC_in1_r1 0
5152 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5154 o
->in1
= regs
[get_field(f
, r1
)];
5157 #define SPEC_in1_r1_o 0
5159 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5161 o
->in1
= tcg_temp_new_i64();
5162 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
5164 #define SPEC_in1_r1_32s 0
5166 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5168 o
->in1
= tcg_temp_new_i64();
5169 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
5171 #define SPEC_in1_r1_32u 0
5173 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5175 o
->in1
= tcg_temp_new_i64();
5176 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
5178 #define SPEC_in1_r1_sr32 0
5180 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5182 o
->in1
= load_reg(get_field(f
, r1
) + 1);
5184 #define SPEC_in1_r1p1 SPEC_r1_even
5186 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5188 o
->in1
= tcg_temp_new_i64();
5189 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
5191 #define SPEC_in1_r1p1_32s SPEC_r1_even
5193 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5195 o
->in1
= tcg_temp_new_i64();
5196 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
5198 #define SPEC_in1_r1p1_32u SPEC_r1_even
5200 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5202 int r1
= get_field(f
, r1
);
5203 o
->in1
= tcg_temp_new_i64();
5204 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
5206 #define SPEC_in1_r1_D32 SPEC_r1_even
5208 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5210 o
->in1
= load_reg(get_field(f
, r2
));
5212 #define SPEC_in1_r2 0
5214 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5216 o
->in1
= tcg_temp_new_i64();
5217 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
5219 #define SPEC_in1_r2_sr32 0
5221 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5223 o
->in1
= load_reg(get_field(f
, r3
));
5225 #define SPEC_in1_r3 0
5227 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5229 o
->in1
= regs
[get_field(f
, r3
)];
5232 #define SPEC_in1_r3_o 0
5234 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5236 o
->in1
= tcg_temp_new_i64();
5237 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5239 #define SPEC_in1_r3_32s 0
5241 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5243 o
->in1
= tcg_temp_new_i64();
5244 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5246 #define SPEC_in1_r3_32u 0
5248 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5250 int r3
= get_field(f
, r3
);
5251 o
->in1
= tcg_temp_new_i64();
5252 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
5254 #define SPEC_in1_r3_D32 SPEC_r3_even
5256 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5258 o
->in1
= load_freg32_i64(get_field(f
, r1
));
5260 #define SPEC_in1_e1 0
5262 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5264 o
->in1
= fregs
[get_field(f
, r1
)];
5267 #define SPEC_in1_f1_o 0
5269 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5271 int r1
= get_field(f
, r1
);
5273 o
->out2
= fregs
[r1
+ 2];
5274 o
->g_out
= o
->g_out2
= true;
5276 #define SPEC_in1_x1_o SPEC_r1_f128
5278 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5280 o
->in1
= fregs
[get_field(f
, r3
)];
5283 #define SPEC_in1_f3_o 0
5285 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5287 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
5289 #define SPEC_in1_la1 0
5291 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5293 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5294 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5296 #define SPEC_in1_la2 0
5298 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5301 o
->in1
= tcg_temp_new_i64();
5302 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
5304 #define SPEC_in1_m1_8u 0
5306 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5309 o
->in1
= tcg_temp_new_i64();
5310 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
5312 #define SPEC_in1_m1_16s 0
5314 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5317 o
->in1
= tcg_temp_new_i64();
5318 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
5320 #define SPEC_in1_m1_16u 0
5322 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5325 o
->in1
= tcg_temp_new_i64();
5326 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
5328 #define SPEC_in1_m1_32s 0
5330 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5333 o
->in1
= tcg_temp_new_i64();
5334 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
5336 #define SPEC_in1_m1_32u 0
5338 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5341 o
->in1
= tcg_temp_new_i64();
5342 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
5344 #define SPEC_in1_m1_64 0
5346 /* ====================================================================== */
5347 /* The "INput 2" generators. These load the second operand to an insn. */
5349 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5351 o
->in2
= regs
[get_field(f
, r1
)];
5354 #define SPEC_in2_r1_o 0
5356 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5358 o
->in2
= tcg_temp_new_i64();
5359 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5361 #define SPEC_in2_r1_16u 0
5363 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5365 o
->in2
= tcg_temp_new_i64();
5366 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5368 #define SPEC_in2_r1_32u 0
5370 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5372 int r1
= get_field(f
, r1
);
5373 o
->in2
= tcg_temp_new_i64();
5374 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
5376 #define SPEC_in2_r1_D32 SPEC_r1_even
5378 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5380 o
->in2
= load_reg(get_field(f
, r2
));
5382 #define SPEC_in2_r2 0
5384 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5386 o
->in2
= regs
[get_field(f
, r2
)];
5389 #define SPEC_in2_r2_o 0
5391 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5393 int r2
= get_field(f
, r2
);
5395 o
->in2
= load_reg(r2
);
5398 #define SPEC_in2_r2_nz 0
5400 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5402 o
->in2
= tcg_temp_new_i64();
5403 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5405 #define SPEC_in2_r2_8s 0
5407 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5409 o
->in2
= tcg_temp_new_i64();
5410 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5412 #define SPEC_in2_r2_8u 0
5414 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5416 o
->in2
= tcg_temp_new_i64();
5417 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5419 #define SPEC_in2_r2_16s 0
5421 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5423 o
->in2
= tcg_temp_new_i64();
5424 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5426 #define SPEC_in2_r2_16u 0
5428 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5430 o
->in2
= load_reg(get_field(f
, r3
));
5432 #define SPEC_in2_r3 0
5434 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5436 o
->in2
= tcg_temp_new_i64();
5437 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
5439 #define SPEC_in2_r3_sr32 0
5441 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5443 o
->in2
= tcg_temp_new_i64();
5444 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5446 #define SPEC_in2_r2_32s 0
5448 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5450 o
->in2
= tcg_temp_new_i64();
5451 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5453 #define SPEC_in2_r2_32u 0
5455 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5457 o
->in2
= tcg_temp_new_i64();
5458 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
5460 #define SPEC_in2_r2_sr32 0
5462 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5464 o
->in2
= load_freg32_i64(get_field(f
, r2
));
5466 #define SPEC_in2_e2 0
5468 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5470 o
->in2
= fregs
[get_field(f
, r2
)];
5473 #define SPEC_in2_f2_o 0
5475 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5477 int r2
= get_field(f
, r2
);
5479 o
->in2
= fregs
[r2
+ 2];
5480 o
->g_in1
= o
->g_in2
= true;
5482 #define SPEC_in2_x2_o SPEC_r2_f128
5484 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5486 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
5488 #define SPEC_in2_ra2 0
5490 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5492 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5493 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5495 #define SPEC_in2_a2 0
5497 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5499 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
5501 #define SPEC_in2_ri2 0
5503 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5505 help_l2_shift(s
, f
, o
, 31);
5507 #define SPEC_in2_sh32 0
5509 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5511 help_l2_shift(s
, f
, o
, 63);
5513 #define SPEC_in2_sh64 0
5515 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5518 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
5520 #define SPEC_in2_m2_8u 0
5522 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5525 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
5527 #define SPEC_in2_m2_16s 0
5529 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5532 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5534 #define SPEC_in2_m2_16u 0
5536 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5539 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5541 #define SPEC_in2_m2_32s 0
5543 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5546 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5548 #define SPEC_in2_m2_32u 0
5550 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5553 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5555 #define SPEC_in2_m2_64 0
5557 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5560 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5562 #define SPEC_in2_mri2_16u 0
5564 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5567 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5569 #define SPEC_in2_mri2_32s 0
5571 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5574 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5576 #define SPEC_in2_mri2_32u 0
5578 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5581 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5583 #define SPEC_in2_mri2_64 0
5585 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5587 o
->in2
= tcg_const_i64(get_field(f
, i2
));
5589 #define SPEC_in2_i2 0
/* Load in2 with immediate i2 truncated/zero-extended to 8 bits.
   Reconstructed from a garbled extraction. */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0
/* Load in2 with immediate i2 truncated/zero-extended to 16 bits.
   Reconstructed from a garbled extraction. */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0
/* Load in2 with immediate i2 truncated/zero-extended to 32 bits.
   Reconstructed from a garbled extraction. */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0
/* Load in2 with the 16-bit zero-extended immediate i2, shifted left by
   the per-insn data amount (s->insn->data).  The uint64_t temporary makes
   the shift well-defined for large shift counts.
   Reconstructed from a garbled extraction. */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0
/* Load in2 with the 32-bit zero-extended immediate i2, shifted left by
   the per-insn data amount (s->insn->data).
   Reconstructed from a garbled extraction. */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
5623 #ifndef CONFIG_USER_ONLY
5624 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5626 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
5628 #define SPEC_in2_insn 0
/* Instruction-table construction machinery: the C/D macros expand each
   entry of insn-data.def first into enum constants, then into DisasInsn
   initializers.  NOTE(review): this extraction is garbled -- original
   lines are fragmented and several (e.g. #undef D, struct field
   initializers, enum close) are elided; code left byte-identical. */
5631 /* ====================================================================== */
5633 /* Find opc within the table of insns. This is formulated as a switch
5634 statement so that (1) we get compile-time notice of cut-paste errors
5635 for duplicated opcodes, and (2) the compiler generates the binary
5636 search tree, rather than us having to post-process the table. */
5638 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5639 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5641 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5643 enum DisasInsnEnum
{
5644 #include "insn-data.def"
5648 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5652 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5654 .help_in1 = in1_##I1, \
5655 .help_in2 = in2_##I2, \
5656 .help_prep = prep_##P, \
5657 .help_wout = wout_##W, \
5658 .help_cout = cout_##CC, \
5659 .help_op = op_##OP, \
5663 /* Allow 0 to be used for NULL in the table below. */
5671 #define SPEC_in1_0 0
5672 #define SPEC_in2_0 0
5673 #define SPEC_prep_0 0
5674 #define SPEC_wout_0 0
5676 /* Give smaller names to the various facilities. */
5677 #define FAC_Z S390_FEAT_ZARCH
5678 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5679 #define FAC_DFP S390_FEAT_DFP
5680 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5681 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5682 #define FAC_EE S390_FEAT_EXECUTE_EXT
5683 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5684 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5685 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5686 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5687 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5688 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5689 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5690 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5691 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5692 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5693 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5694 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
5695 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5696 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5697 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5698 #define FAC_SFLE S390_FEAT_STFLE
5699 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5700 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5701 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5702 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5703 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5704 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
5705 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
5706 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5707 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
5708 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
5709 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
5710 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
5711 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
5712 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
/* The full instruction table, one DisasInsn per insn-data.def entry,
   built via the D macro above.  NOTE(review): closing brace and #undef
   lines elided by the extraction; code left byte-identical. */
5714 static const DisasInsn insn_info
[] = {
5715 #include "insn-data.def"
/* Map a 16-bit opcode (major << 8 | secondary) to its DisasInsn entry,
   or NULL if unknown; the D macro turns each table entry into a switch
   case.  NOTE(review): the switch statement scaffolding and default
   return were elided by the extraction; code left byte-identical. */
5719 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5720 case OPC: return &insn_info[insn_ ## NM];
5722 static const DisasInsn
*lookup_opc(uint16_t opc
)
5725 #include "insn-data.def"
5734 /* Extract a field from the insn. The INSN should be left-aligned in
5735 the uint64_t so that we can more easily utilize the big-bit-endian
5736 definitions we extract from the Principles of Operation. */
/* Decode one operand field F out of the left-aligned instruction word
   INSN into the compressed field array O, sign-extending or un-swapping
   split fields as the field type requires, and asserting that no two
   original fields map onto the same compressed slot.
   NOTE(review): this extraction is garbled -- declarations of r/m, the
   switch scaffolding, and several case bodies are elided; code left
   byte-identical. */
5738 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5746 /* Zero extract the field from the insn. */
5747 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5749 /* Sign-extend, or un-swap the field as necessary. */
5751 case 0: /* unsigned */
5753 case 1: /* signed */
5754 assert(f
->size
<= 32);
5755 m
= 1u << (f
->size
- 1);
5758 case 2: /* dl+dh split, signed 20 bit. */
5759 r
= ((int8_t)r
<< 12) | (r
>> 8);
5765 /* Validate that the "compressed" encoding we selected above is valid.
5766 I.e. we haven't made two different original fields overlap. */
5767 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5768 o
->presentC
|= 1 << f
->indexC
;
5769 o
->presentO
|= 1 << f
->indexO
;
5771 o
->c
[f
->indexC
] = r
;
/* Fetch and decode the instruction at s->pc (or replay the bits saved by
   EXECUTE via s->ex_value), locate the secondary opcode by major-opcode
   class, look the insn up in the table, and extract its operand fields.
   NOTE(review): this extraction is garbled -- the ilen switch
   scaffolding, op/op2/ilen declarations, braces, and the final return
   are elided; code left byte-identical. */
5774 /* Lookup the insn at the current PC, extracting the operands into O and
5775 returning the info struct for the insn. Returns NULL for invalid insn. */
5777 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5780 uint64_t insn
, pc
= s
->pc
;
5782 const DisasInsn
*info
;
5784 if (unlikely(s
->ex_value
)) {
5785 /* Drop the EX data now, so that it's clear on exception paths. */
5786 TCGv_i64 zero
= tcg_const_i64(0);
5787 tcg_gen_st_i64(zero
, cpu_env
, offsetof(CPUS390XState
, ex_value
));
5788 tcg_temp_free_i64(zero
);
5790 /* Extract the values saved by EXECUTE. */
5791 insn
= s
->ex_value
& 0xffffffffffff0000ull
;
5792 ilen
= s
->ex_value
& 0xf;
5795 insn
= ld_code2(env
, pc
);
5796 op
= (insn
>> 8) & 0xff;
5797 ilen
= get_ilen(op
);
5803 insn
= ld_code4(env
, pc
) << 32;
5806 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
5809 g_assert_not_reached();
5812 s
->next_pc
= s
->pc
+ ilen
;
5815 /* We can't actually determine the insn format until we've looked up
5816 the full insn opcode. Which we can't do without locating the
5817 secondary opcode. Assume by default that OP2 is at bit 40; for
5818 those smaller insns that don't actually have a secondary opcode
5819 this will correctly result in OP2 = 0. */
5825 case 0xb2: /* S, RRF, RRE, IE */
5826 case 0xb3: /* RRE, RRD, RRF */
5827 case 0xb9: /* RRE, RRF */
5828 case 0xe5: /* SSE, SIL */
5829 op2
= (insn
<< 8) >> 56;
5833 case 0xc0: /* RIL */
5834 case 0xc2: /* RIL */
5835 case 0xc4: /* RIL */
5836 case 0xc6: /* RIL */
5837 case 0xc8: /* SSF */
5838 case 0xcc: /* RIL */
5839 op2
= (insn
<< 12) >> 60;
5841 case 0xc5: /* MII */
5842 case 0xc7: /* SMI */
5843 case 0xd0 ... 0xdf: /* SS */
5849 case 0xee ... 0xf3: /* SS */
5850 case 0xf8 ... 0xfd: /* SS */
5854 op2
= (insn
<< 40) >> 56;
5858 memset(f
, 0, sizeof(*f
))
;
5863 /* Lookup the instruction. */
5864 info
= lookup_opc(op
<< 8 | op2
);
5866 /* If we found it, extract the operands. */
5868 DisasFormat fmt
= info
->fmt
;
5871 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5872 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
/* Translate a single guest instruction: decode it, raise illegal-opcode
   or specification exceptions as needed, drive the per-insn helper
   pipeline (in1/in2/prep/op/wout/cout), free non-global temporaries, and
   handle PER tracing in system mode.  Returns the ExitStatus produced by
   the op helper (NO_EXIT to continue the TB).
   NOTE(review): this extraction is garbled -- variable declarations
   (DisasFields f, DisasOps o), braces, the even/f128 register tests, and
   the final pc advance are elided; code left byte-identical. */
5878 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5880 const DisasInsn
*insn
;
5881 ExitStatus ret
= NO_EXIT
;
5885 /* Search for the insn in the table. */
5886 insn
= extract_insn(env
, s
, &f
);
5888 /* Not found means unimplemented/illegal opcode. */
5890 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
5892 gen_illegal_opcode(s
);
5893 return EXIT_NORETURN
;
5896 #ifndef CONFIG_USER_ONLY
5897 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5898 TCGv_i64 addr
= tcg_const_i64(s
->pc
);
5899 gen_helper_per_ifetch(cpu_env
, addr
);
5900 tcg_temp_free_i64(addr
);
5904 /* Check for insn specification exceptions. */
5906 int spec
= insn
->spec
, excp
= 0, r
;
5908 if (spec
& SPEC_r1_even
) {
5909 r
= get_field(&f
, r1
);
5911 excp
= PGM_SPECIFICATION
;
5914 if (spec
& SPEC_r2_even
) {
5915 r
= get_field(&f
, r2
);
5917 excp
= PGM_SPECIFICATION
;
5920 if (spec
& SPEC_r3_even
) {
5921 r
= get_field(&f
, r3
);
5923 excp
= PGM_SPECIFICATION
;
5926 if (spec
& SPEC_r1_f128
) {
5927 r
= get_field(&f
, r1
);
5929 excp
= PGM_SPECIFICATION
;
5932 if (spec
& SPEC_r2_f128
) {
5933 r
= get_field(&f
, r2
);
5935 excp
= PGM_SPECIFICATION
;
5939 gen_program_exception(s
, excp
);
5940 return EXIT_NORETURN
;
5944 /* Set up the structures we use to communicate with the helpers. */
5947 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5948 TCGV_UNUSED_I64(o
.out
);
5949 TCGV_UNUSED_I64(o
.out2
);
5950 TCGV_UNUSED_I64(o
.in1
);
5951 TCGV_UNUSED_I64(o
.in2
);
5952 TCGV_UNUSED_I64(o
.addr1
);
5954 /* Implement the instruction. */
5955 if (insn
->help_in1
) {
5956 insn
->help_in1(s
, &f
, &o
);
5958 if (insn
->help_in2
) {
5959 insn
->help_in2(s
, &f
, &o
);
5961 if (insn
->help_prep
) {
5962 insn
->help_prep(s
, &f
, &o
);
5964 if (insn
->help_op
) {
5965 ret
= insn
->help_op(s
, &o
);
5967 if (insn
->help_wout
) {
5968 insn
->help_wout(s
, &f
, &o
);
5970 if (insn
->help_cout
) {
5971 insn
->help_cout(s
, &o
);
5974 /* Free any temporaries created by the helpers. */
5975 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5976 tcg_temp_free_i64(o
.out
);
5978 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5979 tcg_temp_free_i64(o
.out2
);
5981 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5982 tcg_temp_free_i64(o
.in1
);
5984 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5985 tcg_temp_free_i64(o
.in2
);
5987 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5988 tcg_temp_free_i64(o
.addr1
);
5991 #ifndef CONFIG_USER_ONLY
5992 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5993 /* An exception might be triggered, save PSW if not already done. */
5994 if (ret
== NO_EXIT
|| ret
== EXIT_PC_STALE
) {
5995 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
5998 /* Call the helper to check for a possible PER exception. */
5999 gen_helper_per_check_exception(cpu_env
);
6003 /* Advance to the next instruction. */
/* Translate a basic block: initialize the DisasContext from TB flags,
   loop translate_one until a page boundary, op-buffer exhaustion,
   instruction budget, or single-step stops generation, then emit the TB
   epilogue (goto_tb / lookup_and_goto_ptr / debug exception) and record
   tb->size / tb->icount, with optional in-asm disassembly logging.
   NOTE(review): this extraction is garbled -- the DisasContext/status
   declarations, do-loop scaffolding, the status switch cases, and several
   brace lines are elided; code left byte-identical. */
6008 void gen_intermediate_code(CPUState
*cs
, struct TranslationBlock
*tb
)
6010 CPUS390XState
*env
= cs
->env_ptr
;
6012 target_ulong pc_start
;
6013 uint64_t next_page_start
;
6014 int num_insns
, max_insns
;
6021 if (!(tb
->flags
& FLAG_MASK_64
)) {
6022 pc_start
&= 0x7fffffff;
6027 dc
.cc_op
= CC_OP_DYNAMIC
;
6028 dc
.ex_value
= tb
->cs_base
;
6029 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
6031 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
6034 max_insns
= tb_cflags(tb
) & CF_COUNT_MASK
;
6035 if (max_insns
== 0) {
6036 max_insns
= CF_COUNT_MASK
;
6038 if (max_insns
> TCG_MAX_INSNS
) {
6039 max_insns
= TCG_MAX_INSNS
;
6045 tcg_gen_insn_start(dc
.pc
, dc
.cc_op
);
6048 if (unlikely(cpu_breakpoint_test(cs
, dc
.pc
, BP_ANY
))) {
6049 status
= EXIT_PC_STALE
;
6051 /* The address covered by the breakpoint must be included in
6052 [tb->pc, tb->pc + tb->size) in order for it to be
6053 properly cleared -- thus we increment the PC here so that
6054 the logic setting tb->size below does the right thing. */
6059 if (num_insns
== max_insns
&& (tb_cflags(tb
) & CF_LAST_IO
)) {
6063 status
= translate_one(env
, &dc
);
6065 /* If we reach a page boundary, are single stepping,
6066 or exhaust instruction count, stop generation. */
6067 if (status
== NO_EXIT
6068 && (dc
.pc
>= next_page_start
6069 || tcg_op_buf_full()
6070 || num_insns
>= max_insns
6072 || cs
->singlestep_enabled
6074 status
= EXIT_PC_STALE
;
6076 } while (status
== NO_EXIT
);
6078 if (tb_cflags(tb
) & CF_LAST_IO
) {
6087 case EXIT_PC_STALE_NOCHAIN
:
6088 update_psw_addr(&dc
);
6090 case EXIT_PC_UPDATED
:
6091 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6092 cc op type is in env */
6095 case EXIT_PC_CC_UPDATED
:
6096 /* Exit the TB, either by raising a debug exception or by return. */
6098 gen_exception(EXCP_DEBUG
);
6099 } else if (use_exit_tb(&dc
) || status
== EXIT_PC_STALE_NOCHAIN
) {
6102 tcg_gen_lookup_and_goto_ptr();
6106 g_assert_not_reached();
6109 gen_tb_end(tb
, num_insns
);
6111 tb
->size
= dc
.pc
- pc_start
;
6112 tb
->icount
= num_insns
;
6114 #if defined(S390X_DEBUG_DISAS)
6115 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
6116 && qemu_log_in_addr_range(pc_start
)) {
6118 if (unlikely(dc
.ex_value
)) {
6119 /* ??? Unfortunately log_target_disas can't use host memory. */
6120 qemu_log("IN: EXECUTE %016" PRIx64
"\n", dc
.ex_value
);
6122 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
6123 log_target_disas(cs
, pc_start
, dc
.pc
- pc_start
);
6131 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
6134 int cc_op
= data
[1];
6135 env
->psw
.addr
= data
[0];
6136 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {