 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#include "exec/translator.h"
#include "qemu/atomic128.h"

/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool do_debug;
};

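/*
 * Illustrative example (not in the original source): while translating a
 * 4-byte instruction at 0x1000, base.pc_next stays 0x1000 and pc_tmp is
 * 0x1004, so helpers can recognize "branch to next" simply by comparing a
 * branch destination against pc_tmp.
 */
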
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

static int get_mem_index(DisasContext *s)
{
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->base.tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}

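/*
 * Worked example (illustrative): for a BXD operand with b2=5, x2=6 and
 * d2=8, get_address() emits regs[5] + regs[6] + 8; in 24/31-bit mode the
 * result is additionally masked with 0x7fffffff by the need_31 path above.
 */
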
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

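/*
 * Usage sketch (illustrative): after an addition the translator leaves
 * s->cc_op = CC_OP_ADD_64 with the operands in cc_src/cc_dst and the result
 * in cc_vr; nothing is computed until a consumer (e.g. BRANCH ON CONDITION)
 * calls gen_op_calc_cc(), which materializes the 2-bit CC into cc_op via
 * the 3-argument helper call above.
 */
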
static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
            (s->base.tb->flags & FLAG_MASK_PER);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

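/*
 * Example (illustrative): mask 8 (binary 1000) selects only CC=0.  After a
 * signed comparison CC=0 means "equal", so ltgt_cond[8] = TCG_COND_EQ and a
 * conditional branch on that mask folds into a single brcond.
 */
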
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5

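/*
 * Illustrative expansion (assuming insn-format.def contains an entry such
 * as "F1(I, I(1, 8, 8))"): the F1 macro above reduces it to F0(I), i.e. the
 * enumerator FMT_I; the same .def file is re-included below with different
 * F0-F5 definitions to build the matching format_info[] entry.
 */
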
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i2 = 4,

    FLD_C_b2 = 5,
    FLD_C_l2 = 5,

    FLD_C_d1 = 6,

    FLD_C_d2 = 7,

    FLD_C_d4 = 8,
    FLD_C_x2 = 8,
    FLD_C_i5 = 8,

    NUM_C_FIELD = 9
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */

/* NOTE: these fields are not used by every operation.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4

/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}

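/*
 * Worked example (illustrative): mask 3 (binary 0011) selects CC=2 and
 * CC=3.  For CC_OP_ADDU_* those are exactly the "carry out" cases, so the
 * setcond above yields 0 or 1 in carry, which is then added into o->out to
 * implement ADD LOGICAL WITH CARRY.
 */
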
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory.  */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

op_and(DisasContext
*s
, DisasOps
*o
)
1428 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory.  */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}

static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2));
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

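/*
 * Examples (illustrative): BCR 15,r14 reaches help_branch() with
 * TCG_COND_ALWAYS and branches to r14; BCR 14,0 and BCR 15,0 fall into the
 * r2 == 0 cases above and emit only a memory barrier; BCR 0,r2 decodes to
 * TCG_COND_NEVER and is a no-op.
 */
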
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}

static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}

static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}

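/*
 * Illustrative mapping (not from the original source): COMPARE AND SWAP
 * sets CC=0 when the old memory value equals the expected value (swap
 * performed) and CC=1 when it does not; the TCG_COND_NE setcond above
 * produces exactly that 0/1 value directly in cc_op.
 */
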
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}

op_csst(DisasContext
*s
, DisasOps
*o
)
2070 int r3
= get_field(s
->fields
, r3
);
2071 TCGv_i32 t_r3
= tcg_const_i32(r3
);
2073 if (tb_cflags(s
->base
.tb
) & CF_PARALLEL
) {
2074 gen_helper_csst_parallel(cc_op
, cpu_env
, t_r3
, o
->addr1
, o
->in2
);
2076 gen_helper_csst(cc_op
, cpu_env
, t_r3
, o
->addr1
, o
->in2
);
2078 tcg_temp_free_i32(t_r3
);
2084 #ifndef CONFIG_USER_ONLY
2085 static DisasJumpType
op_csp(DisasContext
*s
, DisasOps
*o
)
2087 TCGMemOp mop
= s
->insn
->data
;
2088 TCGv_i64 addr
, old
, cc
;
2089 TCGLabel
*lab
= gen_new_label();
2091 /* Note that in1 = R1 (zero-extended expected value),
2092 out = R1 (original reg), out2 = R1+1 (new value). */
2094 addr
= tcg_temp_new_i64();
2095 old
= tcg_temp_new_i64();
2096 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2097 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2098 get_mem_index(s
), mop
| MO_ALIGN
);
2099 tcg_temp_free_i64(addr
);
2101 /* Are the memory and expected values (un)equal? */
2102 cc
= tcg_temp_new_i64();
2103 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2104 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2106 /* Write back the output now, so that it happens before the
2107 following branch, so that we don't need local temps. */
2108 if ((mop
& MO_SIZE
) == MO_32
) {
2109 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2111 tcg_gen_mov_i64(o
->out
, old
);
2113 tcg_temp_free_i64(old
);
2115 /* If the comparison was equal, and the LSB of R2 was set,
2116 then we need to flush the TLB (for all cpus). */
2117 tcg_gen_xori_i64(cc
, cc
, 1);
2118 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2119 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2120 tcg_temp_free_i64(cc
);
2122 gen_helper_purge(cpu_env
);
2129 static DisasJumpType
op_cvd(DisasContext
*s
, DisasOps
*o
)
2131 TCGv_i64 t1
= tcg_temp_new_i64();
2132 TCGv_i32 t2
= tcg_temp_new_i32();
2133 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2134 gen_helper_cvd(t1
, t2
);
2135 tcg_temp_free_i32(t2
);
2136 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2137 tcg_temp_free_i64(t1
);
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}

static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}

static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}

static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;
}

static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

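/*
 * Examples (illustrative): for ICM with m3 = 0xf the four contiguous bytes
 * are fetched as one 32-bit load; for a sparse mask such as m3 = 0x9
 * (bytes 0 and 3) the default path above loads one byte at a time and
 * deposits each into its slot, accumulating the CC mask in ccm.
 */
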
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}

static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
#endif

op_msa(DisasContext
*s
, DisasOps
*o
)
2510 int r1
= have_field(s
->fields
, r1
) ? get_field(s
->fields
, r1
) : 0;
2511 int r2
= have_field(s
->fields
, r2
) ? get_field(s
->fields
, r2
) : 0;
2512 int r3
= have_field(s
->fields
, r3
) ? get_field(s
->fields
, r3
) : 0;
2513 TCGv_i32 t_r1
, t_r2
, t_r3
, type
;
2515 switch (s
->insn
->data
) {
2516 case S390_FEAT_TYPE_KMCTR
:
2517 if (r3
& 1 || !r3
) {
2518 gen_program_exception(s
, PGM_SPECIFICATION
);
2519 return DISAS_NORETURN
;
2522 case S390_FEAT_TYPE_PPNO
:
2523 case S390_FEAT_TYPE_KMF
:
2524 case S390_FEAT_TYPE_KMC
:
2525 case S390_FEAT_TYPE_KMO
:
2526 case S390_FEAT_TYPE_KM
:
2527 if (r1
& 1 || !r1
) {
2528 gen_program_exception(s
, PGM_SPECIFICATION
);
2529 return DISAS_NORETURN
;
2532 case S390_FEAT_TYPE_KMAC
:
2533 case S390_FEAT_TYPE_KIMD
:
2534 case S390_FEAT_TYPE_KLMD
:
2535 if (r2
& 1 || !r2
) {
2536 gen_program_exception(s
, PGM_SPECIFICATION
);
2537 return DISAS_NORETURN
;
2540 case S390_FEAT_TYPE_PCKMO
:
2541 case S390_FEAT_TYPE_PCC
:
2544 g_assert_not_reached();
2547 t_r1
= tcg_const_i32(r1
);
2548 t_r2
= tcg_const_i32(r2
);
2549 t_r3
= tcg_const_i32(r3
);
2550 type
= tcg_const_i32(s
->insn
->data
);
2551 gen_helper_msa(cc_op
, cpu_env
, t_r1
, t_r2
, t_r3
, type
);
2553 tcg_temp_free_i32(t_r1
);
2554 tcg_temp_free_i32(t_r2
);
2555 tcg_temp_free_i32(t_r3
);
2556 tcg_temp_free_i32(type
);
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
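/*
 * Note on the LOAD AND {ADD,AND,OR,XOR} pattern above: the atomic
 * fetch-op returns the *old* memory value, which is what the insn
 * stores into R1 (via o->in2), while the architecture defines the CC on
 * the *result* of the operation.  Recomputing the op into o->out from
 * the old value reproduces that result without a second memory access.
 */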
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}

static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUL | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}

static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
#endif
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    return DISAS_NEXT;
}
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    return DISAS_NEXT;
}
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free_i64(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free_i64(t1);

    return DISAS_NEXT;
}
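/*
 * Note on the LM/LMH/LMG pattern above: loading the first and last
 * registers first means any page fault is taken before any guest
 * register has been modified, so the instruction can simply be
 * restarted.  The remaining loads then fall between two addresses
 * already known to be mapped and cannot fault.
 */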
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    TCGMemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    } else if (HAVE_ATOMIC128) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return_low128(o->out2);
    return DISAS_NEXT;
}
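/*
 * LPQ must load the quadword atomically when translated for a parallel
 * context.  If the host provides 128-bit atomics (HAVE_ATOMIC128) a
 * parallel helper performs the load; otherwise we punt to exit_atomic,
 * which stops all other vCPUs and re-executes the insn serially.
 */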
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    gen_helper_lura(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lurag(DisasContext *s, DisasOps *o)
{
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return DISAS_NEXT;
}

static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return DISAS_NEXT;
}

static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return DISAS_NEXT;
}
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return DISAS_NEXT;
}

static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return DISAS_NEXT;
}
static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;

    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound.  */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields->op2 == 0x5d) {
        pos += 32;
    }

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
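/*
 * Worked example for op_risbg (an editorial sketch): RISBG with
 * i3 == 40, i4 == 47 and i5 == 0 gives pmask == ~0, so
 * mask == (~0ull >> 40) ^ (~0ull >> 48) selects architectural bits
 * 40-47 (host bits 16-23), with len == 8 and pos == 16.  If the zero
 * flag is also set in i4, imask becomes 0 and the whole operation
 * collapses into the deposit_z path above.
 */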
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  */
    switch (s->fields->op2) {
    case 0x54: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        g_assert_not_reached();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
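/*
 * op_rosbg serves RNSBG (op2 0x54), ROSBG (0x56) and RXSBG (0x57).
 * When the high bit of i3 is set this is the test-only form: the
 * boolean result goes into a scratch temp and is discarded, and only
 * the CC, derived from the selected bits, survives.
 */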
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return DISAS_NEXT;
}

static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_PC_STALE;
}
#endif

static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_PC_STALE;
}
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}

static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
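/*
 * Example of the sign-preserving shift above (an editorial sketch):
 * for the 32-bit SLA form, s->insn->data is 31, so "sign" masks bit 31.
 * Shifting 0x40000001 left by 1 gives 0x80000002; the andi/or pair then
 * restores the original (zero) sign bit, producing 0x00000002 -- the
 * overflow is reported through CC_OP_SLA_32 rather than in the result.
 */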
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        g_assert_not_reached();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC.  */
    if (b2 == 0) {
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status.  */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s->fields, b1);
    int d1 = get_field(s->fields, d1);
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = get_address(s, 0, r3, 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* The TOD programmable register is a 16-bit value kept in a uint32_t
       (only the valid bits are set).  */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
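/*
 * Informal layout of the 16 bytes stored by STCKE above: byte 0 is the
 * zero epoch index, bytes 1-8 hold the 64-bit TOD clock (split across
 * the two stores by the shli/shri pair), bytes 9-13 are zero except for
 * the 0x10000 bit that makes the value compare differently from STCK,
 * and bytes 14-15 hold the TOD programmable register.
 */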
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    gen_helper_sck(cc_op, cpu_env, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}

static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}

static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided.  */
    return DISAS_NEXT;
}

static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}

static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return DISAS_NEXT;
}

static DisasJumpType op_sturg(DisasContext *s, DisasOps *o)
{
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return DISAS_NEXT;
}

static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        /* Shift the high half of the register down so that the 32-bit
           store picks it up.  */
        tcg_gen_shr_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return DISAS_NEXT;
}
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return DISAS_NEXT;
}
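/*
 * SUBTRACT LOGICAL WITH BORROW above: the machine encodes "no borrow"
 * in the most significant bit of the CC (CC values 2 and 3), so
 * selecting CC 0 or 1 (mask 8 | 4) yields the borrow itself as a 0/1
 * value, which is then subtracted from the freshly computed difference.
 */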
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}

static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return DISAS_NEXT;
}
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY

static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#endif
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s->fields, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}
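/*
 * TEST AND SET is modelled as an atomic exchange that writes 0xff and
 * inspects the old byte: the CC is simply the old leftmost bit
 * (extracted at position 7), and the store happens even when that bit
 * was already set.
 */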
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero. */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}
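/*
 * XC with identical base and displacement is the idiomatic s390x way to
 * zero memory, so op_xc turns short cases (up to 32 bytes) into a run
 * of constant-zero stores of decreasing width and sets CC 0 directly;
 * everything else, including overlapping operands, goes through the
 * byte-wise helper.
 */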
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}

static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif
/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
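/*
 * A note on the SPEC_* defines paired with each generator (a reading aid,
 * not new machinery): the table-entry macro E below ORs the SPEC_ masks of
 * the selected in1/in2/prep/wout helpers into the insn's .spec field, and
 * translate_one() checks that field before emitting any code.  E.g.
 * prep_r1_P carries SPEC_r1_even, so an instruction using it with an odd
 * r1 (say r1 = 3) raises a specification exception instead of silently
 * touching the wrong register pair.
 */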
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
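/*
 * How these compose for a memory operand (a sketch; the actual pairing of
 * generators to instructions lives in insn-data.def, not shown here):
 * in1_m1_16s() above is literally
 *
 *     in1_la1(s, f, o);                  -- o->addr1 = base + displacement
 *     o->in1 = tcg_temp_new_i64();
 *     tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
 *
 * i.e. address generation is split from the load so that a wout_m1_*()
 * generator can reuse the same o->addr1 when an instruction reads and
 * writes the same memory operand.
 */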
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0
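/*
 * Worked example for in2_ri2() (arithmetic only; relative instructions
 * encode a halfword count): with base.pc_next = 0x1000 and i2 = 0x100, the
 * operand is 0x1000 + 0x100 * 2 = 0x1200.  The cast to int64_t keeps
 * negative displacements working, e.g. i2 = -2 yields 0x1000 - 4 = 0xffc.
 */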
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0
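/*
 * The 31/63 arguments are the shift-amount masks: 32-bit shift insns use
 * only the low 5 bits of the computed second-operand address, 64-bit ones
 * the low 6.  Assuming help_l2_shift() (defined earlier in this file)
 * applies the mask with an AND, a d2 of 70 for a 64-bit shift yields
 * 70 & 63 = 6.
 */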
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0   NULL
#define in2_0   NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0    NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
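/*
 * Example expansion (a hypothetical entry of the same shape as those in
 * insn-data.def): a line such as
 *
 *     C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
 *
 * first expands to "insn_AR," inside enum DisasInsnEnum, and then, under
 * the second definition of E above, to a DisasInsn initializer wiring up
 * .help_in1 = in1_r1, .help_in2 = in2_r2, .help_prep = prep_new,
 * .help_wout = wout_r1_32, .help_cout = cout_adds32 and .help_op = op_add.
 */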
/* Give smaller names to the various facilities. */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
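/*
 * Worked example: a 4-bit register field starting at big-endian bit 8 of
 * the left-aligned insn is extracted as r = (insn << 8) >> 60.  For the
 * signed case, the (r ^ m) - m idiom sign-extends: with size = 16,
 * m = 0x8000, a raw value of 0xfffe becomes
 * (0xfffe ^ 0x8000) - 0x8000 = 0x7ffe - 0x8000 = -2.
 */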
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
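/*
 * Example of the secondary-opcode handling above: for the RRE instruction
 * AGR (0xb908), ld_code4() leaves the insn left-aligned as
 * 0xb908xxxx00000000, op = 0xb9 selects the "op2 at bits 8-15" group, so
 * op2 = (insn << 8) >> 56 = 0x08, and lookup_opc(0xb908) finds the entry.
 */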
static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}

static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return DISAS_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            return DISAS_NORETURN;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(&f, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(&f, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(&f, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (dxc) {
                gen_data_exception(dxc);
                return DISAS_NORETURN;
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(&f, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(&f, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(&f, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(&f, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(&f, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    o.out = NULL;
    o.out2 = NULL;
    o.in1 = NULL;
    o.in2 = NULL;
    o.addr1 = NULL;

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &f, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers.  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
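/*
 * Putting it together, one pass through translate_one() for the AR example
 * used earlier runs (a sketch; NULL helpers are simply skipped):
 *
 *     in1_r1      -> o.in1 = load_reg(r1)
 *     in2_r2      -> o.in2 = load_reg(r2)
 *     prep_new    -> o.out = tcg_temp_new_i64()
 *     op_add      -> o.out = o.in1 + o.in2
 *     wout_r1_32  -> store the low 32 bits of o.out into r1
 *     cout_adds32 -> record CC_OP_ADD_32 with in1/in2/out
 *
 * after which the non-global temporaries are freed and pc_next advances.
 */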
static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->do_debug = dc->base.singlestep_enabled;
}

static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}
static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = DISAS_PC_STALE;
    dc->do_debug = true;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size does the right thing.  */
    dc->base.pc_next += 2;
    return true;
}
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        uint64_t page_start;

        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(dc) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately log_target_disas can't use host memory.  */
        qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
    } else {
        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
    }
}
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .breakpoint_check   = s390x_tr_breakpoint_check,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb);
}
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC opcode if it is not already up-to-date.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}