/*
 4 * Copyright (c) 2009 Ulrich Hecht
 5 * Copyright (c) 2010 Alexander Graf
 7 * This library is free software; you can redistribute it and/or
 8 * modify it under the terms of the GNU Lesser General Public
 9 * License as published by the Free Software Foundation; either
 10 * version 2.1 of the License, or (at your option) any later version.
 12 * This library is distributed in the hope that it will be useful,
 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 15 * Lesser General Public License for more details.
 17 * You should have received a copy of the GNU Lesser General Public
 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
46 #include "exec/translator.h"
48 #include "qemu/atomic128.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext
;
53 typedef struct DisasInsn DisasInsn
;
54 typedef struct DisasFields DisasFields
;
57 * Define a structure to hold the decoded fields. We'll store each inside
58 * an array indexed by an enum. In order to conserve memory, we'll arrange
59 * for fields that do not exist at the same time to overlap, thus the "C"
60 * for compact. For checking purposes there is an "O" for original index
61 * as well that will be applied to availability bitmaps.
64 enum DisasFieldIndexO
{
93 enum DisasFieldIndexC
{
134 unsigned presentC
:16;
135 unsigned int presentO
;
139 struct DisasContext
{
140 DisasContextBase base
;
141 const DisasInsn
*insn
;
145 * During translate_one(), pc_tmp is used to determine the instruction
146 * to be executed after base.pc_next - e.g. next sequential instruction
147 * or a branch target.
155 /* Information carried about a condition to be evaluated. */
162 struct { TCGv_i64 a
, b
; } s64
;
163 struct { TCGv_i32 a
, b
; } s32
;
167 #ifdef DEBUG_INLINE_BRANCHES
168 static uint64_t inline_branch_hit
[CC_OP_MAX
];
169 static uint64_t inline_branch_miss
[CC_OP_MAX
];
172 static void pc_to_link_info(TCGv_i64 out
, DisasContext
*s
, uint64_t pc
)
176 if (s
->base
.tb
->flags
& FLAG_MASK_32
) {
177 if (s
->base
.tb
->flags
& FLAG_MASK_64
) {
178 tcg_gen_movi_i64(out
, pc
);
183 assert(!(s
->base
.tb
->flags
& FLAG_MASK_64
));
184 tmp
= tcg_const_i64(pc
);
185 tcg_gen_deposit_i64(out
, out
, tmp
, 0, 32);
186 tcg_temp_free_i64(tmp
);
189 static TCGv_i64 psw_addr
;
190 static TCGv_i64 psw_mask
;
191 static TCGv_i64 gbea
;
193 static TCGv_i32 cc_op
;
194 static TCGv_i64 cc_src
;
195 static TCGv_i64 cc_dst
;
196 static TCGv_i64 cc_vr
;
198 static char cpu_reg_names
[16][4];
199 static TCGv_i64 regs
[16];
201 void s390x_translate_init(void)
205 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
206 offsetof(CPUS390XState
, psw
.addr
),
208 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
209 offsetof(CPUS390XState
, psw
.mask
),
211 gbea
= tcg_global_mem_new_i64(cpu_env
,
212 offsetof(CPUS390XState
, gbea
),
215 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
217 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
219 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
221 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
224 for (i
= 0; i
< 16; i
++) {
225 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
226 regs
[i
] = tcg_global_mem_new(cpu_env
,
227 offsetof(CPUS390XState
, regs
[i
]),
232 static inline int vec_full_reg_offset(uint8_t reg
)
235 return offsetof(CPUS390XState
, vregs
[reg
][0]);
238 static inline int vec_reg_offset(uint8_t reg
, uint8_t enr
, MemOp es
)
240 /* Convert element size (es) - e.g. MO_8 - to bytes */
241 const uint8_t bytes
= 1 << es
;
242 int offs
= enr
* bytes
;
245 * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
246 * of the 16 byte vector, on both, little and big endian systems.
248 * Big Endian (target/possible host)
249 * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
250 * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7]
251 * W: [ 0][ 1] - [ 2][ 3]
254 * Little Endian (possible host)
255 * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
256 * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4]
257 * W: [ 1][ 0] - [ 3][ 2]
260 * For 16 byte elements, the two 8 byte halves will not form a host
261 * int128 if the host is little endian, since they're in the wrong order.
262 * Some operations (e.g. xor) do not care. For operations like addition,
263 * the two 8 byte elements have to be loaded separately. Let's force all
264 * 16 byte operations to handle it in a special way.
266 g_assert(es
<= MO_64
);
267 #ifndef HOST_WORDS_BIGENDIAN
270 return offs
+ vec_full_reg_offset(reg
);
273 static inline int freg64_offset(uint8_t reg
)
276 return vec_reg_offset(reg
, 0, MO_64
);
279 static inline int freg32_offset(uint8_t reg
)
282 return vec_reg_offset(reg
, 0, MO_32
);
285 static TCGv_i64
load_reg(int reg
)
287 TCGv_i64 r
= tcg_temp_new_i64();
288 tcg_gen_mov_i64(r
, regs
[reg
]);
292 static TCGv_i64
load_freg(int reg
)
294 TCGv_i64 r
= tcg_temp_new_i64();
296 tcg_gen_ld_i64(r
, cpu_env
, freg64_offset(reg
));
300 static TCGv_i64
load_freg32_i64(int reg
)
302 TCGv_i64 r
= tcg_temp_new_i64();
304 tcg_gen_ld32u_i64(r
, cpu_env
, freg32_offset(reg
));
308 static void store_reg(int reg
, TCGv_i64 v
)
310 tcg_gen_mov_i64(regs
[reg
], v
);
313 static void store_freg(int reg
, TCGv_i64 v
)
315 tcg_gen_st_i64(v
, cpu_env
, freg64_offset(reg
));
318 static void store_reg32_i64(int reg
, TCGv_i64 v
)
320 /* 32 bit register writes keep the upper half */
321 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
324 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
326 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
329 static void store_freg32_i64(int reg
, TCGv_i64 v
)
331 tcg_gen_st32_i64(v
, cpu_env
, freg32_offset(reg
));
334 static void return_low128(TCGv_i64 dest
)
336 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
339 static void update_psw_addr(DisasContext
*s
)
342 tcg_gen_movi_i64(psw_addr
, s
->base
.pc_next
);
345 static void per_branch(DisasContext
*s
, bool to_next
)
347 #ifndef CONFIG_USER_ONLY
348 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
350 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
351 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->pc_tmp
) : psw_addr
;
352 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
354 tcg_temp_free_i64(next_pc
);
360 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
361 TCGv_i64 arg1
, TCGv_i64 arg2
)
363 #ifndef CONFIG_USER_ONLY
364 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
365 TCGLabel
*lab
= gen_new_label();
366 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
368 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
369 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
373 TCGv_i64 pc
= tcg_const_i64(s
->base
.pc_next
);
374 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
375 tcg_temp_free_i64(pc
);
380 static void per_breaking_event(DisasContext
*s
)
382 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
385 static void update_cc_op(DisasContext
*s
)
387 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
388 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
392 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
394 return (uint64_t)cpu_lduw_code(env
, pc
);
397 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
399 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
402 static int get_mem_index(DisasContext
*s
)
404 #ifdef CONFIG_USER_ONLY
407 if (!(s
->base
.tb
->flags
& FLAG_MASK_DAT
)) {
411 switch (s
->base
.tb
->flags
& FLAG_MASK_ASC
) {
412 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
413 return MMU_PRIMARY_IDX
;
414 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
415 return MMU_SECONDARY_IDX
;
416 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
425 static void gen_exception(int excp
)
427 TCGv_i32 tmp
= tcg_const_i32(excp
);
428 gen_helper_exception(cpu_env
, tmp
);
429 tcg_temp_free_i32(tmp
);
432 static void gen_program_exception(DisasContext
*s
, int code
)
436 /* Remember what pgm exeption this was. */
437 tmp
= tcg_const_i32(code
);
438 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
439 tcg_temp_free_i32(tmp
);
441 tmp
= tcg_const_i32(s
->ilen
);
442 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
443 tcg_temp_free_i32(tmp
);
451 /* Trigger exception. */
452 gen_exception(EXCP_PGM
);
455 static inline void gen_illegal_opcode(DisasContext
*s
)
457 gen_program_exception(s
, PGM_OPERATION
);
460 static inline void gen_data_exception(uint8_t dxc
)
462 TCGv_i32 tmp
= tcg_const_i32(dxc
);
463 gen_helper_data_exception(cpu_env
, tmp
);
464 tcg_temp_free_i32(tmp
);
467 static inline void gen_trap(DisasContext
*s
)
469 /* Set DXC to 0xff */
470 gen_data_exception(0xff);
473 static void gen_addi_and_wrap_i64(DisasContext
*s
, TCGv_i64 dst
, TCGv_i64 src
,
476 tcg_gen_addi_i64(dst
, src
, imm
);
477 if (!(s
->base
.tb
->flags
& FLAG_MASK_64
)) {
478 if (s
->base
.tb
->flags
& FLAG_MASK_32
) {
479 tcg_gen_andi_i64(dst
, dst
, 0x7fffffff);
481 tcg_gen_andi_i64(dst
, dst
, 0x00ffffff);
486 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
488 TCGv_i64 tmp
= tcg_temp_new_i64();
491 * Note that d2 is limited to 20 bits, signed. If we crop negative
492 * displacements early we create larger immedate addends.
495 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
496 gen_addi_and_wrap_i64(s
, tmp
, tmp
, d2
);
498 gen_addi_and_wrap_i64(s
, tmp
, regs
[b2
], d2
);
500 gen_addi_and_wrap_i64(s
, tmp
, regs
[x2
], d2
);
501 } else if (!(s
->base
.tb
->flags
& FLAG_MASK_64
)) {
502 if (s
->base
.tb
->flags
& FLAG_MASK_32
) {
503 tcg_gen_movi_i64(tmp
, d2
& 0x7fffffff);
505 tcg_gen_movi_i64(tmp
, d2
& 0x00ffffff);
508 tcg_gen_movi_i64(tmp
, d2
);
514 static inline bool live_cc_data(DisasContext
*s
)
516 return (s
->cc_op
!= CC_OP_DYNAMIC
517 && s
->cc_op
!= CC_OP_STATIC
521 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
523 if (live_cc_data(s
)) {
524 tcg_gen_discard_i64(cc_src
);
525 tcg_gen_discard_i64(cc_dst
);
526 tcg_gen_discard_i64(cc_vr
);
528 s
->cc_op
= CC_OP_CONST0
+ val
;
531 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
533 if (live_cc_data(s
)) {
534 tcg_gen_discard_i64(cc_src
);
535 tcg_gen_discard_i64(cc_vr
);
537 tcg_gen_mov_i64(cc_dst
, dst
);
541 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
544 if (live_cc_data(s
)) {
545 tcg_gen_discard_i64(cc_vr
);
547 tcg_gen_mov_i64(cc_src
, src
);
548 tcg_gen_mov_i64(cc_dst
, dst
);
552 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
553 TCGv_i64 dst
, TCGv_i64 vr
)
555 tcg_gen_mov_i64(cc_src
, src
);
556 tcg_gen_mov_i64(cc_dst
, dst
);
557 tcg_gen_mov_i64(cc_vr
, vr
);
561 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
563 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
566 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
568 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
571 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
573 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
576 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
578 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
581 /* CC value is in env->cc_op */
582 static void set_cc_static(DisasContext
*s
)
584 if (live_cc_data(s
)) {
585 tcg_gen_discard_i64(cc_src
);
586 tcg_gen_discard_i64(cc_dst
);
587 tcg_gen_discard_i64(cc_vr
);
589 s
->cc_op
= CC_OP_STATIC
;
592 /* calculates cc into cc_op */
593 static void gen_op_calc_cc(DisasContext
*s
)
595 TCGv_i32 local_cc_op
= NULL
;
596 TCGv_i64 dummy
= NULL
;
600 dummy
= tcg_const_i64(0);
606 local_cc_op
= tcg_const_i32(s
->cc_op
);
622 /* s->cc_op is the cc value */
623 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
626 /* env->cc_op already is the cc value */
643 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
649 case CC_OP_LTUGTU_32
:
650 case CC_OP_LTUGTU_64
:
660 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
667 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
670 /* unknown operation - assume 3 arguments and cc_op in env */
671 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
678 tcg_temp_free_i32(local_cc_op
);
681 tcg_temp_free_i64(dummy
);
684 /* We now have cc in cc_op as constant */
688 static bool use_exit_tb(DisasContext
*s
)
690 return s
->base
.singlestep_enabled
||
691 (tb_cflags(s
->base
.tb
) & CF_LAST_IO
) ||
692 (s
->base
.tb
->flags
& FLAG_MASK_PER
);
695 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
697 if (unlikely(use_exit_tb(s
))) {
700 #ifndef CONFIG_USER_ONLY
701 return (dest
& TARGET_PAGE_MASK
) == (s
->base
.tb
->pc
& TARGET_PAGE_MASK
) ||
702 (dest
& TARGET_PAGE_MASK
) == (s
->base
.pc_next
& TARGET_PAGE_MASK
);
708 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
710 #ifdef DEBUG_INLINE_BRANCHES
711 inline_branch_miss
[cc_op
]++;
715 static void account_inline_branch(DisasContext
*s
, int cc_op
)
717 #ifdef DEBUG_INLINE_BRANCHES
718 inline_branch_hit
[cc_op
]++;
722 /* Table of mask values to comparison codes, given a comparison as input.
723 For such, CC=3 should not be possible. */
724 static const TCGCond ltgt_cond
[16] = {
725 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
726 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
727 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
728 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
729 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
730 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
731 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
732 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
735 /* Table of mask values to comparison codes, given a logic op as input.
736 For such, only CC=0 and CC=1 should be possible. */
737 static const TCGCond nz_cond
[16] = {
738 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
739 TCG_COND_NEVER
, TCG_COND_NEVER
,
740 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
741 TCG_COND_NE
, TCG_COND_NE
,
742 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
743 TCG_COND_EQ
, TCG_COND_EQ
,
744 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
745 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
748 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
749 details required to generate a TCG comparison. */
750 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
753 enum cc_op old_cc_op
= s
->cc_op
;
755 if (mask
== 15 || mask
== 0) {
756 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
759 c
->g1
= c
->g2
= true;
764 /* Find the TCG condition for the mask + cc op. */
770 cond
= ltgt_cond
[mask
];
771 if (cond
== TCG_COND_NEVER
) {
774 account_inline_branch(s
, old_cc_op
);
777 case CC_OP_LTUGTU_32
:
778 case CC_OP_LTUGTU_64
:
779 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
780 if (cond
== TCG_COND_NEVER
) {
783 account_inline_branch(s
, old_cc_op
);
787 cond
= nz_cond
[mask
];
788 if (cond
== TCG_COND_NEVER
) {
791 account_inline_branch(s
, old_cc_op
);
806 account_inline_branch(s
, old_cc_op
);
821 account_inline_branch(s
, old_cc_op
);
825 switch (mask
& 0xa) {
826 case 8: /* src == 0 -> no one bit found */
829 case 2: /* src != 0 -> one bit found */
835 account_inline_branch(s
, old_cc_op
);
841 case 8 | 2: /* result == 0 */
844 case 4 | 1: /* result != 0 */
847 case 8 | 4: /* !carry (borrow) */
848 cond
= old_cc_op
== CC_OP_ADDU
? TCG_COND_EQ
: TCG_COND_NE
;
850 case 2 | 1: /* carry (!borrow) */
851 cond
= old_cc_op
== CC_OP_ADDU
? TCG_COND_NE
: TCG_COND_EQ
;
856 account_inline_branch(s
, old_cc_op
);
861 /* Calculate cc value. */
866 /* Jump based on CC. We'll load up the real cond below;
867 the assignment here merely avoids a compiler warning. */
868 account_noninline_branch(s
, old_cc_op
);
869 old_cc_op
= CC_OP_STATIC
;
870 cond
= TCG_COND_NEVER
;
874 /* Load up the arguments of the comparison. */
876 c
->g1
= c
->g2
= false;
880 c
->u
.s32
.a
= tcg_temp_new_i32();
881 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
882 c
->u
.s32
.b
= tcg_const_i32(0);
885 case CC_OP_LTUGTU_32
:
887 c
->u
.s32
.a
= tcg_temp_new_i32();
888 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
889 c
->u
.s32
.b
= tcg_temp_new_i32();
890 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
897 c
->u
.s64
.b
= tcg_const_i64(0);
901 case CC_OP_LTUGTU_64
:
904 c
->g1
= c
->g2
= true;
910 c
->u
.s64
.a
= tcg_temp_new_i64();
911 c
->u
.s64
.b
= tcg_const_i64(0);
912 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
918 c
->u
.s64
.b
= tcg_const_i64(0);
922 case 4 | 1: /* result */
926 case 2 | 1: /* carry */
930 g_assert_not_reached();
939 case 0x8 | 0x4 | 0x2: /* cc != 3 */
941 c
->u
.s32
.b
= tcg_const_i32(3);
943 case 0x8 | 0x4 | 0x1: /* cc != 2 */
945 c
->u
.s32
.b
= tcg_const_i32(2);
947 case 0x8 | 0x2 | 0x1: /* cc != 1 */
949 c
->u
.s32
.b
= tcg_const_i32(1);
951 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
954 c
->u
.s32
.a
= tcg_temp_new_i32();
955 c
->u
.s32
.b
= tcg_const_i32(0);
956 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
958 case 0x8 | 0x4: /* cc < 2 */
960 c
->u
.s32
.b
= tcg_const_i32(2);
962 case 0x8: /* cc == 0 */
964 c
->u
.s32
.b
= tcg_const_i32(0);
966 case 0x4 | 0x2 | 0x1: /* cc != 0 */
968 c
->u
.s32
.b
= tcg_const_i32(0);
970 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
973 c
->u
.s32
.a
= tcg_temp_new_i32();
974 c
->u
.s32
.b
= tcg_const_i32(0);
975 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
977 case 0x4: /* cc == 1 */
979 c
->u
.s32
.b
= tcg_const_i32(1);
981 case 0x2 | 0x1: /* cc > 1 */
983 c
->u
.s32
.b
= tcg_const_i32(1);
985 case 0x2: /* cc == 2 */
987 c
->u
.s32
.b
= tcg_const_i32(2);
989 case 0x1: /* cc == 3 */
991 c
->u
.s32
.b
= tcg_const_i32(3);
994 /* CC is masked by something else: (8 >> cc) & mask. */
997 c
->u
.s32
.a
= tcg_const_i32(8);
998 c
->u
.s32
.b
= tcg_const_i32(0);
999 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
1000 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
1011 static void free_compare(DisasCompare
*c
)
1015 tcg_temp_free_i64(c
->u
.s64
.a
);
1017 tcg_temp_free_i32(c
->u
.s32
.a
);
1022 tcg_temp_free_i64(c
->u
.s64
.b
);
1024 tcg_temp_free_i32(c
->u
.s32
.b
);
1029 /* ====================================================================== */
1030 /* Define the insn format enumeration. */
1031 #define F0(N) FMT_##N,
1032 #define F1(N, X1) F0(N)
1033 #define F2(N, X1, X2) F0(N)
1034 #define F3(N, X1, X2, X3) F0(N)
1035 #define F4(N, X1, X2, X3, X4) F0(N)
1036 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1037 #define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
1040 #include "insn-format.def"
1051 /* This is the way fields are to be accessed out of DisasFields. */
1052 #define have_field(S, F) have_field1((S), FLD_O_##F)
1053 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1055 static bool have_field1(const DisasContext
*s
, enum DisasFieldIndexO c
)
1057 return (s
->fields
.presentO
>> c
) & 1;
1060 static int get_field1(const DisasContext
*s
, enum DisasFieldIndexO o
,
1061 enum DisasFieldIndexC c
)
1063 assert(have_field1(s
, o
));
1064 return s
->fields
.c
[c
];
1067 /* Describe the layout of each field in each format. */
1068 typedef struct DisasField
{
1070 unsigned int size
:8;
1071 unsigned int type
:2;
1072 unsigned int indexC
:6;
1073 enum DisasFieldIndexO indexO
:8;
1076 typedef struct DisasFormatInfo
{
1077 DisasField op
[NUM_C_FIELD
];
1080 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1081 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1082 #define V(N, B) { B, 4, 3, FLD_C_v##N, FLD_O_v##N }
1083 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1084 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1085 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1086 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1087 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1088 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1089 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1090 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1091 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1092 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1093 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1094 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1096 #define F0(N) { { } },
1097 #define F1(N, X1) { { X1 } },
1098 #define F2(N, X1, X2) { { X1, X2 } },
1099 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1100 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1101 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1102 #define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },
1104 static const DisasFormatInfo format_info
[] = {
1105 #include "insn-format.def"
1125 /* Generally, we'll extract operands into this structures, operate upon
1126 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1127 of routines below for more details. */
1129 bool g_out
, g_out2
, g_in1
, g_in2
;
1130 TCGv_i64 out
, out2
, in1
, in2
;
1134 /* Instructions can place constraints on their operands, raising specification
1135 exceptions if they are violated. To make this easy to automate, each "in1",
1136 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1137 of the following, or 0. To make this easy to document, we'll put the
1138 SPEC_<name> defines next to <name>. */
1140 #define SPEC_r1_even 1
1141 #define SPEC_r2_even 2
1142 #define SPEC_r3_even 4
1143 #define SPEC_r1_f128 8
1144 #define SPEC_r2_f128 16
1146 /* Return values from translate_one, indicating the state of the TB. */
1148 /* We are not using a goto_tb (for whatever reason), but have updated
1149 the PC (for whatever reason), so there's no need to do it again on
1151 #define DISAS_PC_UPDATED DISAS_TARGET_0
1153 /* We have emitted one or more goto_tb. No fixup required. */
1154 #define DISAS_GOTO_TB DISAS_TARGET_1
1156 /* We have updated the PC and CC values. */
1157 #define DISAS_PC_CC_UPDATED DISAS_TARGET_2
1159 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1160 updated the PC for the next instruction to be executed. */
1161 #define DISAS_PC_STALE DISAS_TARGET_3
1163 /* We are exiting the TB to the main loop. */
1164 #define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4
1167 /* Instruction flags */
1168 #define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */
1169 #define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */
1170 #define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */
1171 #define IF_BFP 0x0008 /* binary floating point instruction */
1172 #define IF_DFP 0x0010 /* decimal floating point instruction */
1173 #define IF_PRIV 0x0020 /* privileged instruction */
1174 #define IF_VEC 0x0040 /* vector instruction */
1175 #define IF_IO 0x0080 /* input/output instruction */
1186 /* Pre-process arguments before HELP_OP. */
1187 void (*help_in1
)(DisasContext
*, DisasOps
*);
1188 void (*help_in2
)(DisasContext
*, DisasOps
*);
1189 void (*help_prep
)(DisasContext
*, DisasOps
*);
1192 * Post-process output after HELP_OP.
1193 * Note that these are not called if HELP_OP returns DISAS_NORETURN.
1195 void (*help_wout
)(DisasContext
*, DisasOps
*);
1196 void (*help_cout
)(DisasContext
*, DisasOps
*);
1198 /* Implement the operation itself. */
1199 DisasJumpType (*help_op
)(DisasContext
*, DisasOps
*);
1204 /* ====================================================================== */
1205 /* Miscellaneous helpers, used by several operations. */
1207 static void help_l2_shift(DisasContext
*s
, DisasOps
*o
, int mask
)
1209 int b2
= get_field(s
, b2
);
1210 int d2
= get_field(s
, d2
);
1213 o
->in2
= tcg_const_i64(d2
& mask
);
1215 o
->in2
= get_address(s
, 0, b2
, d2
);
1216 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1220 static DisasJumpType
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1222 if (dest
== s
->pc_tmp
) {
1223 per_branch(s
, true);
1226 if (use_goto_tb(s
, dest
)) {
1228 per_breaking_event(s
);
1230 tcg_gen_movi_i64(psw_addr
, dest
);
1231 tcg_gen_exit_tb(s
->base
.tb
, 0);
1232 return DISAS_GOTO_TB
;
1234 tcg_gen_movi_i64(psw_addr
, dest
);
1235 per_branch(s
, false);
1236 return DISAS_PC_UPDATED
;
1240 static DisasJumpType
help_branch(DisasContext
*s
, DisasCompare
*c
,
1241 bool is_imm
, int imm
, TCGv_i64 cdest
)
1244 uint64_t dest
= s
->base
.pc_next
+ 2 * imm
;
1247 /* Take care of the special cases first. */
1248 if (c
->cond
== TCG_COND_NEVER
) {
1253 if (dest
== s
->pc_tmp
) {
1254 /* Branch to next. */
1255 per_branch(s
, true);
1259 if (c
->cond
== TCG_COND_ALWAYS
) {
1260 ret
= help_goto_direct(s
, dest
);
1265 /* E.g. bcr %r0 -> no branch. */
1269 if (c
->cond
== TCG_COND_ALWAYS
) {
1270 tcg_gen_mov_i64(psw_addr
, cdest
);
1271 per_branch(s
, false);
1272 ret
= DISAS_PC_UPDATED
;
1277 if (use_goto_tb(s
, s
->pc_tmp
)) {
1278 if (is_imm
&& use_goto_tb(s
, dest
)) {
1279 /* Both exits can use goto_tb. */
1282 lab
= gen_new_label();
1284 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1286 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1289 /* Branch not taken. */
1291 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
1292 tcg_gen_exit_tb(s
->base
.tb
, 0);
1296 per_breaking_event(s
);
1298 tcg_gen_movi_i64(psw_addr
, dest
);
1299 tcg_gen_exit_tb(s
->base
.tb
, 1);
1301 ret
= DISAS_GOTO_TB
;
1303 /* Fallthru can use goto_tb, but taken branch cannot. */
1304 /* Store taken branch destination before the brcond. This
1305 avoids having to allocate a new local temp to hold it.
1306 We'll overwrite this in the not taken case anyway. */
1308 tcg_gen_mov_i64(psw_addr
, cdest
);
1311 lab
= gen_new_label();
1313 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1315 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1318 /* Branch not taken. */
1321 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
1322 tcg_gen_exit_tb(s
->base
.tb
, 0);
1326 tcg_gen_movi_i64(psw_addr
, dest
);
1328 per_breaking_event(s
);
1329 ret
= DISAS_PC_UPDATED
;
1332 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1333 Most commonly we're single-stepping or some other condition that
1334 disables all use of goto_tb. Just update the PC and exit. */
1336 TCGv_i64 next
= tcg_const_i64(s
->pc_tmp
);
1338 cdest
= tcg_const_i64(dest
);
1342 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1344 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1346 TCGv_i32 t0
= tcg_temp_new_i32();
1347 TCGv_i64 t1
= tcg_temp_new_i64();
1348 TCGv_i64 z
= tcg_const_i64(0);
1349 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1350 tcg_gen_extu_i32_i64(t1
, t0
);
1351 tcg_temp_free_i32(t0
);
1352 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1353 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1354 tcg_temp_free_i64(t1
);
1355 tcg_temp_free_i64(z
);
1359 tcg_temp_free_i64(cdest
);
1361 tcg_temp_free_i64(next
);
1363 ret
= DISAS_PC_UPDATED
;
1371 /* ====================================================================== */
1372 /* The operations. These perform the bulk of the work for any insn,
1373 usually after the operands have been loaded and output initialized. */
1375 static DisasJumpType
op_abs(DisasContext
*s
, DisasOps
*o
)
1377 tcg_gen_abs_i64(o
->out
, o
->in2
);
1381 static DisasJumpType
op_absf32(DisasContext
*s
, DisasOps
*o
)
1383 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1387 static DisasJumpType
op_absf64(DisasContext
*s
, DisasOps
*o
)
1389 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1393 static DisasJumpType
op_absf128(DisasContext
*s
, DisasOps
*o
)
1395 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1396 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1400 static DisasJumpType
op_add(DisasContext
*s
, DisasOps
*o
)
1402 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1406 static DisasJumpType
op_addu64(DisasContext
*s
, DisasOps
*o
)
1408 tcg_gen_movi_i64(cc_src
, 0);
1409 tcg_gen_add2_i64(o
->out
, cc_src
, o
->in1
, cc_src
, o
->in2
, cc_src
);
1413 /* Compute carry into cc_src. */
1414 static void compute_carry(DisasContext
*s
)
1418 /* The carry value is already in cc_src (1,0). */
1421 tcg_gen_addi_i64(cc_src
, cc_src
, 1);
1427 /* The carry flag is the msb of CC; compute into cc_src. */
1428 tcg_gen_extu_i32_i64(cc_src
, cc_op
);
1429 tcg_gen_shri_i64(cc_src
, cc_src
, 1);
1434 static DisasJumpType
op_addc32(DisasContext
*s
, DisasOps
*o
)
1437 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1438 tcg_gen_add_i64(o
->out
, o
->out
, cc_src
);
1442 static DisasJumpType
op_addc64(DisasContext
*s
, DisasOps
*o
)
1446 TCGv_i64 zero
= tcg_const_i64(0);
1447 tcg_gen_add2_i64(o
->out
, cc_src
, o
->in1
, zero
, cc_src
, zero
);
1448 tcg_gen_add2_i64(o
->out
, cc_src
, o
->out
, cc_src
, o
->in2
, zero
);
1449 tcg_temp_free_i64(zero
);
1454 static DisasJumpType
op_asi(DisasContext
*s
, DisasOps
*o
)
1456 bool non_atomic
= !s390_has_feat(S390_FEAT_STFLE_45
);
1458 o
->in1
= tcg_temp_new_i64();
1460 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1462 /* Perform the atomic addition in memory. */
1463 tcg_gen_atomic_fetch_add_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1467 /* Recompute also for atomic case: needed for setting CC. */
1468 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1471 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1476 static DisasJumpType
op_asiu64(DisasContext
*s
, DisasOps
*o
)
1478 bool non_atomic
= !s390_has_feat(S390_FEAT_STFLE_45
);
1480 o
->in1
= tcg_temp_new_i64();
1482 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1484 /* Perform the atomic addition in memory. */
1485 tcg_gen_atomic_fetch_add_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1489 /* Recompute also for atomic case: needed for setting CC. */
1490 tcg_gen_movi_i64(cc_src
, 0);
1491 tcg_gen_add2_i64(o
->out
, cc_src
, o
->in1
, cc_src
, o
->in2
, cc_src
);
1494 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1499 static DisasJumpType
op_aeb(DisasContext
*s
, DisasOps
*o
)
1501 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1505 static DisasJumpType
op_adb(DisasContext
*s
, DisasOps
*o
)
1507 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1511 static DisasJumpType
op_axb(DisasContext
*s
, DisasOps
*o
)
1513 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1514 return_low128(o
->out2
);
1518 static DisasJumpType
op_and(DisasContext
*s
, DisasOps
*o
)
1520 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1524 static DisasJumpType
op_andi(DisasContext
*s
, DisasOps
*o
)
1526 int shift
= s
->insn
->data
& 0xff;
1527 int size
= s
->insn
->data
>> 8;
1528 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1531 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1532 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1533 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1535 /* Produce the CC from only the bits manipulated. */
1536 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1537 set_cc_nz_u64(s
, cc_dst
);
1541 static DisasJumpType
op_ni(DisasContext
*s
, DisasOps
*o
)
1543 o
->in1
= tcg_temp_new_i64();
1545 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1546 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1548 /* Perform the atomic operation in memory. */
1549 tcg_gen_atomic_fetch_and_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1553 /* Recompute also for atomic case: needed for setting CC. */
1554 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1556 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1557 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1562 static DisasJumpType
op_bas(DisasContext
*s
, DisasOps
*o
)
1564 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1566 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1567 per_branch(s
, false);
1568 return DISAS_PC_UPDATED
;
1574 static void save_link_info(DisasContext
*s
, DisasOps
*o
)
1578 if (s
->base
.tb
->flags
& (FLAG_MASK_32
| FLAG_MASK_64
)) {
1579 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1583 tcg_gen_andi_i64(o
->out
, o
->out
, 0xffffffff00000000ull
);
1584 tcg_gen_ori_i64(o
->out
, o
->out
, ((s
->ilen
/ 2) << 30) | s
->pc_tmp
);
1585 t
= tcg_temp_new_i64();
1586 tcg_gen_shri_i64(t
, psw_mask
, 16);
1587 tcg_gen_andi_i64(t
, t
, 0x0f000000);
1588 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1589 tcg_gen_extu_i32_i64(t
, cc_op
);
1590 tcg_gen_shli_i64(t
, t
, 28);
1591 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1592 tcg_temp_free_i64(t
);
1595 static DisasJumpType
op_bal(DisasContext
*s
, DisasOps
*o
)
1597 save_link_info(s
, o
);
1599 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1600 per_branch(s
, false);
1601 return DISAS_PC_UPDATED
;
1607 static DisasJumpType
op_basi(DisasContext
*s
, DisasOps
*o
)
1609 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1610 return help_goto_direct(s
, s
->base
.pc_next
+ 2 * get_field(s
, i2
));
1613 static DisasJumpType
op_bc(DisasContext
*s
, DisasOps
*o
)
1615 int m1
= get_field(s
, m1
);
1616 bool is_imm
= have_field(s
, i2
);
1617 int imm
= is_imm
? get_field(s
, i2
) : 0;
1620 /* BCR with R2 = 0 causes no branching */
1621 if (have_field(s
, r2
) && get_field(s
, r2
) == 0) {
1623 /* Perform serialization */
1624 /* FIXME: check for fast-BCR-serialization facility */
1625 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1628 /* Perform serialization */
1629 /* FIXME: perform checkpoint-synchronisation */
1630 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1635 disas_jcc(s
, &c
, m1
);
1636 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1639 static DisasJumpType
op_bct32(DisasContext
*s
, DisasOps
*o
)
1641 int r1
= get_field(s
, r1
);
1642 bool is_imm
= have_field(s
, i2
);
1643 int imm
= is_imm
? get_field(s
, i2
) : 0;
1647 c
.cond
= TCG_COND_NE
;
1652 t
= tcg_temp_new_i64();
1653 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1654 store_reg32_i64(r1
, t
);
1655 c
.u
.s32
.a
= tcg_temp_new_i32();
1656 c
.u
.s32
.b
= tcg_const_i32(0);
1657 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1658 tcg_temp_free_i64(t
);
1660 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1663 static DisasJumpType
op_bcth(DisasContext
*s
, DisasOps
*o
)
1665 int r1
= get_field(s
, r1
);
1666 int imm
= get_field(s
, i2
);
1670 c
.cond
= TCG_COND_NE
;
1675 t
= tcg_temp_new_i64();
1676 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1677 tcg_gen_subi_i64(t
, t
, 1);
1678 store_reg32h_i64(r1
, t
);
1679 c
.u
.s32
.a
= tcg_temp_new_i32();
1680 c
.u
.s32
.b
= tcg_const_i32(0);
1681 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1682 tcg_temp_free_i64(t
);
1684 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1687 static DisasJumpType
op_bct64(DisasContext
*s
, DisasOps
*o
)
1689 int r1
= get_field(s
, r1
);
1690 bool is_imm
= have_field(s
, i2
);
1691 int imm
= is_imm
? get_field(s
, i2
) : 0;
1694 c
.cond
= TCG_COND_NE
;
1699 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1700 c
.u
.s64
.a
= regs
[r1
];
1701 c
.u
.s64
.b
= tcg_const_i64(0);
1703 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1706 static DisasJumpType
op_bx32(DisasContext
*s
, DisasOps
*o
)
1708 int r1
= get_field(s
, r1
);
1709 int r3
= get_field(s
, r3
);
1710 bool is_imm
= have_field(s
, i2
);
1711 int imm
= is_imm
? get_field(s
, i2
) : 0;
1715 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1720 t
= tcg_temp_new_i64();
1721 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1722 c
.u
.s32
.a
= tcg_temp_new_i32();
1723 c
.u
.s32
.b
= tcg_temp_new_i32();
1724 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1725 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1726 store_reg32_i64(r1
, t
);
1727 tcg_temp_free_i64(t
);
1729 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1732 static DisasJumpType
op_bx64(DisasContext
*s
, DisasOps
*o
)
1734 int r1
= get_field(s
, r1
);
1735 int r3
= get_field(s
, r3
);
1736 bool is_imm
= have_field(s
, i2
);
1737 int imm
= is_imm
? get_field(s
, i2
) : 0;
1740 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1743 if (r1
== (r3
| 1)) {
1744 c
.u
.s64
.b
= load_reg(r3
| 1);
1747 c
.u
.s64
.b
= regs
[r3
| 1];
1751 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1752 c
.u
.s64
.a
= regs
[r1
];
1755 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1758 static DisasJumpType
op_cj(DisasContext
*s
, DisasOps
*o
)
1760 int imm
, m3
= get_field(s
, m3
);
1764 c
.cond
= ltgt_cond
[m3
];
1765 if (s
->insn
->data
) {
1766 c
.cond
= tcg_unsigned_cond(c
.cond
);
1768 c
.is_64
= c
.g1
= c
.g2
= true;
1772 is_imm
= have_field(s
, i4
);
1774 imm
= get_field(s
, i4
);
1777 o
->out
= get_address(s
, 0, get_field(s
, b4
),
1781 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1784 static DisasJumpType
op_ceb(DisasContext
*s
, DisasOps
*o
)
1786 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1791 static DisasJumpType
op_cdb(DisasContext
*s
, DisasOps
*o
)
1793 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1798 static DisasJumpType
op_cxb(DisasContext
*s
, DisasOps
*o
)
1800 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1805 static TCGv_i32
fpinst_extract_m34(DisasContext
*s
, bool m3_with_fpe
,
1808 const bool fpe
= s390_has_feat(S390_FEAT_FLOATING_POINT_EXT
);
1809 uint8_t m3
= get_field(s
, m3
);
1810 uint8_t m4
= get_field(s
, m4
);
1812 /* m3 field was introduced with FPE */
1813 if (!fpe
&& m3_with_fpe
) {
1816 /* m4 field was introduced with FPE */
1817 if (!fpe
&& m4_with_fpe
) {
1821 /* Check for valid rounding modes. Mode 3 was introduced later. */
1822 if (m3
== 2 || m3
> 7 || (!fpe
&& m3
== 3)) {
1823 gen_program_exception(s
, PGM_SPECIFICATION
);
1827 return tcg_const_i32(deposit32(m3
, 4, 4, m4
));
1830 static DisasJumpType
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1832 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1835 return DISAS_NORETURN
;
1837 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m34
);
1838 tcg_temp_free_i32(m34
);
1839 gen_set_cc_nz_f32(s
, o
->in2
);
1843 static DisasJumpType
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1845 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1848 return DISAS_NORETURN
;
1850 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m34
);
1851 tcg_temp_free_i32(m34
);
1852 gen_set_cc_nz_f64(s
, o
->in2
);
1856 static DisasJumpType
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1858 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1861 return DISAS_NORETURN
;
1863 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
1864 tcg_temp_free_i32(m34
);
1865 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1869 static DisasJumpType
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1871 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1874 return DISAS_NORETURN
;
1876 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m34
);
1877 tcg_temp_free_i32(m34
);
1878 gen_set_cc_nz_f32(s
, o
->in2
);
1882 static DisasJumpType
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1884 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1887 return DISAS_NORETURN
;
1889 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m34
);
1890 tcg_temp_free_i32(m34
);
1891 gen_set_cc_nz_f64(s
, o
->in2
);
1895 static DisasJumpType
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1897 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1900 return DISAS_NORETURN
;
1902 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
1903 tcg_temp_free_i32(m34
);
1904 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1908 static DisasJumpType
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1910 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1913 return DISAS_NORETURN
;
1915 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m34
);
1916 tcg_temp_free_i32(m34
);
1917 gen_set_cc_nz_f32(s
, o
->in2
);
1921 static DisasJumpType
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1923 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1926 return DISAS_NORETURN
;
1928 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m34
);
1929 tcg_temp_free_i32(m34
);
1930 gen_set_cc_nz_f64(s
, o
->in2
);
1934 static DisasJumpType
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1936 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1939 return DISAS_NORETURN
;
1941 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
1942 tcg_temp_free_i32(m34
);
1943 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1947 static DisasJumpType
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1949 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1952 return DISAS_NORETURN
;
1954 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m34
);
1955 tcg_temp_free_i32(m34
);
1956 gen_set_cc_nz_f32(s
, o
->in2
);
1960 static DisasJumpType
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1962 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1965 return DISAS_NORETURN
;
1967 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m34
);
1968 tcg_temp_free_i32(m34
);
1969 gen_set_cc_nz_f64(s
, o
->in2
);
1973 static DisasJumpType
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1975 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1978 return DISAS_NORETURN
;
1980 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
1981 tcg_temp_free_i32(m34
);
1982 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1986 static DisasJumpType
op_cegb(DisasContext
*s
, DisasOps
*o
)
1988 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
1991 return DISAS_NORETURN
;
1993 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m34
);
1994 tcg_temp_free_i32(m34
);
1998 static DisasJumpType
op_cdgb(DisasContext
*s
, DisasOps
*o
)
2000 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
2003 return DISAS_NORETURN
;
2005 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m34
);
2006 tcg_temp_free_i32(m34
);
2010 static DisasJumpType
op_cxgb(DisasContext
*s
, DisasOps
*o
)
2012 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
2015 return DISAS_NORETURN
;
2017 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m34
);
2018 tcg_temp_free_i32(m34
);
2019 return_low128(o
->out2
);
2023 static DisasJumpType
op_celgb(DisasContext
*s
, DisasOps
*o
)
2025 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
2028 return DISAS_NORETURN
;
2030 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m34
);
2031 tcg_temp_free_i32(m34
);
2035 static DisasJumpType
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
2037 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
2040 return DISAS_NORETURN
;
2042 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m34
);
2043 tcg_temp_free_i32(m34
);
2047 static DisasJumpType
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
2049 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
2052 return DISAS_NORETURN
;
2054 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m34
);
2055 tcg_temp_free_i32(m34
);
2056 return_low128(o
->out2
);
2060 static DisasJumpType
op_cksm(DisasContext
*s
, DisasOps
*o
)
2062 int r2
= get_field(s
, r2
);
2063 TCGv_i64 len
= tcg_temp_new_i64();
2065 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
2067 return_low128(o
->out
);
2069 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
2070 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
2071 tcg_temp_free_i64(len
);
2076 static DisasJumpType
op_clc(DisasContext
*s
, DisasOps
*o
)
2078 int l
= get_field(s
, l1
);
2083 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
2084 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
2087 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
2088 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
2091 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
2092 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
2095 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
2096 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
2099 vl
= tcg_const_i32(l
);
2100 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
2101 tcg_temp_free_i32(vl
);
2105 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
2109 static DisasJumpType
op_clcl(DisasContext
*s
, DisasOps
*o
)
2111 int r1
= get_field(s
, r1
);
2112 int r2
= get_field(s
, r2
);
2115 /* r1 and r2 must be even. */
2116 if (r1
& 1 || r2
& 1) {
2117 gen_program_exception(s
, PGM_SPECIFICATION
);
2118 return DISAS_NORETURN
;
2121 t1
= tcg_const_i32(r1
);
2122 t2
= tcg_const_i32(r2
);
2123 gen_helper_clcl(cc_op
, cpu_env
, t1
, t2
);
2124 tcg_temp_free_i32(t1
);
2125 tcg_temp_free_i32(t2
);
2130 static DisasJumpType
op_clcle(DisasContext
*s
, DisasOps
*o
)
2132 int r1
= get_field(s
, r1
);
2133 int r3
= get_field(s
, r3
);
2136 /* r1 and r3 must be even. */
2137 if (r1
& 1 || r3
& 1) {
2138 gen_program_exception(s
, PGM_SPECIFICATION
);
2139 return DISAS_NORETURN
;
2142 t1
= tcg_const_i32(r1
);
2143 t3
= tcg_const_i32(r3
);
2144 gen_helper_clcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
2145 tcg_temp_free_i32(t1
);
2146 tcg_temp_free_i32(t3
);
2151 static DisasJumpType
op_clclu(DisasContext
*s
, DisasOps
*o
)
2153 int r1
= get_field(s
, r1
);
2154 int r3
= get_field(s
, r3
);
2157 /* r1 and r3 must be even. */
2158 if (r1
& 1 || r3
& 1) {
2159 gen_program_exception(s
, PGM_SPECIFICATION
);
2160 return DISAS_NORETURN
;
2163 t1
= tcg_const_i32(r1
);
2164 t3
= tcg_const_i32(r3
);
2165 gen_helper_clclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
2166 tcg_temp_free_i32(t1
);
2167 tcg_temp_free_i32(t3
);
2172 static DisasJumpType
op_clm(DisasContext
*s
, DisasOps
*o
)
2174 TCGv_i32 m3
= tcg_const_i32(get_field(s
, m3
));
2175 TCGv_i32 t1
= tcg_temp_new_i32();
2176 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
2177 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
2179 tcg_temp_free_i32(t1
);
2180 tcg_temp_free_i32(m3
);
2184 static DisasJumpType
op_clst(DisasContext
*s
, DisasOps
*o
)
2186 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2188 return_low128(o
->in2
);
2192 static DisasJumpType
op_cps(DisasContext
*s
, DisasOps
*o
)
2194 TCGv_i64 t
= tcg_temp_new_i64();
2195 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
2196 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
2197 tcg_gen_or_i64(o
->out
, o
->out
, t
);
2198 tcg_temp_free_i64(t
);
2202 static DisasJumpType
op_cs(DisasContext
*s
, DisasOps
*o
)
2204 int d2
= get_field(s
, d2
);
2205 int b2
= get_field(s
, b2
);
2208 /* Note that in1 = R3 (new value) and
2209 in2 = (zero-extended) R1 (expected value). */
2211 addr
= get_address(s
, 0, b2
, d2
);
2212 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
2213 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
2214 tcg_temp_free_i64(addr
);
2216 /* Are the memory and expected values (un)equal? Note that this setcond
2217 produces the output CC value, thus the NE sense of the test. */
2218 cc
= tcg_temp_new_i64();
2219 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
2220 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2221 tcg_temp_free_i64(cc
);
2227 static DisasJumpType
op_cdsg(DisasContext
*s
, DisasOps
*o
)
2229 int r1
= get_field(s
, r1
);
2230 int r3
= get_field(s
, r3
);
2231 int d2
= get_field(s
, d2
);
2232 int b2
= get_field(s
, b2
);
2233 DisasJumpType ret
= DISAS_NEXT
;
2235 TCGv_i32 t_r1
, t_r3
;
2237 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2238 addr
= get_address(s
, 0, b2
, d2
);
2239 t_r1
= tcg_const_i32(r1
);
2240 t_r3
= tcg_const_i32(r3
);
2241 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
2242 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
2243 } else if (HAVE_CMPXCHG128
) {
2244 gen_helper_cdsg_parallel(cpu_env
, addr
, t_r1
, t_r3
);
2246 gen_helper_exit_atomic(cpu_env
);
2247 ret
= DISAS_NORETURN
;
2249 tcg_temp_free_i64(addr
);
2250 tcg_temp_free_i32(t_r1
);
2251 tcg_temp_free_i32(t_r3
);
2257 static DisasJumpType
op_csst(DisasContext
*s
, DisasOps
*o
)
2259 int r3
= get_field(s
, r3
);
2260 TCGv_i32 t_r3
= tcg_const_i32(r3
);
2262 if (tb_cflags(s
->base
.tb
) & CF_PARALLEL
) {
2263 gen_helper_csst_parallel(cc_op
, cpu_env
, t_r3
, o
->addr1
, o
->in2
);
2265 gen_helper_csst(cc_op
, cpu_env
, t_r3
, o
->addr1
, o
->in2
);
2267 tcg_temp_free_i32(t_r3
);
2273 #ifndef CONFIG_USER_ONLY
2274 static DisasJumpType
op_csp(DisasContext
*s
, DisasOps
*o
)
2276 MemOp mop
= s
->insn
->data
;
2277 TCGv_i64 addr
, old
, cc
;
2278 TCGLabel
*lab
= gen_new_label();
2280 /* Note that in1 = R1 (zero-extended expected value),
2281 out = R1 (original reg), out2 = R1+1 (new value). */
2283 addr
= tcg_temp_new_i64();
2284 old
= tcg_temp_new_i64();
2285 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2286 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2287 get_mem_index(s
), mop
| MO_ALIGN
);
2288 tcg_temp_free_i64(addr
);
2290 /* Are the memory and expected values (un)equal? */
2291 cc
= tcg_temp_new_i64();
2292 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2293 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2295 /* Write back the output now, so that it happens before the
2296 following branch, so that we don't need local temps. */
2297 if ((mop
& MO_SIZE
) == MO_32
) {
2298 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2300 tcg_gen_mov_i64(o
->out
, old
);
2302 tcg_temp_free_i64(old
);
2304 /* If the comparison was equal, and the LSB of R2 was set,
2305 then we need to flush the TLB (for all cpus). */
2306 tcg_gen_xori_i64(cc
, cc
, 1);
2307 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2308 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2309 tcg_temp_free_i64(cc
);
2311 gen_helper_purge(cpu_env
);
2318 static DisasJumpType
op_cvd(DisasContext
*s
, DisasOps
*o
)
2320 TCGv_i64 t1
= tcg_temp_new_i64();
2321 TCGv_i32 t2
= tcg_temp_new_i32();
2322 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2323 gen_helper_cvd(t1
, t2
);
2324 tcg_temp_free_i32(t2
);
2325 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2326 tcg_temp_free_i64(t1
);
2330 static DisasJumpType
op_ct(DisasContext
*s
, DisasOps
*o
)
2332 int m3
= get_field(s
, m3
);
2333 TCGLabel
*lab
= gen_new_label();
2336 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2337 if (s
->insn
->data
) {
2338 c
= tcg_unsigned_cond(c
);
2340 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2349 static DisasJumpType
op_cuXX(DisasContext
*s
, DisasOps
*o
)
2351 int m3
= get_field(s
, m3
);
2352 int r1
= get_field(s
, r1
);
2353 int r2
= get_field(s
, r2
);
2354 TCGv_i32 tr1
, tr2
, chk
;
2356 /* R1 and R2 must both be even. */
2357 if ((r1
| r2
) & 1) {
2358 gen_program_exception(s
, PGM_SPECIFICATION
);
2359 return DISAS_NORETURN
;
2361 if (!s390_has_feat(S390_FEAT_ETF3_ENH
)) {
2365 tr1
= tcg_const_i32(r1
);
2366 tr2
= tcg_const_i32(r2
);
2367 chk
= tcg_const_i32(m3
);
2369 switch (s
->insn
->data
) {
2371 gen_helper_cu12(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2374 gen_helper_cu14(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2377 gen_helper_cu21(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2380 gen_helper_cu24(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2383 gen_helper_cu41(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2386 gen_helper_cu42(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2389 g_assert_not_reached();
2392 tcg_temp_free_i32(tr1
);
2393 tcg_temp_free_i32(tr2
);
2394 tcg_temp_free_i32(chk
);
2399 #ifndef CONFIG_USER_ONLY
2400 static DisasJumpType
op_diag(DisasContext
*s
, DisasOps
*o
)
2402 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
2403 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
2404 TCGv_i32 func_code
= tcg_const_i32(get_field(s
, i2
));
2406 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2408 tcg_temp_free_i32(func_code
);
2409 tcg_temp_free_i32(r3
);
2410 tcg_temp_free_i32(r1
);
2415 static DisasJumpType
op_divs32(DisasContext
*s
, DisasOps
*o
)
2417 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2418 return_low128(o
->out
);
2422 static DisasJumpType
op_divu32(DisasContext
*s
, DisasOps
*o
)
2424 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2425 return_low128(o
->out
);
2429 static DisasJumpType
op_divs64(DisasContext
*s
, DisasOps
*o
)
2431 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2432 return_low128(o
->out
);
2436 static DisasJumpType
op_divu64(DisasContext
*s
, DisasOps
*o
)
2438 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2439 return_low128(o
->out
);
2443 static DisasJumpType
op_deb(DisasContext
*s
, DisasOps
*o
)
2445 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2449 static DisasJumpType
op_ddb(DisasContext
*s
, DisasOps
*o
)
2451 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2455 static DisasJumpType
op_dxb(DisasContext
*s
, DisasOps
*o
)
2457 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2458 return_low128(o
->out2
);
2462 static DisasJumpType
op_ear(DisasContext
*s
, DisasOps
*o
)
2464 int r2
= get_field(s
, r2
);
2465 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2469 static DisasJumpType
op_ecag(DisasContext
*s
, DisasOps
*o
)
2471 /* No cache information provided. */
2472 tcg_gen_movi_i64(o
->out
, -1);
2476 static DisasJumpType
op_efpc(DisasContext
*s
, DisasOps
*o
)
2478 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2482 static DisasJumpType
op_epsw(DisasContext
*s
, DisasOps
*o
)
2484 int r1
= get_field(s
, r1
);
2485 int r2
= get_field(s
, r2
);
2486 TCGv_i64 t
= tcg_temp_new_i64();
2488 /* Note the "subsequently" in the PoO, which implies a defined result
2489 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2490 tcg_gen_shri_i64(t
, psw_mask
, 32);
2491 store_reg32_i64(r1
, t
);
2493 store_reg32_i64(r2
, psw_mask
);
2496 tcg_temp_free_i64(t
);
2500 static DisasJumpType
op_ex(DisasContext
*s
, DisasOps
*o
)
2502 int r1
= get_field(s
, r1
);
2506 /* Nested EXECUTE is not allowed. */
2507 if (unlikely(s
->ex_value
)) {
2508 gen_program_exception(s
, PGM_EXECUTE
);
2509 return DISAS_NORETURN
;
2516 v1
= tcg_const_i64(0);
2521 ilen
= tcg_const_i32(s
->ilen
);
2522 gen_helper_ex(cpu_env
, ilen
, v1
, o
->in2
);
2523 tcg_temp_free_i32(ilen
);
2526 tcg_temp_free_i64(v1
);
2529 return DISAS_PC_CC_UPDATED
;
2532 static DisasJumpType
op_fieb(DisasContext
*s
, DisasOps
*o
)
2534 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
2537 return DISAS_NORETURN
;
2539 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m34
);
2540 tcg_temp_free_i32(m34
);
2544 static DisasJumpType
op_fidb(DisasContext
*s
, DisasOps
*o
)
2546 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
2549 return DISAS_NORETURN
;
2551 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m34
);
2552 tcg_temp_free_i32(m34
);
2556 static DisasJumpType
op_fixb(DisasContext
*s
, DisasOps
*o
)
2558 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
2561 return DISAS_NORETURN
;
2563 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
2564 return_low128(o
->out2
);
2565 tcg_temp_free_i32(m34
);
2569 static DisasJumpType
op_flogr(DisasContext
*s
, DisasOps
*o
)
2571 /* We'll use the original input for cc computation, since we get to
2572 compare that against 0, which ought to be better than comparing
2573 the real output against 64. It also lets cc_dst be a convenient
2574 temporary during our computation. */
2575 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2577 /* R1 = IN ? CLZ(IN) : 64. */
2578 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2580 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2581 value by 64, which is undefined. But since the shift is 64 iff the
2582 input is zero, we still get the correct result after and'ing. */
2583 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2584 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2585 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2589 static DisasJumpType
op_icm(DisasContext
*s
, DisasOps
*o
)
2591 int m3
= get_field(s
, m3
);
2592 int pos
, len
, base
= s
->insn
->data
;
2593 TCGv_i64 tmp
= tcg_temp_new_i64();
2598 /* Effectively a 32-bit load. */
2599 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2606 /* Effectively a 16-bit load. */
2607 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2615 /* Effectively an 8-bit load. */
2616 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2621 pos
= base
+ ctz32(m3
) * 8;
2622 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2623 ccm
= ((1ull << len
) - 1) << pos
;
2627 /* This is going to be a sequence of loads and inserts. */
2628 pos
= base
+ 32 - 8;
2632 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2633 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2634 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2637 m3
= (m3
<< 1) & 0xf;
2643 tcg_gen_movi_i64(tmp
, ccm
);
2644 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2645 tcg_temp_free_i64(tmp
);
2649 static DisasJumpType
op_insi(DisasContext
*s
, DisasOps
*o
)
2651 int shift
= s
->insn
->data
& 0xff;
2652 int size
= s
->insn
->data
>> 8;
2653 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2657 static DisasJumpType
op_ipm(DisasContext
*s
, DisasOps
*o
)
2662 t1
= tcg_temp_new_i64();
2663 tcg_gen_extract_i64(t1
, psw_mask
, 40, 4);
2664 t2
= tcg_temp_new_i64();
2665 tcg_gen_extu_i32_i64(t2
, cc_op
);
2666 tcg_gen_deposit_i64(t1
, t1
, t2
, 4, 60);
2667 tcg_gen_deposit_i64(o
->out
, o
->out
, t1
, 24, 8);
2668 tcg_temp_free_i64(t1
);
2669 tcg_temp_free_i64(t2
);
2673 #ifndef CONFIG_USER_ONLY
2674 static DisasJumpType
op_idte(DisasContext
*s
, DisasOps
*o
)
2678 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2679 m4
= tcg_const_i32(get_field(s
, m4
));
2681 m4
= tcg_const_i32(0);
2683 gen_helper_idte(cpu_env
, o
->in1
, o
->in2
, m4
);
2684 tcg_temp_free_i32(m4
);
2688 static DisasJumpType
op_ipte(DisasContext
*s
, DisasOps
*o
)
2692 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2693 m4
= tcg_const_i32(get_field(s
, m4
));
2695 m4
= tcg_const_i32(0);
2697 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
, m4
);
2698 tcg_temp_free_i32(m4
);
2702 static DisasJumpType
op_iske(DisasContext
*s
, DisasOps
*o
)
2704 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2709 static DisasJumpType
op_msa(DisasContext
*s
, DisasOps
*o
)
2711 int r1
= have_field(s
, r1
) ? get_field(s
, r1
) : 0;
2712 int r2
= have_field(s
, r2
) ? get_field(s
, r2
) : 0;
2713 int r3
= have_field(s
, r3
) ? get_field(s
, r3
) : 0;
2714 TCGv_i32 t_r1
, t_r2
, t_r3
, type
;
2716 switch (s
->insn
->data
) {
2717 case S390_FEAT_TYPE_KMA
:
2718 if (r3
== r1
|| r3
== r2
) {
2719 gen_program_exception(s
, PGM_SPECIFICATION
);
2720 return DISAS_NORETURN
;
2723 case S390_FEAT_TYPE_KMCTR
:
2724 if (r3
& 1 || !r3
) {
2725 gen_program_exception(s
, PGM_SPECIFICATION
);
2726 return DISAS_NORETURN
;
2729 case S390_FEAT_TYPE_PPNO
:
2730 case S390_FEAT_TYPE_KMF
:
2731 case S390_FEAT_TYPE_KMC
:
2732 case S390_FEAT_TYPE_KMO
:
2733 case S390_FEAT_TYPE_KM
:
2734 if (r1
& 1 || !r1
) {
2735 gen_program_exception(s
, PGM_SPECIFICATION
);
2736 return DISAS_NORETURN
;
2739 case S390_FEAT_TYPE_KMAC
:
2740 case S390_FEAT_TYPE_KIMD
:
2741 case S390_FEAT_TYPE_KLMD
:
2742 if (r2
& 1 || !r2
) {
2743 gen_program_exception(s
, PGM_SPECIFICATION
);
2744 return DISAS_NORETURN
;
2747 case S390_FEAT_TYPE_PCKMO
:
2748 case S390_FEAT_TYPE_PCC
:
2751 g_assert_not_reached();
2754 t_r1
= tcg_const_i32(r1
);
2755 t_r2
= tcg_const_i32(r2
);
2756 t_r3
= tcg_const_i32(r3
);
2757 type
= tcg_const_i32(s
->insn
->data
);
2758 gen_helper_msa(cc_op
, cpu_env
, t_r1
, t_r2
, t_r3
, type
);
2760 tcg_temp_free_i32(t_r1
);
2761 tcg_temp_free_i32(t_r2
);
2762 tcg_temp_free_i32(t_r3
);
2763 tcg_temp_free_i32(type
);
2767 static DisasJumpType
op_keb(DisasContext
*s
, DisasOps
*o
)
2769 gen_helper_keb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2774 static DisasJumpType
op_kdb(DisasContext
*s
, DisasOps
*o
)
2776 gen_helper_kdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2781 static DisasJumpType
op_kxb(DisasContext
*s
, DisasOps
*o
)
2783 gen_helper_kxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2788 static DisasJumpType
op_laa(DisasContext
*s
, DisasOps
*o
)
2790 /* The real output is indeed the original value in memory;
2791 recompute the addition for the computation of CC. */
2792 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2793 s
->insn
->data
| MO_ALIGN
);
2794 /* However, we need to recompute the addition for setting CC. */
2795 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2799 static DisasJumpType
op_lan(DisasContext
*s
, DisasOps
*o
)
2801 /* The real output is indeed the original value in memory;
2802 recompute the addition for the computation of CC. */
2803 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2804 s
->insn
->data
| MO_ALIGN
);
2805 /* However, we need to recompute the operation for setting CC. */
2806 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2810 static DisasJumpType
op_lao(DisasContext
*s
, DisasOps
*o
)
2812 /* The real output is indeed the original value in memory;
2813 recompute the addition for the computation of CC. */
2814 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2815 s
->insn
->data
| MO_ALIGN
);
2816 /* However, we need to recompute the operation for setting CC. */
2817 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2821 static DisasJumpType
op_lax(DisasContext
*s
, DisasOps
*o
)
2823 /* The real output is indeed the original value in memory;
2824 recompute the addition for the computation of CC. */
2825 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2826 s
->insn
->data
| MO_ALIGN
);
2827 /* However, we need to recompute the operation for setting CC. */
2828 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2832 static DisasJumpType
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2834 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2838 static DisasJumpType
op_ledb(DisasContext
*s
, DisasOps
*o
)
2840 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
2843 return DISAS_NORETURN
;
2845 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
, m34
);
2846 tcg_temp_free_i32(m34
);
2850 static DisasJumpType
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2852 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
2855 return DISAS_NORETURN
;
2857 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
2858 tcg_temp_free_i32(m34
);
2862 static DisasJumpType
op_lexb(DisasContext
*s
, DisasOps
*o
)
2864 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
2867 return DISAS_NORETURN
;
2869 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
2870 tcg_temp_free_i32(m34
);
2874 static DisasJumpType
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2876 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2877 return_low128(o
->out2
);
2881 static DisasJumpType
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2883 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2884 return_low128(o
->out2
);
2888 static DisasJumpType
op_lde(DisasContext
*s
, DisasOps
*o
)
2890 tcg_gen_shli_i64(o
->out
, o
->in2
, 32);
2894 static DisasJumpType
op_llgt(DisasContext
*s
, DisasOps
*o
)
2896 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2900 static DisasJumpType
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2902 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2906 static DisasJumpType
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2908 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2912 static DisasJumpType
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2914 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2918 static DisasJumpType
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2920 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2924 static DisasJumpType
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2926 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2930 static DisasJumpType
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2932 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2936 static DisasJumpType
op_ld64(DisasContext
*s
, DisasOps
*o
)
2938 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2942 static DisasJumpType
op_lat(DisasContext
*s
, DisasOps
*o
)
2944 TCGLabel
*lab
= gen_new_label();
2945 store_reg32_i64(get_field(s
, r1
), o
->in2
);
2946 /* The value is stored even in case of trap. */
2947 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2953 static DisasJumpType
op_lgat(DisasContext
*s
, DisasOps
*o
)
2955 TCGLabel
*lab
= gen_new_label();
2956 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2957 /* The value is stored even in case of trap. */
2958 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2964 static DisasJumpType
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2966 TCGLabel
*lab
= gen_new_label();
2967 store_reg32h_i64(get_field(s
, r1
), o
->in2
);
2968 /* The value is stored even in case of trap. */
2969 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2975 static DisasJumpType
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2977 TCGLabel
*lab
= gen_new_label();
2978 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2979 /* The value is stored even in case of trap. */
2980 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2986 static DisasJumpType
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2988 TCGLabel
*lab
= gen_new_label();
2989 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2990 /* The value is stored even in case of trap. */
2991 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2997 static DisasJumpType
op_loc(DisasContext
*s
, DisasOps
*o
)
3001 disas_jcc(s
, &c
, get_field(s
, m3
));
3004 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
3008 TCGv_i32 t32
= tcg_temp_new_i32();
3011 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
3014 t
= tcg_temp_new_i64();
3015 tcg_gen_extu_i32_i64(t
, t32
);
3016 tcg_temp_free_i32(t32
);
3018 z
= tcg_const_i64(0);
3019 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
3020 tcg_temp_free_i64(t
);
3021 tcg_temp_free_i64(z
);
3027 #ifndef CONFIG_USER_ONLY
3028 static DisasJumpType
op_lctl(DisasContext
*s
, DisasOps
*o
)
3030 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
3031 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
3032 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
3033 tcg_temp_free_i32(r1
);
3034 tcg_temp_free_i32(r3
);
3035 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3036 return DISAS_PC_STALE_NOCHAIN
;
3039 static DisasJumpType
op_lctlg(DisasContext
*s
, DisasOps
*o
)
3041 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
3042 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
3043 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
3044 tcg_temp_free_i32(r1
);
3045 tcg_temp_free_i32(r3
);
3046 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3047 return DISAS_PC_STALE_NOCHAIN
;
3050 static DisasJumpType
op_lra(DisasContext
*s
, DisasOps
*o
)
3052 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
3057 static DisasJumpType
op_lpp(DisasContext
*s
, DisasOps
*o
)
3059 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
3063 static DisasJumpType
op_lpsw(DisasContext
*s
, DisasOps
*o
)
3067 per_breaking_event(s
);
3069 t1
= tcg_temp_new_i64();
3070 t2
= tcg_temp_new_i64();
3071 tcg_gen_qemu_ld_i64(t1
, o
->in2
, get_mem_index(s
),
3072 MO_TEUL
| MO_ALIGN_8
);
3073 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
3074 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
3075 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3076 tcg_gen_shli_i64(t1
, t1
, 32);
3077 gen_helper_load_psw(cpu_env
, t1
, t2
);
3078 tcg_temp_free_i64(t1
);
3079 tcg_temp_free_i64(t2
);
3080 return DISAS_NORETURN
;
3083 static DisasJumpType
op_lpswe(DisasContext
*s
, DisasOps
*o
)
3087 per_breaking_event(s
);
3089 t1
= tcg_temp_new_i64();
3090 t2
= tcg_temp_new_i64();
3091 tcg_gen_qemu_ld_i64(t1
, o
->in2
, get_mem_index(s
),
3092 MO_TEQ
| MO_ALIGN_8
);
3093 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3094 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
3095 gen_helper_load_psw(cpu_env
, t1
, t2
);
3096 tcg_temp_free_i64(t1
);
3097 tcg_temp_free_i64(t2
);
3098 return DISAS_NORETURN
;
3102 static DisasJumpType
op_lam(DisasContext
*s
, DisasOps
*o
)
3104 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
3105 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
3106 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
3107 tcg_temp_free_i32(r1
);
3108 tcg_temp_free_i32(r3
);
3112 static DisasJumpType
op_lm32(DisasContext
*s
, DisasOps
*o
)
3114 int r1
= get_field(s
, r1
);
3115 int r3
= get_field(s
, r3
);
3118 /* Only one register to read. */
3119 t1
= tcg_temp_new_i64();
3120 if (unlikely(r1
== r3
)) {
3121 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3122 store_reg32_i64(r1
, t1
);
3127 /* First load the values of the first and last registers to trigger
3128 possible page faults. */
3129 t2
= tcg_temp_new_i64();
3130 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3131 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
3132 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
3133 store_reg32_i64(r1
, t1
);
3134 store_reg32_i64(r3
, t2
);
3136 /* Only two registers to read. */
3137 if (((r1
+ 1) & 15) == r3
) {
3143 /* Then load the remaining registers. Page fault can't occur. */
3145 tcg_gen_movi_i64(t2
, 4);
3148 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
3149 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3150 store_reg32_i64(r1
, t1
);
3158 static DisasJumpType
op_lmh(DisasContext
*s
, DisasOps
*o
)
3160 int r1
= get_field(s
, r1
);
3161 int r3
= get_field(s
, r3
);
3164 /* Only one register to read. */
3165 t1
= tcg_temp_new_i64();
3166 if (unlikely(r1
== r3
)) {
3167 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3168 store_reg32h_i64(r1
, t1
);
3173 /* First load the values of the first and last registers to trigger
3174 possible page faults. */
3175 t2
= tcg_temp_new_i64();
3176 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3177 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
3178 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
3179 store_reg32h_i64(r1
, t1
);
3180 store_reg32h_i64(r3
, t2
);
3182 /* Only two registers to read. */
3183 if (((r1
+ 1) & 15) == r3
) {
3189 /* Then load the remaining registers. Page fault can't occur. */
3191 tcg_gen_movi_i64(t2
, 4);
3194 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
3195 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3196 store_reg32h_i64(r1
, t1
);
3204 static DisasJumpType
op_lm64(DisasContext
*s
, DisasOps
*o
)
3206 int r1
= get_field(s
, r1
);
3207 int r3
= get_field(s
, r3
);
3210 /* Only one register to read. */
3211 if (unlikely(r1
== r3
)) {
3212 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
3216 /* First load the values of the first and last registers to trigger
3217 possible page faults. */
3218 t1
= tcg_temp_new_i64();
3219 t2
= tcg_temp_new_i64();
3220 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
3221 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
3222 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
3223 tcg_gen_mov_i64(regs
[r1
], t1
);
3226 /* Only two registers to read. */
3227 if (((r1
+ 1) & 15) == r3
) {
3232 /* Then load the remaining registers. Page fault can't occur. */
3234 tcg_gen_movi_i64(t1
, 8);
3237 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
3238 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
3245 static DisasJumpType
op_lpd(DisasContext
*s
, DisasOps
*o
)
3248 MemOp mop
= s
->insn
->data
;
3250 /* In a parallel context, stop the world and single step. */
3251 if (tb_cflags(s
->base
.tb
) & CF_PARALLEL
) {
3254 gen_exception(EXCP_ATOMIC
);
3255 return DISAS_NORETURN
;
3258 /* In a serial context, perform the two loads ... */
3259 a1
= get_address(s
, 0, get_field(s
, b1
), get_field(s
, d1
));
3260 a2
= get_address(s
, 0, get_field(s
, b2
), get_field(s
, d2
));
3261 tcg_gen_qemu_ld_i64(o
->out
, a1
, get_mem_index(s
), mop
| MO_ALIGN
);
3262 tcg_gen_qemu_ld_i64(o
->out2
, a2
, get_mem_index(s
), mop
| MO_ALIGN
);
3263 tcg_temp_free_i64(a1
);
3264 tcg_temp_free_i64(a2
);
3266 /* ... and indicate that we performed them while interlocked. */
3267 gen_op_movi_cc(s
, 0);
3271 static DisasJumpType
op_lpq(DisasContext
*s
, DisasOps
*o
)
3273 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
3274 gen_helper_lpq(o
->out
, cpu_env
, o
->in2
);
3275 } else if (HAVE_ATOMIC128
) {
3276 gen_helper_lpq_parallel(o
->out
, cpu_env
, o
->in2
);
3278 gen_helper_exit_atomic(cpu_env
);
3279 return DISAS_NORETURN
;
3281 return_low128(o
->out2
);
3285 #ifndef CONFIG_USER_ONLY
3286 static DisasJumpType
op_lura(DisasContext
*s
, DisasOps
*o
)
3288 o
->addr1
= get_address(s
, 0, get_field(s
, r2
), 0);
3289 tcg_gen_qemu_ld_tl(o
->out
, o
->addr1
, MMU_REAL_IDX
, s
->insn
->data
);
3294 static DisasJumpType
op_lzrb(DisasContext
*s
, DisasOps
*o
)
3296 tcg_gen_andi_i64(o
->out
, o
->in2
, -256);
3300 static DisasJumpType
op_lcbb(DisasContext
*s
, DisasOps
*o
)
3302 const int64_t block_size
= (1ull << (get_field(s
, m3
) + 6));
3304 if (get_field(s
, m3
) > 6) {
3305 gen_program_exception(s
, PGM_SPECIFICATION
);
3306 return DISAS_NORETURN
;
3309 tcg_gen_ori_i64(o
->addr1
, o
->addr1
, -block_size
);
3310 tcg_gen_neg_i64(o
->addr1
, o
->addr1
);
3311 tcg_gen_movi_i64(o
->out
, 16);
3312 tcg_gen_umin_i64(o
->out
, o
->out
, o
->addr1
);
3313 gen_op_update1_cc_i64(s
, CC_OP_LCBB
, o
->out
);
3317 static DisasJumpType
op_mc(DisasContext
*s
, DisasOps
*o
)
3319 #if !defined(CONFIG_USER_ONLY)
3322 const uint16_t monitor_class
= get_field(s
, i2
);
3324 if (monitor_class
& 0xff00) {
3325 gen_program_exception(s
, PGM_SPECIFICATION
);
3326 return DISAS_NORETURN
;
3329 #if !defined(CONFIG_USER_ONLY)
3330 i2
= tcg_const_i32(monitor_class
);
3331 gen_helper_monitor_call(cpu_env
, o
->addr1
, i2
);
3332 tcg_temp_free_i32(i2
);
3334 /* Defaults to a NOP. */
3338 static DisasJumpType
op_mov2(DisasContext
*s
, DisasOps
*o
)
3341 o
->g_out
= o
->g_in2
;
3347 static DisasJumpType
op_mov2e(DisasContext
*s
, DisasOps
*o
)
3349 int b2
= get_field(s
, b2
);
3350 TCGv ar1
= tcg_temp_new_i64();
3353 o
->g_out
= o
->g_in2
;
3357 switch (s
->base
.tb
->flags
& FLAG_MASK_ASC
) {
3358 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
3359 tcg_gen_movi_i64(ar1
, 0);
3361 case PSW_ASC_ACCREG
>> FLAG_MASK_PSW_SHIFT
:
3362 tcg_gen_movi_i64(ar1
, 1);
3364 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
3366 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
3368 tcg_gen_movi_i64(ar1
, 0);
3371 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
3372 tcg_gen_movi_i64(ar1
, 2);
3376 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
3377 tcg_temp_free_i64(ar1
);
3382 static DisasJumpType
op_movx(DisasContext
*s
, DisasOps
*o
)
3386 o
->g_out
= o
->g_in1
;
3387 o
->g_out2
= o
->g_in2
;
3390 o
->g_in1
= o
->g_in2
= false;
3394 static DisasJumpType
op_mvc(DisasContext
*s
, DisasOps
*o
)
3396 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
3397 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
3398 tcg_temp_free_i32(l
);
3402 static DisasJumpType
op_mvcin(DisasContext
*s
, DisasOps
*o
)
3404 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
3405 gen_helper_mvcin(cpu_env
, l
, o
->addr1
, o
->in2
);
3406 tcg_temp_free_i32(l
);
3410 static DisasJumpType
op_mvcl(DisasContext
*s
, DisasOps
*o
)
3412 int r1
= get_field(s
, r1
);
3413 int r2
= get_field(s
, r2
);
3416 /* r1 and r2 must be even. */
3417 if (r1
& 1 || r2
& 1) {
3418 gen_program_exception(s
, PGM_SPECIFICATION
);
3419 return DISAS_NORETURN
;
3422 t1
= tcg_const_i32(r1
);
3423 t2
= tcg_const_i32(r2
);
3424 gen_helper_mvcl(cc_op
, cpu_env
, t1
, t2
);
3425 tcg_temp_free_i32(t1
);
3426 tcg_temp_free_i32(t2
);
3431 static DisasJumpType
op_mvcle(DisasContext
*s
, DisasOps
*o
)
3433 int r1
= get_field(s
, r1
);
3434 int r3
= get_field(s
, r3
);
3437 /* r1 and r3 must be even. */
3438 if (r1
& 1 || r3
& 1) {
3439 gen_program_exception(s
, PGM_SPECIFICATION
);
3440 return DISAS_NORETURN
;
3443 t1
= tcg_const_i32(r1
);
3444 t3
= tcg_const_i32(r3
);
3445 gen_helper_mvcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3446 tcg_temp_free_i32(t1
);
3447 tcg_temp_free_i32(t3
);
3452 static DisasJumpType
op_mvclu(DisasContext
*s
, DisasOps
*o
)
3454 int r1
= get_field(s
, r1
);
3455 int r3
= get_field(s
, r3
);
3458 /* r1 and r3 must be even. */
3459 if (r1
& 1 || r3
& 1) {
3460 gen_program_exception(s
, PGM_SPECIFICATION
);
3461 return DISAS_NORETURN
;
3464 t1
= tcg_const_i32(r1
);
3465 t3
= tcg_const_i32(r3
);
3466 gen_helper_mvclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3467 tcg_temp_free_i32(t1
);
3468 tcg_temp_free_i32(t3
);
3473 static DisasJumpType
op_mvcos(DisasContext
*s
, DisasOps
*o
)
3475 int r3
= get_field(s
, r3
);
3476 gen_helper_mvcos(cc_op
, cpu_env
, o
->addr1
, o
->in2
, regs
[r3
]);
3481 #ifndef CONFIG_USER_ONLY
3482 static DisasJumpType
op_mvcp(DisasContext
*s
, DisasOps
*o
)
3484 int r1
= get_field(s
, l1
);
3485 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3490 static DisasJumpType
op_mvcs(DisasContext
*s
, DisasOps
*o
)
3492 int r1
= get_field(s
, l1
);
3493 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3499 static DisasJumpType
op_mvn(DisasContext
*s
, DisasOps
*o
)
3501 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
3502 gen_helper_mvn(cpu_env
, l
, o
->addr1
, o
->in2
);
3503 tcg_temp_free_i32(l
);
3507 static DisasJumpType
op_mvo(DisasContext
*s
, DisasOps
*o
)
3509 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
3510 gen_helper_mvo(cpu_env
, l
, o
->addr1
, o
->in2
);
3511 tcg_temp_free_i32(l
);
3515 static DisasJumpType
op_mvpg(DisasContext
*s
, DisasOps
*o
)
3517 gen_helper_mvpg(cc_op
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3522 static DisasJumpType
op_mvst(DisasContext
*s
, DisasOps
*o
)
3524 TCGv_i32 t1
= tcg_const_i32(get_field(s
, r1
));
3525 TCGv_i32 t2
= tcg_const_i32(get_field(s
, r2
));
3527 gen_helper_mvst(cc_op
, cpu_env
, t1
, t2
);
3528 tcg_temp_free_i32(t1
);
3529 tcg_temp_free_i32(t2
);
3534 static DisasJumpType
op_mvz(DisasContext
*s
, DisasOps
*o
)
3536 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
3537 gen_helper_mvz(cpu_env
, l
, o
->addr1
, o
->in2
);
3538 tcg_temp_free_i32(l
);
3542 static DisasJumpType
op_mul(DisasContext
*s
, DisasOps
*o
)
3544 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3548 static DisasJumpType
op_mul128(DisasContext
*s
, DisasOps
*o
)
3550 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
3554 static DisasJumpType
op_muls128(DisasContext
*s
, DisasOps
*o
)
3556 tcg_gen_muls2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
3560 static DisasJumpType
op_meeb(DisasContext
*s
, DisasOps
*o
)
3562 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3566 static DisasJumpType
op_mdeb(DisasContext
*s
, DisasOps
*o
)
3568 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3572 static DisasJumpType
op_mdb(DisasContext
*s
, DisasOps
*o
)
3574 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3578 static DisasJumpType
op_mxb(DisasContext
*s
, DisasOps
*o
)
3580 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3581 return_low128(o
->out2
);
3585 static DisasJumpType
op_mxdb(DisasContext
*s
, DisasOps
*o
)
3587 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3588 return_low128(o
->out2
);
3592 static DisasJumpType
op_maeb(DisasContext
*s
, DisasOps
*o
)
3594 TCGv_i64 r3
= load_freg32_i64(get_field(s
, r3
));
3595 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3596 tcg_temp_free_i64(r3
);
3600 static DisasJumpType
op_madb(DisasContext
*s
, DisasOps
*o
)
3602 TCGv_i64 r3
= load_freg(get_field(s
, r3
));
3603 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3604 tcg_temp_free_i64(r3
);
3608 static DisasJumpType
op_mseb(DisasContext
*s
, DisasOps
*o
)
3610 TCGv_i64 r3
= load_freg32_i64(get_field(s
, r3
));
3611 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3612 tcg_temp_free_i64(r3
);
3616 static DisasJumpType
op_msdb(DisasContext
*s
, DisasOps
*o
)
3618 TCGv_i64 r3
= load_freg(get_field(s
, r3
));
3619 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3620 tcg_temp_free_i64(r3
);
3624 static DisasJumpType
op_nabs(DisasContext
*s
, DisasOps
*o
)
3627 z
= tcg_const_i64(0);
3628 n
= tcg_temp_new_i64();
3629 tcg_gen_neg_i64(n
, o
->in2
);
3630 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
3631 tcg_temp_free_i64(n
);
3632 tcg_temp_free_i64(z
);
3636 static DisasJumpType
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
3638 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3642 static DisasJumpType
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
3644 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3648 static DisasJumpType
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
3650 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3651 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3655 static DisasJumpType
op_nc(DisasContext
*s
, DisasOps
*o
)
3657 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
3658 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3659 tcg_temp_free_i32(l
);
3664 static DisasJumpType
op_neg(DisasContext
*s
, DisasOps
*o
)
3666 tcg_gen_neg_i64(o
->out
, o
->in2
);
3670 static DisasJumpType
op_negf32(DisasContext
*s
, DisasOps
*o
)
3672 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3676 static DisasJumpType
op_negf64(DisasContext
*s
, DisasOps
*o
)
3678 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3682 static DisasJumpType
op_negf128(DisasContext
*s
, DisasOps
*o
)
3684 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3685 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3689 static DisasJumpType
op_oc(DisasContext
*s
, DisasOps
*o
)
3691 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
3692 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3693 tcg_temp_free_i32(l
);
3698 static DisasJumpType
op_or(DisasContext
*s
, DisasOps
*o
)
3700 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3704 static DisasJumpType
op_ori(DisasContext
*s
, DisasOps
*o
)
3706 int shift
= s
->insn
->data
& 0xff;
3707 int size
= s
->insn
->data
>> 8;
3708 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3711 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3712 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3714 /* Produce the CC from only the bits manipulated. */
3715 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3716 set_cc_nz_u64(s
, cc_dst
);
3720 static DisasJumpType
op_oi(DisasContext
*s
, DisasOps
*o
)
3722 o
->in1
= tcg_temp_new_i64();
3724 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
3725 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
3727 /* Perform the atomic operation in memory. */
3728 tcg_gen_atomic_fetch_or_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
3732 /* Recompute also for atomic case: needed for setting CC. */
3733 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3735 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
3736 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
3741 static DisasJumpType
op_pack(DisasContext
*s
, DisasOps
*o
)
3743 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
3744 gen_helper_pack(cpu_env
, l
, o
->addr1
, o
->in2
);
3745 tcg_temp_free_i32(l
);
3749 static DisasJumpType
op_pka(DisasContext
*s
, DisasOps
*o
)
3751 int l2
= get_field(s
, l2
) + 1;
3754 /* The length must not exceed 32 bytes. */
3756 gen_program_exception(s
, PGM_SPECIFICATION
);
3757 return DISAS_NORETURN
;
3759 l
= tcg_const_i32(l2
);
3760 gen_helper_pka(cpu_env
, o
->addr1
, o
->in2
, l
);
3761 tcg_temp_free_i32(l
);
3765 static DisasJumpType
op_pku(DisasContext
*s
, DisasOps
*o
)
3767 int l2
= get_field(s
, l2
) + 1;
3770 /* The length must be even and should not exceed 64 bytes. */
3771 if ((l2
& 1) || (l2
> 64)) {
3772 gen_program_exception(s
, PGM_SPECIFICATION
);
3773 return DISAS_NORETURN
;
3775 l
= tcg_const_i32(l2
);
3776 gen_helper_pku(cpu_env
, o
->addr1
, o
->in2
, l
);
3777 tcg_temp_free_i32(l
);
3781 static DisasJumpType
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3783 gen_helper_popcnt(o
->out
, o
->in2
);
3787 #ifndef CONFIG_USER_ONLY
3788 static DisasJumpType
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3790 gen_helper_ptlb(cpu_env
);
3795 static DisasJumpType
op_risbg(DisasContext
*s
, DisasOps
*o
)
3797 int i3
= get_field(s
, i3
);
3798 int i4
= get_field(s
, i4
);
3799 int i5
= get_field(s
, i5
);
3800 int do_zero
= i4
& 0x80;
3801 uint64_t mask
, imask
, pmask
;
3804 /* Adjust the arguments for the specific insn. */
3805 switch (s
->fields
.op2
) {
3806 case 0x55: /* risbg */
3807 case 0x59: /* risbgn */
3812 case 0x5d: /* risbhg */
3815 pmask
= 0xffffffff00000000ull
;
3817 case 0x51: /* risblg */
3820 pmask
= 0x00000000ffffffffull
;
3823 g_assert_not_reached();
3826 /* MASK is the set of bits to be inserted from R2.
3827 Take care for I3/I4 wraparound. */
3830 mask
^= pmask
>> i4
>> 1;
3832 mask
|= ~(pmask
>> i4
>> 1);
3836 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3837 insns, we need to keep the other half of the register. */
3838 imask
= ~mask
| ~pmask
;
3846 if (s
->fields
.op2
== 0x5d) {
3850 /* In some cases we can implement this with extract. */
3851 if (imask
== 0 && pos
== 0 && len
> 0 && len
<= rot
) {
3852 tcg_gen_extract_i64(o
->out
, o
->in2
, 64 - rot
, len
);
3856 /* In some cases we can implement this with deposit. */
3857 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3858 /* Note that we rotate the bits to be inserted to the lsb, not to
3859 the position as described in the PoO. */
3860 rot
= (rot
- pos
) & 63;
3865 /* Rotate the input as necessary. */
3866 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3868 /* Insert the selected bits into the output. */
3871 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3873 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3875 } else if (imask
== 0) {
3876 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3878 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3879 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3880 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3885 static DisasJumpType
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3887 int i3
= get_field(s
, i3
);
3888 int i4
= get_field(s
, i4
);
3889 int i5
= get_field(s
, i5
);
3892 /* If this is a test-only form, arrange to discard the result. */
3894 o
->out
= tcg_temp_new_i64();
3902 /* MASK is the set of bits to be operated on from R2.
3903 Take care for I3/I4 wraparound. */
3906 mask
^= ~0ull >> i4
>> 1;
3908 mask
|= ~(~0ull >> i4
>> 1);
3911 /* Rotate the input as necessary. */
3912 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3915 switch (s
->fields
.op2
) {
3916 case 0x54: /* AND */
3917 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3918 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3921 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3922 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3924 case 0x57: /* XOR */
3925 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3926 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3933 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3934 set_cc_nz_u64(s
, cc_dst
);
3938 static DisasJumpType
op_rev16(DisasContext
*s
, DisasOps
*o
)
3940 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3944 static DisasJumpType
op_rev32(DisasContext
*s
, DisasOps
*o
)
3946 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3950 static DisasJumpType
op_rev64(DisasContext
*s
, DisasOps
*o
)
3952 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3956 static DisasJumpType
op_rll32(DisasContext
*s
, DisasOps
*o
)
3958 TCGv_i32 t1
= tcg_temp_new_i32();
3959 TCGv_i32 t2
= tcg_temp_new_i32();
3960 TCGv_i32 to
= tcg_temp_new_i32();
3961 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3962 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3963 tcg_gen_rotl_i32(to
, t1
, t2
);
3964 tcg_gen_extu_i32_i64(o
->out
, to
);
3965 tcg_temp_free_i32(t1
);
3966 tcg_temp_free_i32(t2
);
3967 tcg_temp_free_i32(to
);
3971 static DisasJumpType
op_rll64(DisasContext
*s
, DisasOps
*o
)
3973 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3977 #ifndef CONFIG_USER_ONLY
3978 static DisasJumpType
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3980 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3985 static DisasJumpType
op_sacf(DisasContext
*s
, DisasOps
*o
)
3987 gen_helper_sacf(cpu_env
, o
->in2
);
3988 /* Addressing mode has changed, so end the block. */
3989 return DISAS_PC_STALE
;
3993 static DisasJumpType
op_sam(DisasContext
*s
, DisasOps
*o
)
3995 int sam
= s
->insn
->data
;
4011 /* Bizarre but true, we check the address of the current insn for the
4012 specification exception, not the next to be executed. Thus the PoO
4013 documents that Bad Things Happen two bytes before the end. */
4014 if (s
->base
.pc_next
& ~mask
) {
4015 gen_program_exception(s
, PGM_SPECIFICATION
);
4016 return DISAS_NORETURN
;
4020 tsam
= tcg_const_i64(sam
);
4021 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
4022 tcg_temp_free_i64(tsam
);
4024 /* Always exit the TB, since we (may have) changed execution mode. */
4025 return DISAS_PC_STALE
;
4028 static DisasJumpType
op_sar(DisasContext
*s
, DisasOps
*o
)
4030 int r1
= get_field(s
, r1
);
4031 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
4035 static DisasJumpType
op_seb(DisasContext
*s
, DisasOps
*o
)
4037 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
4041 static DisasJumpType
op_sdb(DisasContext
*s
, DisasOps
*o
)
4043 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
4047 static DisasJumpType
op_sxb(DisasContext
*s
, DisasOps
*o
)
4049 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
4050 return_low128(o
->out2
);
4054 static DisasJumpType
op_sqeb(DisasContext
*s
, DisasOps
*o
)
4056 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
4060 static DisasJumpType
op_sqdb(DisasContext
*s
, DisasOps
*o
)
4062 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
4066 static DisasJumpType
op_sqxb(DisasContext
*s
, DisasOps
*o
)
4068 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
4069 return_low128(o
->out2
);
4073 #ifndef CONFIG_USER_ONLY
4074 static DisasJumpType
op_servc(DisasContext
*s
, DisasOps
*o
)
4076 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
4081 static DisasJumpType
op_sigp(DisasContext
*s
, DisasOps
*o
)
4083 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4084 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
4085 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, r3
);
4087 tcg_temp_free_i32(r1
);
4088 tcg_temp_free_i32(r3
);
4093 static DisasJumpType
op_soc(DisasContext
*s
, DisasOps
*o
)
4100 disas_jcc(s
, &c
, get_field(s
, m3
));
4102 /* We want to store when the condition is fulfilled, so branch
4103 out when it's not */
4104 c
.cond
= tcg_invert_cond(c
.cond
);
4106 lab
= gen_new_label();
4108 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
4110 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
4114 r1
= get_field(s
, r1
);
4115 a
= get_address(s
, 0, get_field(s
, b2
), get_field(s
, d2
));
4116 switch (s
->insn
->data
) {
4118 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
4121 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
4123 case 2: /* STOCFH */
4124 h
= tcg_temp_new_i64();
4125 tcg_gen_shri_i64(h
, regs
[r1
], 32);
4126 tcg_gen_qemu_st32(h
, a
, get_mem_index(s
));
4127 tcg_temp_free_i64(h
);
4130 g_assert_not_reached();
4132 tcg_temp_free_i64(a
);
4138 static DisasJumpType
op_sla(DisasContext
*s
, DisasOps
*o
)
4140 uint64_t sign
= 1ull << s
->insn
->data
;
4141 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
4142 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
4143 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
4144 /* The arithmetic left shift is curious in that it does not affect
4145 the sign bit. Copy that over from the source unchanged. */
4146 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
4147 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
4148 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
4152 static DisasJumpType
op_sll(DisasContext
*s
, DisasOps
*o
)
4154 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
4158 static DisasJumpType
op_sra(DisasContext
*s
, DisasOps
*o
)
4160 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
4164 static DisasJumpType
op_srl(DisasContext
*s
, DisasOps
*o
)
4166 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
4170 static DisasJumpType
op_sfpc(DisasContext
*s
, DisasOps
*o
)
4172 gen_helper_sfpc(cpu_env
, o
->in2
);
4176 static DisasJumpType
op_sfas(DisasContext
*s
, DisasOps
*o
)
4178 gen_helper_sfas(cpu_env
, o
->in2
);
4182 static DisasJumpType
op_srnm(DisasContext
*s
, DisasOps
*o
)
4184 /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4185 tcg_gen_andi_i64(o
->addr1
, o
->addr1
, 0x3ull
);
4186 gen_helper_srnm(cpu_env
, o
->addr1
);
4190 static DisasJumpType
op_srnmb(DisasContext
*s
, DisasOps
*o
)
4192 /* Bits 0-55 are are ignored. */
4193 tcg_gen_andi_i64(o
->addr1
, o
->addr1
, 0xffull
);
4194 gen_helper_srnm(cpu_env
, o
->addr1
);
4198 static DisasJumpType
op_srnmt(DisasContext
*s
, DisasOps
*o
)
4200 TCGv_i64 tmp
= tcg_temp_new_i64();
4202 /* Bits other than 61-63 are ignored. */
4203 tcg_gen_andi_i64(o
->addr1
, o
->addr1
, 0x7ull
);
4205 /* No need to call a helper, we don't implement dfp */
4206 tcg_gen_ld32u_i64(tmp
, cpu_env
, offsetof(CPUS390XState
, fpc
));
4207 tcg_gen_deposit_i64(tmp
, tmp
, o
->addr1
, 4, 3);
4208 tcg_gen_st32_i64(tmp
, cpu_env
, offsetof(CPUS390XState
, fpc
));
4210 tcg_temp_free_i64(tmp
);
4214 static DisasJumpType
op_spm(DisasContext
*s
, DisasOps
*o
)
4216 tcg_gen_extrl_i64_i32(cc_op
, o
->in1
);
4217 tcg_gen_extract_i32(cc_op
, cc_op
, 28, 2);
4220 tcg_gen_shri_i64(o
->in1
, o
->in1
, 24);
4221 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in1
, PSW_SHIFT_MASK_PM
, 4);
4225 static DisasJumpType
op_ectg(DisasContext
*s
, DisasOps
*o
)
4227 int b1
= get_field(s
, b1
);
4228 int d1
= get_field(s
, d1
);
4229 int b2
= get_field(s
, b2
);
4230 int d2
= get_field(s
, d2
);
4231 int r3
= get_field(s
, r3
);
4232 TCGv_i64 tmp
= tcg_temp_new_i64();
4234 /* fetch all operands first */
4235 o
->in1
= tcg_temp_new_i64();
4236 tcg_gen_addi_i64(o
->in1
, regs
[b1
], d1
);
4237 o
->in2
= tcg_temp_new_i64();
4238 tcg_gen_addi_i64(o
->in2
, regs
[b2
], d2
);
4239 o
->addr1
= get_address(s
, 0, r3
, 0);
4241 /* load the third operand into r3 before modifying anything */
4242 tcg_gen_qemu_ld64(regs
[r3
], o
->addr1
, get_mem_index(s
));
4244 /* subtract CPU timer from first operand and store in GR0 */
4245 gen_helper_stpt(tmp
, cpu_env
);
4246 tcg_gen_sub_i64(regs
[0], o
->in1
, tmp
);
4248 /* store second operand in GR1 */
4249 tcg_gen_mov_i64(regs
[1], o
->in2
);
4251 tcg_temp_free_i64(tmp
);
4255 #ifndef CONFIG_USER_ONLY
4256 static DisasJumpType
op_spka(DisasContext
*s
, DisasOps
*o
)
4258 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
4259 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
, 4);
4263 static DisasJumpType
op_sske(DisasContext
*s
, DisasOps
*o
)
4265 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
4269 static DisasJumpType
op_ssm(DisasContext
*s
, DisasOps
*o
)
4271 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
4272 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4273 return DISAS_PC_STALE_NOCHAIN
;
4276 static DisasJumpType
op_stap(DisasContext
*s
, DisasOps
*o
)
4278 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, core_id
));
4283 static DisasJumpType
op_stck(DisasContext
*s
, DisasOps
*o
)
4285 gen_helper_stck(o
->out
, cpu_env
);
4286 /* ??? We don't implement clock states. */
4287 gen_op_movi_cc(s
, 0);
4291 static DisasJumpType
op_stcke(DisasContext
*s
, DisasOps
*o
)
4293 TCGv_i64 c1
= tcg_temp_new_i64();
4294 TCGv_i64 c2
= tcg_temp_new_i64();
4295 TCGv_i64 todpr
= tcg_temp_new_i64();
4296 gen_helper_stck(c1
, cpu_env
);
4297 /* 16 bit value store in an uint32_t (only valid bits set) */
4298 tcg_gen_ld32u_i64(todpr
, cpu_env
, offsetof(CPUS390XState
, todpr
));
4299 /* Shift the 64-bit value into its place as a zero-extended
4300 104-bit value. Note that "bit positions 64-103 are always
4301 non-zero so that they compare differently to STCK"; we set
4302 the least significant bit to 1. */
4303 tcg_gen_shli_i64(c2
, c1
, 56);
4304 tcg_gen_shri_i64(c1
, c1
, 8);
4305 tcg_gen_ori_i64(c2
, c2
, 0x10000);
4306 tcg_gen_or_i64(c2
, c2
, todpr
);
4307 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
4308 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
4309 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
4310 tcg_temp_free_i64(c1
);
4311 tcg_temp_free_i64(c2
);
4312 tcg_temp_free_i64(todpr
);
4313 /* ??? We don't implement clock states. */
4314 gen_op_movi_cc(s
, 0);
4318 #ifndef CONFIG_USER_ONLY
4319 static DisasJumpType
op_sck(DisasContext
*s
, DisasOps
*o
)
4321 tcg_gen_qemu_ld_i64(o
->in1
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
4322 gen_helper_sck(cc_op
, cpu_env
, o
->in1
);
4327 static DisasJumpType
op_sckc(DisasContext
*s
, DisasOps
*o
)
4329 gen_helper_sckc(cpu_env
, o
->in2
);
4333 static DisasJumpType
op_sckpf(DisasContext
*s
, DisasOps
*o
)
4335 gen_helper_sckpf(cpu_env
, regs
[0]);
4339 static DisasJumpType
op_stckc(DisasContext
*s
, DisasOps
*o
)
4341 gen_helper_stckc(o
->out
, cpu_env
);
4345 static DisasJumpType
op_stctg(DisasContext
*s
, DisasOps
*o
)
4347 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4348 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
4349 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
4350 tcg_temp_free_i32(r1
);
4351 tcg_temp_free_i32(r3
);
4355 static DisasJumpType
op_stctl(DisasContext
*s
, DisasOps
*o
)
4357 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4358 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
4359 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
4360 tcg_temp_free_i32(r1
);
4361 tcg_temp_free_i32(r3
);
4365 static DisasJumpType
op_stidp(DisasContext
*s
, DisasOps
*o
)
4367 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpuid
));
4371 static DisasJumpType
op_spt(DisasContext
*s
, DisasOps
*o
)
4373 gen_helper_spt(cpu_env
, o
->in2
);
4377 static DisasJumpType
op_stfl(DisasContext
*s
, DisasOps
*o
)
4379 gen_helper_stfl(cpu_env
);
4383 static DisasJumpType
op_stpt(DisasContext
*s
, DisasOps
*o
)
4385 gen_helper_stpt(o
->out
, cpu_env
);
4389 static DisasJumpType
op_stsi(DisasContext
*s
, DisasOps
*o
)
4391 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
4396 static DisasJumpType
op_spx(DisasContext
*s
, DisasOps
*o
)
4398 gen_helper_spx(cpu_env
, o
->in2
);
4402 static DisasJumpType
op_xsch(DisasContext
*s
, DisasOps
*o
)
4404 gen_helper_xsch(cpu_env
, regs
[1]);
4409 static DisasJumpType
op_csch(DisasContext
*s
, DisasOps
*o
)
4411 gen_helper_csch(cpu_env
, regs
[1]);
4416 static DisasJumpType
op_hsch(DisasContext
*s
, DisasOps
*o
)
4418 gen_helper_hsch(cpu_env
, regs
[1]);
4423 static DisasJumpType
op_msch(DisasContext
*s
, DisasOps
*o
)
4425 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
4430 static DisasJumpType
op_rchp(DisasContext
*s
, DisasOps
*o
)
4432 gen_helper_rchp(cpu_env
, regs
[1]);
4437 static DisasJumpType
op_rsch(DisasContext
*s
, DisasOps
*o
)
4439 gen_helper_rsch(cpu_env
, regs
[1]);
4444 static DisasJumpType
op_sal(DisasContext
*s
, DisasOps
*o
)
4446 gen_helper_sal(cpu_env
, regs
[1]);
4450 static DisasJumpType
op_schm(DisasContext
*s
, DisasOps
*o
)
4452 gen_helper_schm(cpu_env
, regs
[1], regs
[2], o
->in2
);
4456 static DisasJumpType
op_siga(DisasContext
*s
, DisasOps
*o
)
4458 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4459 gen_op_movi_cc(s
, 3);
4463 static DisasJumpType
op_stcps(DisasContext
*s
, DisasOps
*o
)
4465 /* The instruction is suppressed if not provided. */
4469 static DisasJumpType
op_ssch(DisasContext
*s
, DisasOps
*o
)
4471 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
4476 static DisasJumpType
op_stsch(DisasContext
*s
, DisasOps
*o
)
4478 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
4483 static DisasJumpType
op_stcrw(DisasContext
*s
, DisasOps
*o
)
4485 gen_helper_stcrw(cpu_env
, o
->in2
);
4490 static DisasJumpType
op_tpi(DisasContext
*s
, DisasOps
*o
)
4492 gen_helper_tpi(cc_op
, cpu_env
, o
->addr1
);
4497 static DisasJumpType
op_tsch(DisasContext
*s
, DisasOps
*o
)
4499 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
4504 static DisasJumpType
op_chsc(DisasContext
*s
, DisasOps
*o
)
4506 gen_helper_chsc(cpu_env
, o
->in2
);
4511 static DisasJumpType
op_stpx(DisasContext
*s
, DisasOps
*o
)
4513 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
4514 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
4518 static DisasJumpType
op_stnosm(DisasContext
*s
, DisasOps
*o
)
4520 uint64_t i2
= get_field(s
, i2
);
4523 /* It is important to do what the instruction name says: STORE THEN.
4524 If we let the output hook perform the store then if we fault and
4525 restart, we'll have the wrong SYSTEM MASK in place. */
4526 t
= tcg_temp_new_i64();
4527 tcg_gen_shri_i64(t
, psw_mask
, 56);
4528 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
4529 tcg_temp_free_i64(t
);
4531 if (s
->fields
.op
== 0xac) {
4532 tcg_gen_andi_i64(psw_mask
, psw_mask
,
4533 (i2
<< 56) | 0x00ffffffffffffffull
);
4535 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
4538 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4539 return DISAS_PC_STALE_NOCHAIN
;
4542 static DisasJumpType
op_stura(DisasContext
*s
, DisasOps
*o
)
4544 o
->addr1
= get_address(s
, 0, get_field(s
, r2
), 0);
4545 tcg_gen_qemu_st_tl(o
->in1
, o
->addr1
, MMU_REAL_IDX
, s
->insn
->data
);
4547 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
4549 gen_helper_per_store_real(cpu_env
);
4555 static DisasJumpType
op_stfle(DisasContext
*s
, DisasOps
*o
)
4557 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
4562 static DisasJumpType
op_st8(DisasContext
*s
, DisasOps
*o
)
4564 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
4568 static DisasJumpType
op_st16(DisasContext
*s
, DisasOps
*o
)
4570 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
4574 static DisasJumpType
op_st32(DisasContext
*s
, DisasOps
*o
)
4576 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
4580 static DisasJumpType
op_st64(DisasContext
*s
, DisasOps
*o
)
4582 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
4586 static DisasJumpType
op_stam(DisasContext
*s
, DisasOps
*o
)
4588 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4589 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
4590 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
4591 tcg_temp_free_i32(r1
);
4592 tcg_temp_free_i32(r3
);
4596 static DisasJumpType
op_stcm(DisasContext
*s
, DisasOps
*o
)
4598 int m3
= get_field(s
, m3
);
4599 int pos
, base
= s
->insn
->data
;
4600 TCGv_i64 tmp
= tcg_temp_new_i64();
4602 pos
= base
+ ctz32(m3
) * 8;
4605 /* Effectively a 32-bit store. */
4606 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4607 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
4613 /* Effectively a 16-bit store. */
4614 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4615 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
4622 /* Effectively an 8-bit store. */
4623 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4624 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4628 /* This is going to be a sequence of shifts and stores. */
4629 pos
= base
+ 32 - 8;
4632 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4633 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4634 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
4636 m3
= (m3
<< 1) & 0xf;
4641 tcg_temp_free_i64(tmp
);
4645 static DisasJumpType
op_stm(DisasContext
*s
, DisasOps
*o
)
4647 int r1
= get_field(s
, r1
);
4648 int r3
= get_field(s
, r3
);
4649 int size
= s
->insn
->data
;
4650 TCGv_i64 tsize
= tcg_const_i64(size
);
4654 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
4656 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
4661 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
4665 tcg_temp_free_i64(tsize
);
4669 static DisasJumpType
op_stmh(DisasContext
*s
, DisasOps
*o
)
4671 int r1
= get_field(s
, r1
);
4672 int r3
= get_field(s
, r3
);
4673 TCGv_i64 t
= tcg_temp_new_i64();
4674 TCGv_i64 t4
= tcg_const_i64(4);
4675 TCGv_i64 t32
= tcg_const_i64(32);
4678 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
4679 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
4683 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
4687 tcg_temp_free_i64(t
);
4688 tcg_temp_free_i64(t4
);
4689 tcg_temp_free_i64(t32
);
4693 static DisasJumpType
op_stpq(DisasContext
*s
, DisasOps
*o
)
4695 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
4696 gen_helper_stpq(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4697 } else if (HAVE_ATOMIC128
) {
4698 gen_helper_stpq_parallel(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4700 gen_helper_exit_atomic(cpu_env
);
4701 return DISAS_NORETURN
;
4706 static DisasJumpType
op_srst(DisasContext
*s
, DisasOps
*o
)
4708 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4709 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
4711 gen_helper_srst(cpu_env
, r1
, r2
);
4713 tcg_temp_free_i32(r1
);
4714 tcg_temp_free_i32(r2
);
4719 static DisasJumpType
op_srstu(DisasContext
*s
, DisasOps
*o
)
4721 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4722 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
4724 gen_helper_srstu(cpu_env
, r1
, r2
);
4726 tcg_temp_free_i32(r1
);
4727 tcg_temp_free_i32(r2
);
4732 static DisasJumpType
op_sub(DisasContext
*s
, DisasOps
*o
)
4734 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4738 static DisasJumpType
op_subu64(DisasContext
*s
, DisasOps
*o
)
4740 tcg_gen_movi_i64(cc_src
, 0);
4741 tcg_gen_sub2_i64(o
->out
, cc_src
, o
->in1
, cc_src
, o
->in2
, cc_src
);
4745 /* Compute borrow (0, -1) into cc_src. */
4746 static void compute_borrow(DisasContext
*s
)
4750 /* The borrow value is already in cc_src (0,-1). */
4756 /* The carry flag is the msb of CC; compute into cc_src. */
4757 tcg_gen_extu_i32_i64(cc_src
, cc_op
);
4758 tcg_gen_shri_i64(cc_src
, cc_src
, 1);
4761 /* Convert carry (1,0) to borrow (0,-1). */
4762 tcg_gen_subi_i64(cc_src
, cc_src
, 1);
4767 static DisasJumpType
op_subb32(DisasContext
*s
, DisasOps
*o
)
4771 /* Borrow is {0, -1}, so add to subtract. */
4772 tcg_gen_add_i64(o
->out
, o
->in1
, cc_src
);
4773 tcg_gen_sub_i64(o
->out
, o
->out
, o
->in2
);
4777 static DisasJumpType
op_subb64(DisasContext
*s
, DisasOps
*o
)
4782 * Borrow is {0, -1}, so add to subtract; replicate the
4783 * borrow input to produce 128-bit -1 for the addition.
4785 TCGv_i64 zero
= tcg_const_i64(0);
4786 tcg_gen_add2_i64(o
->out
, cc_src
, o
->in1
, zero
, cc_src
, cc_src
);
4787 tcg_gen_sub2_i64(o
->out
, cc_src
, o
->out
, cc_src
, o
->in2
, zero
);
4788 tcg_temp_free_i64(zero
);
4793 static DisasJumpType
op_svc(DisasContext
*s
, DisasOps
*o
)
4800 t
= tcg_const_i32(get_field(s
, i1
) & 0xff);
4801 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4802 tcg_temp_free_i32(t
);
4804 t
= tcg_const_i32(s
->ilen
);
4805 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4806 tcg_temp_free_i32(t
);
4808 gen_exception(EXCP_SVC
);
4809 return DISAS_NORETURN
;
4812 static DisasJumpType
op_tam(DisasContext
*s
, DisasOps
*o
)
4816 cc
|= (s
->base
.tb
->flags
& FLAG_MASK_64
) ? 2 : 0;
4817 cc
|= (s
->base
.tb
->flags
& FLAG_MASK_32
) ? 1 : 0;
4818 gen_op_movi_cc(s
, cc
);
4822 static DisasJumpType
op_tceb(DisasContext
*s
, DisasOps
*o
)
4824 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4829 static DisasJumpType
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4831 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4836 static DisasJumpType
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4838 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4843 #ifndef CONFIG_USER_ONLY
4845 static DisasJumpType
op_testblock(DisasContext
*s
, DisasOps
*o
)
4847 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4852 static DisasJumpType
op_tprot(DisasContext
*s
, DisasOps
*o
)
4854 gen_helper_tprot(cc_op
, cpu_env
, o
->addr1
, o
->in2
);
4861 static DisasJumpType
op_tp(DisasContext
*s
, DisasOps
*o
)
4863 TCGv_i32 l1
= tcg_const_i32(get_field(s
, l1
) + 1);
4864 gen_helper_tp(cc_op
, cpu_env
, o
->addr1
, l1
);
4865 tcg_temp_free_i32(l1
);
4870 static DisasJumpType
op_tr(DisasContext
*s
, DisasOps
*o
)
4872 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
4873 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4874 tcg_temp_free_i32(l
);
4879 static DisasJumpType
op_tre(DisasContext
*s
, DisasOps
*o
)
4881 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4882 return_low128(o
->out2
);
4887 static DisasJumpType
op_trt(DisasContext
*s
, DisasOps
*o
)
4889 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
4890 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4891 tcg_temp_free_i32(l
);
4896 static DisasJumpType
op_trtr(DisasContext
*s
, DisasOps
*o
)
4898 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
4899 gen_helper_trtr(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4900 tcg_temp_free_i32(l
);
4905 static DisasJumpType
op_trXX(DisasContext
*s
, DisasOps
*o
)
4907 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4908 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
4909 TCGv_i32 sizes
= tcg_const_i32(s
->insn
->opc
& 3);
4910 TCGv_i32 tst
= tcg_temp_new_i32();
4911 int m3
= get_field(s
, m3
);
4913 if (!s390_has_feat(S390_FEAT_ETF2_ENH
)) {
4917 tcg_gen_movi_i32(tst
, -1);
4919 tcg_gen_extrl_i64_i32(tst
, regs
[0]);
4920 if (s
->insn
->opc
& 3) {
4921 tcg_gen_ext8u_i32(tst
, tst
);
4923 tcg_gen_ext16u_i32(tst
, tst
);
4926 gen_helper_trXX(cc_op
, cpu_env
, r1
, r2
, tst
, sizes
);
4928 tcg_temp_free_i32(r1
);
4929 tcg_temp_free_i32(r2
);
4930 tcg_temp_free_i32(sizes
);
4931 tcg_temp_free_i32(tst
);
4936 static DisasJumpType
op_ts(DisasContext
*s
, DisasOps
*o
)
4938 TCGv_i32 t1
= tcg_const_i32(0xff);
4939 tcg_gen_atomic_xchg_i32(t1
, o
->in2
, t1
, get_mem_index(s
), MO_UB
);
4940 tcg_gen_extract_i32(cc_op
, t1
, 7, 1);
4941 tcg_temp_free_i32(t1
);
4946 static DisasJumpType
op_unpk(DisasContext
*s
, DisasOps
*o
)
4948 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
4949 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4950 tcg_temp_free_i32(l
);
4954 static DisasJumpType
op_unpka(DisasContext
*s
, DisasOps
*o
)
4956 int l1
= get_field(s
, l1
) + 1;
4959 /* The length must not exceed 32 bytes. */
4961 gen_program_exception(s
, PGM_SPECIFICATION
);
4962 return DISAS_NORETURN
;
4964 l
= tcg_const_i32(l1
);
4965 gen_helper_unpka(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4966 tcg_temp_free_i32(l
);
4971 static DisasJumpType
op_unpku(DisasContext
*s
, DisasOps
*o
)
4973 int l1
= get_field(s
, l1
) + 1;
4976 /* The length must be even and should not exceed 64 bytes. */
4977 if ((l1
& 1) || (l1
> 64)) {
4978 gen_program_exception(s
, PGM_SPECIFICATION
);
4979 return DISAS_NORETURN
;
4981 l
= tcg_const_i32(l1
);
4982 gen_helper_unpku(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4983 tcg_temp_free_i32(l
);
4989 static DisasJumpType
op_xc(DisasContext
*s
, DisasOps
*o
)
4991 int d1
= get_field(s
, d1
);
4992 int d2
= get_field(s
, d2
);
4993 int b1
= get_field(s
, b1
);
4994 int b2
= get_field(s
, b2
);
4995 int l
= get_field(s
, l1
);
4998 o
->addr1
= get_address(s
, 0, b1
, d1
);
5000 /* If the addresses are identical, this is a store/memset of zero. */
5001 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
5002 o
->in2
= tcg_const_i64(0);
5006 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
5009 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
5013 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
5016 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
5020 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
5023 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
5027 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
5029 gen_op_movi_cc(s
, 0);
5033 /* But in general we'll defer to a helper. */
5034 o
->in2
= get_address(s
, 0, b2
, d2
);
5035 t32
= tcg_const_i32(l
);
5036 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
5037 tcg_temp_free_i32(t32
);
5042 static DisasJumpType
op_xor(DisasContext
*s
, DisasOps
*o
)
5044 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
5048 static DisasJumpType
op_xori(DisasContext
*s
, DisasOps
*o
)
5050 int shift
= s
->insn
->data
& 0xff;
5051 int size
= s
->insn
->data
>> 8;
5052 uint64_t mask
= ((1ull << size
) - 1) << shift
;
5055 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
5056 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
5058 /* Produce the CC from only the bits manipulated. */
5059 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
5060 set_cc_nz_u64(s
, cc_dst
);
5064 static DisasJumpType
op_xi(DisasContext
*s
, DisasOps
*o
)
5066 o
->in1
= tcg_temp_new_i64();
5068 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
5069 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
5071 /* Perform the atomic operation in memory. */
5072 tcg_gen_atomic_fetch_xor_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
5076 /* Recompute also for atomic case: needed for setting CC. */
5077 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
5079 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
5080 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
5085 static DisasJumpType
op_zero(DisasContext
*s
, DisasOps
*o
)
5087 o
->out
= tcg_const_i64(0);
5091 static DisasJumpType
op_zero2(DisasContext
*s
, DisasOps
*o
)
5093 o
->out
= tcg_const_i64(0);
5099 #ifndef CONFIG_USER_ONLY
5100 static DisasJumpType
op_clp(DisasContext
*s
, DisasOps
*o
)
5102 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
5104 gen_helper_clp(cpu_env
, r2
);
5105 tcg_temp_free_i32(r2
);
5110 static DisasJumpType
op_pcilg(DisasContext
*s
, DisasOps
*o
)
5112 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5113 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
5115 gen_helper_pcilg(cpu_env
, r1
, r2
);
5116 tcg_temp_free_i32(r1
);
5117 tcg_temp_free_i32(r2
);
5122 static DisasJumpType
op_pcistg(DisasContext
*s
, DisasOps
*o
)
5124 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5125 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
5127 gen_helper_pcistg(cpu_env
, r1
, r2
);
5128 tcg_temp_free_i32(r1
);
5129 tcg_temp_free_i32(r2
);
5134 static DisasJumpType
op_stpcifc(DisasContext
*s
, DisasOps
*o
)
5136 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5137 TCGv_i32 ar
= tcg_const_i32(get_field(s
, b2
));
5139 gen_helper_stpcifc(cpu_env
, r1
, o
->addr1
, ar
);
5140 tcg_temp_free_i32(ar
);
5141 tcg_temp_free_i32(r1
);
5146 static DisasJumpType
op_sic(DisasContext
*s
, DisasOps
*o
)
5148 gen_helper_sic(cpu_env
, o
->in1
, o
->in2
);
5152 static DisasJumpType
op_rpcit(DisasContext
*s
, DisasOps
*o
)
5154 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5155 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
5157 gen_helper_rpcit(cpu_env
, r1
, r2
);
5158 tcg_temp_free_i32(r1
);
5159 tcg_temp_free_i32(r2
);
5164 static DisasJumpType
op_pcistb(DisasContext
*s
, DisasOps
*o
)
5166 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5167 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
5168 TCGv_i32 ar
= tcg_const_i32(get_field(s
, b2
));
5170 gen_helper_pcistb(cpu_env
, r1
, r3
, o
->addr1
, ar
);
5171 tcg_temp_free_i32(ar
);
5172 tcg_temp_free_i32(r1
);
5173 tcg_temp_free_i32(r3
);
5178 static DisasJumpType
op_mpcifc(DisasContext
*s
, DisasOps
*o
)
5180 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5181 TCGv_i32 ar
= tcg_const_i32(get_field(s
, b2
));
5183 gen_helper_mpcifc(cpu_env
, r1
, o
->addr1
, ar
);
5184 tcg_temp_free_i32(ar
);
5185 tcg_temp_free_i32(r1
);
5191 #include "translate_vx.c.inc"
5193 /* ====================================================================== */
5194 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5195 the original inputs), update the various cc data structures in order to
5196 be able to compute the new condition code. */
5198 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
5200 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
5203 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
5205 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
5208 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
5210 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
5213 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
5215 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
5218 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
5220 tcg_gen_shri_i64(cc_src
, o
->out
, 32);
5221 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
5222 gen_op_update2_cc_i64(s
, CC_OP_ADDU
, cc_src
, cc_dst
);
5225 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
5227 gen_op_update2_cc_i64(s
, CC_OP_ADDU
, cc_src
, o
->out
);
5230 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
5232 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
5235 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
5237 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
5240 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
5242 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
5245 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
5247 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
5250 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
5252 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
5255 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
5257 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
5260 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
5262 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
5265 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
5267 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
5270 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
5272 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
5275 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
5277 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
5280 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
5282 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
5285 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
5287 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
5288 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
5291 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
5293 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
5296 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
5298 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
5301 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
5303 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
5306 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
5308 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
5311 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
5313 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
5316 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
5318 tcg_gen_sari_i64(cc_src
, o
->out
, 32);
5319 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
5320 gen_op_update2_cc_i64(s
, CC_OP_SUBU
, cc_src
, cc_dst
);
5323 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
5325 gen_op_update2_cc_i64(s
, CC_OP_SUBU
, cc_src
, o
->out
);
5328 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
5330 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
5333 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
5335 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
5338 static void cout_muls32(DisasContext
*s
, DisasOps
*o
)
5340 gen_op_update1_cc_i64(s
, CC_OP_MULS_32
, o
->out
);
5343 static void cout_muls64(DisasContext
*s
, DisasOps
*o
)
5345 /* out contains "high" part, out2 contains "low" part of 128 bit result */
5346 gen_op_update2_cc_i64(s
, CC_OP_MULS_64
, o
->out
, o
->out2
);
5349 /* ====================================================================== */
5350 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5351 with the TCG register to which we will write. Used in combination with
5352 the "wout" generators, in some cases we need a new temporary, and in
5353 some cases we can write to a TCG global. */
5355 static void prep_new(DisasContext
*s
, DisasOps
*o
)
5357 o
->out
= tcg_temp_new_i64();
5359 #define SPEC_prep_new 0
5361 static void prep_new_P(DisasContext
*s
, DisasOps
*o
)
5363 o
->out
= tcg_temp_new_i64();
5364 o
->out2
= tcg_temp_new_i64();
5366 #define SPEC_prep_new_P 0
5368 static void prep_r1(DisasContext
*s
, DisasOps
*o
)
5370 o
->out
= regs
[get_field(s
, r1
)];
5373 #define SPEC_prep_r1 0
5375 static void prep_r1_P(DisasContext
*s
, DisasOps
*o
)
5377 int r1
= get_field(s
, r1
);
5379 o
->out2
= regs
[r1
+ 1];
5380 o
->g_out
= o
->g_out2
= true;
5382 #define SPEC_prep_r1_P SPEC_r1_even
5384 /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5385 static void prep_x1(DisasContext
*s
, DisasOps
*o
)
5387 o
->out
= load_freg(get_field(s
, r1
));
5388 o
->out2
= load_freg(get_field(s
, r1
) + 2);
5390 #define SPEC_prep_x1 SPEC_r1_f128
5392 /* ====================================================================== */
5393 /* The "Write OUTput" generators. These generally perform some non-trivial
5394 copy of data to TCG globals, or to main memory. The trivial cases are
5395 generally handled by having a "prep" generator install the TCG global
5396 as the destination of the operation. */
5398 static void wout_r1(DisasContext
*s
, DisasOps
*o
)
5400 store_reg(get_field(s
, r1
), o
->out
);
5402 #define SPEC_wout_r1 0
5404 static void wout_out2_r1(DisasContext
*s
, DisasOps
*o
)
5406 store_reg(get_field(s
, r1
), o
->out2
);
5408 #define SPEC_wout_out2_r1 0
5410 static void wout_r1_8(DisasContext
*s
, DisasOps
*o
)
5412 int r1
= get_field(s
, r1
);
5413 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
5415 #define SPEC_wout_r1_8 0
5417 static void wout_r1_16(DisasContext
*s
, DisasOps
*o
)
5419 int r1
= get_field(s
, r1
);
5420 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
5422 #define SPEC_wout_r1_16 0
5424 static void wout_r1_32(DisasContext
*s
, DisasOps
*o
)
5426 store_reg32_i64(get_field(s
, r1
), o
->out
);
5428 #define SPEC_wout_r1_32 0
5430 static void wout_r1_32h(DisasContext
*s
, DisasOps
*o
)
5432 store_reg32h_i64(get_field(s
, r1
), o
->out
);
5434 #define SPEC_wout_r1_32h 0
5436 static void wout_r1_P32(DisasContext
*s
, DisasOps
*o
)
5438 int r1
= get_field(s
, r1
);
5439 store_reg32_i64(r1
, o
->out
);
5440 store_reg32_i64(r1
+ 1, o
->out2
);
5442 #define SPEC_wout_r1_P32 SPEC_r1_even
5444 static void wout_r1_D32(DisasContext
*s
, DisasOps
*o
)
5446 int r1
= get_field(s
, r1
);
5447 store_reg32_i64(r1
+ 1, o
->out
);
5448 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
5449 store_reg32_i64(r1
, o
->out
);
5451 #define SPEC_wout_r1_D32 SPEC_r1_even
5453 static void wout_r3_P32(DisasContext
*s
, DisasOps
*o
)
5455 int r3
= get_field(s
, r3
);
5456 store_reg32_i64(r3
, o
->out
);
5457 store_reg32_i64(r3
+ 1, o
->out2
);
5459 #define SPEC_wout_r3_P32 SPEC_r3_even
5461 static void wout_r3_P64(DisasContext
*s
, DisasOps
*o
)
5463 int r3
= get_field(s
, r3
);
5464 store_reg(r3
, o
->out
);
5465 store_reg(r3
+ 1, o
->out2
);
5467 #define SPEC_wout_r3_P64 SPEC_r3_even
5469 static void wout_e1(DisasContext
*s
, DisasOps
*o
)
5471 store_freg32_i64(get_field(s
, r1
), o
->out
);
5473 #define SPEC_wout_e1 0
5475 static void wout_f1(DisasContext
*s
, DisasOps
*o
)
5477 store_freg(get_field(s
, r1
), o
->out
);
5479 #define SPEC_wout_f1 0
5481 static void wout_x1(DisasContext
*s
, DisasOps
*o
)
5483 int f1
= get_field(s
, r1
);
5484 store_freg(f1
, o
->out
);
5485 store_freg(f1
+ 2, o
->out2
);
5487 #define SPEC_wout_x1 SPEC_r1_f128
5489 static void wout_cond_r1r2_32(DisasContext
*s
, DisasOps
*o
)
5491 if (get_field(s
, r1
) != get_field(s
, r2
)) {
5492 store_reg32_i64(get_field(s
, r1
), o
->out
);
5495 #define SPEC_wout_cond_r1r2_32 0
5497 static void wout_cond_e1e2(DisasContext
*s
, DisasOps
*o
)
5499 if (get_field(s
, r1
) != get_field(s
, r2
)) {
5500 store_freg32_i64(get_field(s
, r1
), o
->out
);
5503 #define SPEC_wout_cond_e1e2 0
5505 static void wout_m1_8(DisasContext
*s
, DisasOps
*o
)
5507 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
5509 #define SPEC_wout_m1_8 0
5511 static void wout_m1_16(DisasContext
*s
, DisasOps
*o
)
5513 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
5515 #define SPEC_wout_m1_16 0
5517 #ifndef CONFIG_USER_ONLY
5518 static void wout_m1_16a(DisasContext
*s
, DisasOps
*o
)
5520 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEUW
| MO_ALIGN
);
5522 #define SPEC_wout_m1_16a 0
5525 static void wout_m1_32(DisasContext
*s
, DisasOps
*o
)
5527 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
5529 #define SPEC_wout_m1_32 0
5531 #ifndef CONFIG_USER_ONLY
5532 static void wout_m1_32a(DisasContext
*s
, DisasOps
*o
)
5534 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEUL
| MO_ALIGN
);
5536 #define SPEC_wout_m1_32a 0
5539 static void wout_m1_64(DisasContext
*s
, DisasOps
*o
)
5541 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
5543 #define SPEC_wout_m1_64 0
5545 #ifndef CONFIG_USER_ONLY
5546 static void wout_m1_64a(DisasContext
*s
, DisasOps
*o
)
5548 tcg_gen_qemu_st_i64(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
5550 #define SPEC_wout_m1_64a 0
5553 static void wout_m2_32(DisasContext
*s
, DisasOps
*o
)
5555 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
5557 #define SPEC_wout_m2_32 0
5559 static void wout_in2_r1(DisasContext
*s
, DisasOps
*o
)
5561 store_reg(get_field(s
, r1
), o
->in2
);
5563 #define SPEC_wout_in2_r1 0
5565 static void wout_in2_r1_32(DisasContext
*s
, DisasOps
*o
)
5567 store_reg32_i64(get_field(s
, r1
), o
->in2
);
5569 #define SPEC_wout_in2_r1_32 0
5571 /* ====================================================================== */
5572 /* The "INput 1" generators. These load the first operand to an insn. */
5574 static void in1_r1(DisasContext
*s
, DisasOps
*o
)
5576 o
->in1
= load_reg(get_field(s
, r1
));
5578 #define SPEC_in1_r1 0
5580 static void in1_r1_o(DisasContext
*s
, DisasOps
*o
)
5582 o
->in1
= regs
[get_field(s
, r1
)];
5585 #define SPEC_in1_r1_o 0
5587 static void in1_r1_32s(DisasContext
*s
, DisasOps
*o
)
5589 o
->in1
= tcg_temp_new_i64();
5590 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(s
, r1
)]);
5592 #define SPEC_in1_r1_32s 0
5594 static void in1_r1_32u(DisasContext
*s
, DisasOps
*o
)
5596 o
->in1
= tcg_temp_new_i64();
5597 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(s
, r1
)]);
5599 #define SPEC_in1_r1_32u 0
5601 static void in1_r1_sr32(DisasContext
*s
, DisasOps
*o
)
5603 o
->in1
= tcg_temp_new_i64();
5604 tcg_gen_shri_i64(o
->in1
, regs
[get_field(s
, r1
)], 32);
5606 #define SPEC_in1_r1_sr32 0
5608 static void in1_r1p1(DisasContext
*s
, DisasOps
*o
)
5610 o
->in1
= load_reg(get_field(s
, r1
) + 1);
5612 #define SPEC_in1_r1p1 SPEC_r1_even
5614 static void in1_r1p1_o(DisasContext
*s
, DisasOps
*o
)
5616 o
->in1
= regs
[get_field(s
, r1
) + 1];
5619 #define SPEC_in1_r1p1_o SPEC_r1_even
5621 static void in1_r1p1_32s(DisasContext
*s
, DisasOps
*o
)
5623 o
->in1
= tcg_temp_new_i64();
5624 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(s
, r1
) + 1]);
5626 #define SPEC_in1_r1p1_32s SPEC_r1_even
5628 static void in1_r1p1_32u(DisasContext
*s
, DisasOps
*o
)
5630 o
->in1
= tcg_temp_new_i64();
5631 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(s
, r1
) + 1]);
5633 #define SPEC_in1_r1p1_32u SPEC_r1_even
5635 static void in1_r1_D32(DisasContext
*s
, DisasOps
*o
)
5637 int r1
= get_field(s
, r1
);
5638 o
->in1
= tcg_temp_new_i64();
5639 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
5641 #define SPEC_in1_r1_D32 SPEC_r1_even
5643 static void in1_r2(DisasContext
*s
, DisasOps
*o
)
5645 o
->in1
= load_reg(get_field(s
, r2
));
5647 #define SPEC_in1_r2 0
5649 static void in1_r2_sr32(DisasContext
*s
, DisasOps
*o
)
5651 o
->in1
= tcg_temp_new_i64();
5652 tcg_gen_shri_i64(o
->in1
, regs
[get_field(s
, r2
)], 32);
5654 #define SPEC_in1_r2_sr32 0
5656 static void in1_r2_32u(DisasContext
*s
, DisasOps
*o
)
5658 o
->in1
= tcg_temp_new_i64();
5659 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(s
, r2
)]);
5661 #define SPEC_in1_r2_32u 0
5663 static void in1_r3(DisasContext
*s
, DisasOps
*o
)
5665 o
->in1
= load_reg(get_field(s
, r3
));
5667 #define SPEC_in1_r3 0
5669 static void in1_r3_o(DisasContext
*s
, DisasOps
*o
)
5671 o
->in1
= regs
[get_field(s
, r3
)];
5674 #define SPEC_in1_r3_o 0
5676 static void in1_r3_32s(DisasContext
*s
, DisasOps
*o
)
5678 o
->in1
= tcg_temp_new_i64();
5679 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(s
, r3
)]);
5681 #define SPEC_in1_r3_32s 0
5683 static void in1_r3_32u(DisasContext
*s
, DisasOps
*o
)
5685 o
->in1
= tcg_temp_new_i64();
5686 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(s
, r3
)]);
5688 #define SPEC_in1_r3_32u 0
5690 static void in1_r3_D32(DisasContext
*s
, DisasOps
*o
)
5692 int r3
= get_field(s
, r3
);
5693 o
->in1
= tcg_temp_new_i64();
5694 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
5696 #define SPEC_in1_r3_D32 SPEC_r3_even
5698 static void in1_e1(DisasContext
*s
, DisasOps
*o
)
5700 o
->in1
= load_freg32_i64(get_field(s
, r1
));
5702 #define SPEC_in1_e1 0
5704 static void in1_f1(DisasContext
*s
, DisasOps
*o
)
5706 o
->in1
= load_freg(get_field(s
, r1
));
5708 #define SPEC_in1_f1 0
5710 /* Load the high double word of an extended (128-bit) format FP number */
5711 static void in1_x2h(DisasContext
*s
, DisasOps
*o
)
5713 o
->in1
= load_freg(get_field(s
, r2
));
5715 #define SPEC_in1_x2h SPEC_r2_f128
5717 static void in1_f3(DisasContext
*s
, DisasOps
*o
)
5719 o
->in1
= load_freg(get_field(s
, r3
));
5721 #define SPEC_in1_f3 0
5723 static void in1_la1(DisasContext
*s
, DisasOps
*o
)
5725 o
->addr1
= get_address(s
, 0, get_field(s
, b1
), get_field(s
, d1
));
5727 #define SPEC_in1_la1 0
5729 static void in1_la2(DisasContext
*s
, DisasOps
*o
)
5731 int x2
= have_field(s
, x2
) ? get_field(s
, x2
) : 0;
5732 o
->addr1
= get_address(s
, x2
, get_field(s
, b2
), get_field(s
, d2
));
5734 #define SPEC_in1_la2 0
5736 static void in1_m1_8u(DisasContext
*s
, DisasOps
*o
)
5739 o
->in1
= tcg_temp_new_i64();
5740 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
5742 #define SPEC_in1_m1_8u 0
5744 static void in1_m1_16s(DisasContext
*s
, DisasOps
*o
)
5747 o
->in1
= tcg_temp_new_i64();
5748 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
5750 #define SPEC_in1_m1_16s 0
5752 static void in1_m1_16u(DisasContext
*s
, DisasOps
*o
)
5755 o
->in1
= tcg_temp_new_i64();
5756 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
5758 #define SPEC_in1_m1_16u 0
5760 static void in1_m1_32s(DisasContext
*s
, DisasOps
*o
)
5763 o
->in1
= tcg_temp_new_i64();
5764 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
5766 #define SPEC_in1_m1_32s 0
5768 static void in1_m1_32u(DisasContext
*s
, DisasOps
*o
)
5771 o
->in1
= tcg_temp_new_i64();
5772 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
5774 #define SPEC_in1_m1_32u 0
5776 static void in1_m1_64(DisasContext
*s
, DisasOps
*o
)
5779 o
->in1
= tcg_temp_new_i64();
5780 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
5782 #define SPEC_in1_m1_64 0
5784 /* ====================================================================== */
5785 /* The "INput 2" generators. These load the second operand to an insn. */
5787 static void in2_r1_o(DisasContext
*s
, DisasOps
*o
)
5789 o
->in2
= regs
[get_field(s
, r1
)];
5792 #define SPEC_in2_r1_o 0
5794 static void in2_r1_16u(DisasContext
*s
, DisasOps
*o
)
5796 o
->in2
= tcg_temp_new_i64();
5797 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(s
, r1
)]);
5799 #define SPEC_in2_r1_16u 0
5801 static void in2_r1_32u(DisasContext
*s
, DisasOps
*o
)
5803 o
->in2
= tcg_temp_new_i64();
5804 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(s
, r1
)]);
5806 #define SPEC_in2_r1_32u 0
5808 static void in2_r1_D32(DisasContext
*s
, DisasOps
*o
)
5810 int r1
= get_field(s
, r1
);
5811 o
->in2
= tcg_temp_new_i64();
5812 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
5814 #define SPEC_in2_r1_D32 SPEC_r1_even
5816 static void in2_r2(DisasContext
*s
, DisasOps
*o
)
5818 o
->in2
= load_reg(get_field(s
, r2
));
5820 #define SPEC_in2_r2 0
5822 static void in2_r2_o(DisasContext
*s
, DisasOps
*o
)
5824 o
->in2
= regs
[get_field(s
, r2
)];
5827 #define SPEC_in2_r2_o 0
5829 static void in2_r2_nz(DisasContext
*s
, DisasOps
*o
)
5831 int r2
= get_field(s
, r2
);
5833 o
->in2
= load_reg(r2
);
5836 #define SPEC_in2_r2_nz 0
5838 static void in2_r2_8s(DisasContext
*s
, DisasOps
*o
)
5840 o
->in2
= tcg_temp_new_i64();
5841 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5843 #define SPEC_in2_r2_8s 0
5845 static void in2_r2_8u(DisasContext
*s
, DisasOps
*o
)
5847 o
->in2
= tcg_temp_new_i64();
5848 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5850 #define SPEC_in2_r2_8u 0
5852 static void in2_r2_16s(DisasContext
*s
, DisasOps
*o
)
5854 o
->in2
= tcg_temp_new_i64();
5855 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5857 #define SPEC_in2_r2_16s 0
5859 static void in2_r2_16u(DisasContext
*s
, DisasOps
*o
)
5861 o
->in2
= tcg_temp_new_i64();
5862 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5864 #define SPEC_in2_r2_16u 0
5866 static void in2_r3(DisasContext
*s
, DisasOps
*o
)
5868 o
->in2
= load_reg(get_field(s
, r3
));
5870 #define SPEC_in2_r3 0
5872 static void in2_r3_sr32(DisasContext
*s
, DisasOps
*o
)
5874 o
->in2
= tcg_temp_new_i64();
5875 tcg_gen_shri_i64(o
->in2
, regs
[get_field(s
, r3
)], 32);
5877 #define SPEC_in2_r3_sr32 0
5879 static void in2_r3_32u(DisasContext
*s
, DisasOps
*o
)
5881 o
->in2
= tcg_temp_new_i64();
5882 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(s
, r3
)]);
5884 #define SPEC_in2_r3_32u 0
5886 static void in2_r2_32s(DisasContext
*s
, DisasOps
*o
)
5888 o
->in2
= tcg_temp_new_i64();
5889 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5891 #define SPEC_in2_r2_32s 0
5893 static void in2_r2_32u(DisasContext
*s
, DisasOps
*o
)
5895 o
->in2
= tcg_temp_new_i64();
5896 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5898 #define SPEC_in2_r2_32u 0
5900 static void in2_r2_sr32(DisasContext
*s
, DisasOps
*o
)
5902 o
->in2
= tcg_temp_new_i64();
5903 tcg_gen_shri_i64(o
->in2
, regs
[get_field(s
, r2
)], 32);
5905 #define SPEC_in2_r2_sr32 0
5907 static void in2_e2(DisasContext
*s
, DisasOps
*o
)
5909 o
->in2
= load_freg32_i64(get_field(s
, r2
));
5911 #define SPEC_in2_e2 0
5913 static void in2_f2(DisasContext
*s
, DisasOps
*o
)
5915 o
->in2
= load_freg(get_field(s
, r2
));
5917 #define SPEC_in2_f2 0
5919 /* Load the low double word of an extended (128-bit) format FP number */
5920 static void in2_x2l(DisasContext
*s
, DisasOps
*o
)
5922 o
->in2
= load_freg(get_field(s
, r2
) + 2);
5924 #define SPEC_in2_x2l SPEC_r2_f128
5926 static void in2_ra2(DisasContext
*s
, DisasOps
*o
)
5928 o
->in2
= get_address(s
, 0, get_field(s
, r2
), 0);
5930 #define SPEC_in2_ra2 0
5932 static void in2_a2(DisasContext
*s
, DisasOps
*o
)
5934 int x2
= have_field(s
, x2
) ? get_field(s
, x2
) : 0;
5935 o
->in2
= get_address(s
, x2
, get_field(s
, b2
), get_field(s
, d2
));
5937 #define SPEC_in2_a2 0
5939 static void in2_ri2(DisasContext
*s
, DisasOps
*o
)
5941 o
->in2
= tcg_const_i64(s
->base
.pc_next
+ (int64_t)get_field(s
, i2
) * 2);
5943 #define SPEC_in2_ri2 0
5945 static void in2_sh32(DisasContext
*s
, DisasOps
*o
)
5947 help_l2_shift(s
, o
, 31);
5949 #define SPEC_in2_sh32 0
5951 static void in2_sh64(DisasContext
*s
, DisasOps
*o
)
5953 help_l2_shift(s
, o
, 63);
5955 #define SPEC_in2_sh64 0
5957 static void in2_m2_8u(DisasContext
*s
, DisasOps
*o
)
5960 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
5962 #define SPEC_in2_m2_8u 0
5964 static void in2_m2_16s(DisasContext
*s
, DisasOps
*o
)
5967 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
5969 #define SPEC_in2_m2_16s 0
5971 static void in2_m2_16u(DisasContext
*s
, DisasOps
*o
)
5974 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5976 #define SPEC_in2_m2_16u 0
5978 static void in2_m2_32s(DisasContext
*s
, DisasOps
*o
)
5981 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5983 #define SPEC_in2_m2_32s 0
5985 static void in2_m2_32u(DisasContext
*s
, DisasOps
*o
)
5988 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5990 #define SPEC_in2_m2_32u 0
5992 #ifndef CONFIG_USER_ONLY
5993 static void in2_m2_32ua(DisasContext
*s
, DisasOps
*o
)
5996 tcg_gen_qemu_ld_tl(o
->in2
, o
->in2
, get_mem_index(s
), MO_TEUL
| MO_ALIGN
);
5998 #define SPEC_in2_m2_32ua 0
6001 static void in2_m2_64(DisasContext
*s
, DisasOps
*o
)
6004 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
6006 #define SPEC_in2_m2_64 0
6008 static void in2_m2_64w(DisasContext
*s
, DisasOps
*o
)
6011 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
6012 gen_addi_and_wrap_i64(s
, o
->in2
, o
->in2
, 0);
6014 #define SPEC_in2_m2_64w 0
6016 #ifndef CONFIG_USER_ONLY
6017 static void in2_m2_64a(DisasContext
*s
, DisasOps
*o
)
6020 tcg_gen_qemu_ld_i64(o
->in2
, o
->in2
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
6022 #define SPEC_in2_m2_64a 0
6025 static void in2_mri2_16u(DisasContext
*s
, DisasOps
*o
)
6028 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
6030 #define SPEC_in2_mri2_16u 0
6032 static void in2_mri2_32s(DisasContext
*s
, DisasOps
*o
)
6035 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
6037 #define SPEC_in2_mri2_32s 0
6039 static void in2_mri2_32u(DisasContext
*s
, DisasOps
*o
)
6042 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
6044 #define SPEC_in2_mri2_32u 0
6046 static void in2_mri2_64(DisasContext
*s
, DisasOps
*o
)
6049 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
6051 #define SPEC_in2_mri2_64 0
6053 static void in2_i2(DisasContext
*s
, DisasOps
*o
)
6055 o
->in2
= tcg_const_i64(get_field(s
, i2
));
6057 #define SPEC_in2_i2 0
6059 static void in2_i2_8u(DisasContext
*s
, DisasOps
*o
)
6061 o
->in2
= tcg_const_i64((uint8_t)get_field(s
, i2
));
6063 #define SPEC_in2_i2_8u 0
6065 static void in2_i2_16u(DisasContext
*s
, DisasOps
*o
)
6067 o
->in2
= tcg_const_i64((uint16_t)get_field(s
, i2
));
6069 #define SPEC_in2_i2_16u 0
6071 static void in2_i2_32u(DisasContext
*s
, DisasOps
*o
)
6073 o
->in2
= tcg_const_i64((uint32_t)get_field(s
, i2
));
6075 #define SPEC_in2_i2_32u 0
6077 static void in2_i2_16u_shl(DisasContext
*s
, DisasOps
*o
)
6079 uint64_t i2
= (uint16_t)get_field(s
, i2
);
6080 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
6082 #define SPEC_in2_i2_16u_shl 0
6084 static void in2_i2_32u_shl(DisasContext
*s
, DisasOps
*o
)
6086 uint64_t i2
= (uint32_t)get_field(s
, i2
);
6087 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
6089 #define SPEC_in2_i2_32u_shl 0
6091 #ifndef CONFIG_USER_ONLY
6092 static void in2_insn(DisasContext
*s
, DisasOps
*o
)
6094 o
->in2
= tcg_const_i64(s
->fields
.raw_insn
);
6096 #define SPEC_in2_insn 0
6099 /* ====================================================================== */
6101 /* Find opc within the table of insns. This is formulated as a switch
6102 statement so that (1) we get compile-time notice of cut-paste errors
6103 for duplicated opcodes, and (2) the compiler generates the binary
6104 search tree, rather than us having to post-process the table. */
6106 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6107 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6109 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6110 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6112 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6113 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6115 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6117 enum DisasInsnEnum
{
6118 #include "insn-data.def"
6122 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
6127 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
6129 .help_in1 = in1_##I1, \
6130 .help_in2 = in2_##I2, \
6131 .help_prep = prep_##P, \
6132 .help_wout = wout_##W, \
6133 .help_cout = cout_##CC, \
6134 .help_op = op_##OP, \
6138 /* Allow 0 to be used for NULL in the table below. */
6146 #define SPEC_in1_0 0
6147 #define SPEC_in2_0 0
6148 #define SPEC_prep_0 0
6149 #define SPEC_wout_0 0
6151 /* Give smaller names to the various facilities. */
6152 #define FAC_Z S390_FEAT_ZARCH
6153 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6154 #define FAC_DFP S390_FEAT_DFP
6155 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6156 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
6157 #define FAC_EE S390_FEAT_EXECUTE_EXT
6158 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
6159 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
6160 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6161 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6162 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6163 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
6164 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
6165 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6166 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6167 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
6168 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
6169 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
6170 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
6171 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
6172 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
6173 #define FAC_SFLE S390_FEAT_STFLE
6174 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6175 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6176 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6177 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
6178 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
6179 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
6180 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
6181 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6182 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
6183 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
6184 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6185 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6186 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6187 #define FAC_MSA8 S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6188 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
6189 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
6190 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
6191 #define FAC_V S390_FEAT_VECTOR /* vector facility */
6192 #define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6193 #define FAC_MIE2 S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6195 static const DisasInsn insn_info
[] = {
6196 #include "insn-data.def"
6200 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6201 case OPC: return &insn_info[insn_ ## NM];
6203 static const DisasInsn
*lookup_opc(uint16_t opc
)
6206 #include "insn-data.def"
6217 /* Extract a field from the insn. The INSN should be left-aligned in
6218 the uint64_t so that we can more easily utilize the big-bit-endian
6219 definitions we extract from the Principals of Operation. */
6221 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
6229 /* Zero extract the field from the insn. */
6230 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
6232 /* Sign-extend, or un-swap the field as necessary. */
6234 case 0: /* unsigned */
6236 case 1: /* signed */
6237 assert(f
->size
<= 32);
6238 m
= 1u << (f
->size
- 1);
6241 case 2: /* dl+dh split, signed 20 bit. */
6242 r
= ((int8_t)r
<< 12) | (r
>> 8);
6244 case 3: /* MSB stored in RXB */
6245 g_assert(f
->size
== 4);
6248 r
|= extract64(insn
, 63 - 36, 1) << 4;
6251 r
|= extract64(insn
, 63 - 37, 1) << 4;
6254 r
|= extract64(insn
, 63 - 38, 1) << 4;
6257 r
|= extract64(insn
, 63 - 39, 1) << 4;
6260 g_assert_not_reached();
6267 /* Validate that the "compressed" encoding we selected above is valid.
6268 I.e. we havn't make two different original fields overlap. */
6269 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
6270 o
->presentC
|= 1 << f
->indexC
;
6271 o
->presentO
|= 1 << f
->indexO
;
6273 o
->c
[f
->indexC
] = r
;
6276 /* Lookup the insn at the current PC, extracting the operands into O and
6277 returning the info struct for the insn. Returns NULL for invalid insn. */
6279 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
)
6281 uint64_t insn
, pc
= s
->base
.pc_next
;
6283 const DisasInsn
*info
;
6285 if (unlikely(s
->ex_value
)) {
6286 /* Drop the EX data now, so that it's clear on exception paths. */
6287 TCGv_i64 zero
= tcg_const_i64(0);
6288 tcg_gen_st_i64(zero
, cpu_env
, offsetof(CPUS390XState
, ex_value
));
6289 tcg_temp_free_i64(zero
);
6291 /* Extract the values saved by EXECUTE. */
6292 insn
= s
->ex_value
& 0xffffffffffff0000ull
;
6293 ilen
= s
->ex_value
& 0xf;
6296 insn
= ld_code2(env
, pc
);
6297 op
= (insn
>> 8) & 0xff;
6298 ilen
= get_ilen(op
);
6304 insn
= ld_code4(env
, pc
) << 32;
6307 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
6310 g_assert_not_reached();
6313 s
->pc_tmp
= s
->base
.pc_next
+ ilen
;
6316 /* We can't actually determine the insn format until we've looked up
6317 the full insn opcode. Which we can't do without locating the
6318 secondary opcode. Assume by default that OP2 is at bit 40; for
6319 those smaller insns that don't actually have a secondary opcode
6320 this will correctly result in OP2 = 0. */
6326 case 0xb2: /* S, RRF, RRE, IE */
6327 case 0xb3: /* RRE, RRD, RRF */
6328 case 0xb9: /* RRE, RRF */
6329 case 0xe5: /* SSE, SIL */
6330 op2
= (insn
<< 8) >> 56;
6334 case 0xc0: /* RIL */
6335 case 0xc2: /* RIL */
6336 case 0xc4: /* RIL */
6337 case 0xc6: /* RIL */
6338 case 0xc8: /* SSF */
6339 case 0xcc: /* RIL */
6340 op2
= (insn
<< 12) >> 60;
6342 case 0xc5: /* MII */
6343 case 0xc7: /* SMI */
6344 case 0xd0 ... 0xdf: /* SS */
6350 case 0xee ... 0xf3: /* SS */
6351 case 0xf8 ... 0xfd: /* SS */
6355 op2
= (insn
<< 40) >> 56;
6359 memset(&s
->fields
, 0, sizeof(s
->fields
));
6360 s
->fields
.raw_insn
= insn
;
6362 s
->fields
.op2
= op2
;
6364 /* Lookup the instruction. */
6365 info
= lookup_opc(op
<< 8 | op2
);
6368 /* If we found it, extract the operands. */
6370 DisasFormat fmt
= info
->fmt
;
6373 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
6374 extract_field(&s
->fields
, &format_info
[fmt
].op
[i
], insn
);
/* True if REG requires the additional-floating-point facility,
   i.e. it is not one of the four basic FP registers 0, 2, 4, 6.  */
static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}
/* True if REG is a valid first register of an FP register pair.  */
static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}
6391 static DisasJumpType
translate_one(CPUS390XState
*env
, DisasContext
*s
)
6393 const DisasInsn
*insn
;
6394 DisasJumpType ret
= DISAS_NEXT
;
6396 bool icount
= false;
6398 /* Search for the insn in the table. */
6399 insn
= extract_insn(env
, s
);
6401 /* Emit insn_start now that we know the ILEN. */
6402 tcg_gen_insn_start(s
->base
.pc_next
, s
->cc_op
, s
->ilen
);
6404 /* Not found means unimplemented/illegal opcode. */
6406 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
6407 s
->fields
.op
, s
->fields
.op2
);
6408 gen_illegal_opcode(s
);
6409 return DISAS_NORETURN
;
6412 #ifndef CONFIG_USER_ONLY
6413 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
6414 TCGv_i64 addr
= tcg_const_i64(s
->base
.pc_next
);
6415 gen_helper_per_ifetch(cpu_env
, addr
);
6416 tcg_temp_free_i64(addr
);
6422 /* privileged instruction */
6423 if ((s
->base
.tb
->flags
& FLAG_MASK_PSTATE
) && (insn
->flags
& IF_PRIV
)) {
6424 gen_program_exception(s
, PGM_PRIVILEGED
);
6425 return DISAS_NORETURN
;
6428 /* if AFP is not enabled, instructions and registers are forbidden */
6429 if (!(s
->base
.tb
->flags
& FLAG_MASK_AFP
)) {
6432 if ((insn
->flags
& IF_AFP1
) && is_afp_reg(get_field(s
, r1
))) {
6435 if ((insn
->flags
& IF_AFP2
) && is_afp_reg(get_field(s
, r2
))) {
6438 if ((insn
->flags
& IF_AFP3
) && is_afp_reg(get_field(s
, r3
))) {
6441 if (insn
->flags
& IF_BFP
) {
6444 if (insn
->flags
& IF_DFP
) {
6447 if (insn
->flags
& IF_VEC
) {
6451 gen_data_exception(dxc
);
6452 return DISAS_NORETURN
;
6456 /* if vector instructions not enabled, executing them is forbidden */
6457 if (insn
->flags
& IF_VEC
) {
6458 if (!((s
->base
.tb
->flags
& FLAG_MASK_VECTOR
))) {
6459 gen_data_exception(0xfe);
6460 return DISAS_NORETURN
;
6464 /* input/output is the special case for icount mode */
6465 if (unlikely(insn
->flags
& IF_IO
)) {
6466 icount
= tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
;
6473 /* Check for insn specification exceptions. */
6475 if ((insn
->spec
& SPEC_r1_even
&& get_field(s
, r1
) & 1) ||
6476 (insn
->spec
& SPEC_r2_even
&& get_field(s
, r2
) & 1) ||
6477 (insn
->spec
& SPEC_r3_even
&& get_field(s
, r3
) & 1) ||
6478 (insn
->spec
& SPEC_r1_f128
&& !is_fp_pair(get_field(s
, r1
))) ||
6479 (insn
->spec
& SPEC_r2_f128
&& !is_fp_pair(get_field(s
, r2
)))) {
6480 gen_program_exception(s
, PGM_SPECIFICATION
);
6481 return DISAS_NORETURN
;
6485 /* Implement the instruction. */
6486 if (insn
->help_in1
) {
6487 insn
->help_in1(s
, &o
);
6489 if (insn
->help_in2
) {
6490 insn
->help_in2(s
, &o
);
6492 if (insn
->help_prep
) {
6493 insn
->help_prep(s
, &o
);
6495 if (insn
->help_op
) {
6496 ret
= insn
->help_op(s
, &o
);
6498 if (ret
!= DISAS_NORETURN
) {
6499 if (insn
->help_wout
) {
6500 insn
->help_wout(s
, &o
);
6502 if (insn
->help_cout
) {
6503 insn
->help_cout(s
, &o
);
6507 /* Free any temporaries created by the helpers. */
6508 if (o
.out
&& !o
.g_out
) {
6509 tcg_temp_free_i64(o
.out
);
6511 if (o
.out2
&& !o
.g_out2
) {
6512 tcg_temp_free_i64(o
.out2
);
6514 if (o
.in1
&& !o
.g_in1
) {
6515 tcg_temp_free_i64(o
.in1
);
6517 if (o
.in2
&& !o
.g_in2
) {
6518 tcg_temp_free_i64(o
.in2
);
6521 tcg_temp_free_i64(o
.addr1
);
6524 /* io should be the last instruction in tb when icount is enabled */
6525 if (unlikely(icount
&& ret
== DISAS_NEXT
)) {
6526 ret
= DISAS_PC_STALE
;
6529 #ifndef CONFIG_USER_ONLY
6530 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
6531 /* An exception might be triggered, save PSW if not already done. */
6532 if (ret
== DISAS_NEXT
|| ret
== DISAS_PC_STALE
) {
6533 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
6536 /* Call the helper to check for a possible PER exception. */
6537 gen_helper_per_check_exception(cpu_env
);
6541 /* Advance to the next instruction. */
6542 s
->base
.pc_next
= s
->pc_tmp
;
6546 static void s390x_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
6548 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6551 if (!(dc
->base
.tb
->flags
& FLAG_MASK_64
)) {
6552 dc
->base
.pc_first
&= 0x7fffffff;
6553 dc
->base
.pc_next
= dc
->base
.pc_first
;
6556 dc
->cc_op
= CC_OP_DYNAMIC
;
6557 dc
->ex_value
= dc
->base
.tb
->cs_base
;
6558 dc
->do_debug
= dc
->base
.singlestep_enabled
;
6561 static void s390x_tr_tb_start(DisasContextBase
*db
, CPUState
*cs
)
6565 static void s390x_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
6569 static bool s390x_tr_breakpoint_check(DisasContextBase
*dcbase
, CPUState
*cs
,
6570 const CPUBreakpoint
*bp
)
6572 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6575 * Emit an insn_start to accompany the breakpoint exception.
6576 * The ILEN value is a dummy, since this does not result in
6577 * an s390x exception, but an internal qemu exception which
6578 * brings us back to interact with the gdbstub.
6580 tcg_gen_insn_start(dc
->base
.pc_next
, dc
->cc_op
, 2);
6582 dc
->base
.is_jmp
= DISAS_PC_STALE
;
6583 dc
->do_debug
= true;
6584 /* The address covered by the breakpoint must be included in
6585 [tb->pc, tb->pc + tb->size) in order to for it to be
6586 properly cleared -- thus we increment the PC here so that
6587 the logic setting tb->size does the right thing. */
6588 dc
->base
.pc_next
+= 2;
6592 static void s390x_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
6594 CPUS390XState
*env
= cs
->env_ptr
;
6595 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6597 dc
->base
.is_jmp
= translate_one(env
, dc
);
6598 if (dc
->base
.is_jmp
== DISAS_NEXT
) {
6599 uint64_t page_start
;
6601 page_start
= dc
->base
.pc_first
& TARGET_PAGE_MASK
;
6602 if (dc
->base
.pc_next
- page_start
>= TARGET_PAGE_SIZE
|| dc
->ex_value
) {
6603 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
6608 static void s390x_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
6610 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6612 switch (dc
->base
.is_jmp
) {
6614 case DISAS_NORETURN
:
6616 case DISAS_TOO_MANY
:
6617 case DISAS_PC_STALE
:
6618 case DISAS_PC_STALE_NOCHAIN
:
6619 update_psw_addr(dc
);
6621 case DISAS_PC_UPDATED
:
6622 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6623 cc op type is in env */
6626 case DISAS_PC_CC_UPDATED
:
6627 /* Exit the TB, either by raising a debug exception or by return. */
6629 gen_exception(EXCP_DEBUG
);
6630 } else if (use_exit_tb(dc
) ||
6631 dc
->base
.is_jmp
== DISAS_PC_STALE_NOCHAIN
) {
6632 tcg_gen_exit_tb(NULL
, 0);
6634 tcg_gen_lookup_and_goto_ptr();
6638 g_assert_not_reached();
6642 static void s390x_tr_disas_log(const DisasContextBase
*dcbase
, CPUState
*cs
)
6644 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6646 if (unlikely(dc
->ex_value
)) {
6647 /* ??? Unfortunately log_target_disas can't use host memory. */
6648 qemu_log("IN: EXECUTE %016" PRIx64
, dc
->ex_value
);
6650 qemu_log("IN: %s\n", lookup_symbol(dc
->base
.pc_first
));
6651 log_target_disas(cs
, dc
->base
.pc_first
, dc
->base
.tb
->size
);
6655 static const TranslatorOps s390x_tr_ops
= {
6656 .init_disas_context
= s390x_tr_init_disas_context
,
6657 .tb_start
= s390x_tr_tb_start
,
6658 .insn_start
= s390x_tr_insn_start
,
6659 .breakpoint_check
= s390x_tr_breakpoint_check
,
6660 .translate_insn
= s390x_tr_translate_insn
,
6661 .tb_stop
= s390x_tr_tb_stop
,
6662 .disas_log
= s390x_tr_disas_log
,
6665 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int max_insns
)
6669 translator_loop(&s390x_tr_ops
, &dc
.base
, cs
, tb
, max_insns
);
6672 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
6675 int cc_op
= data
[1];
6677 env
->psw
.addr
= data
[0];
6679 /* Update the CC opcode if it is not already up-to-date. */
6680 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {
6685 env
->int_pgm_ilen
= data
[2];