4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
40 #include "exec/gen-icount.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
44 #include "trace-tcg.h"
45 #include "exec/translator.h"
49 /* Information that (most) every instruction needs to manipulate. */
50 typedef struct DisasContext DisasContext
;
51 typedef struct DisasInsn DisasInsn
;
52 typedef struct DisasFields DisasFields
;
55 DisasContextBase base
;
56 const DisasInsn
*insn
;
60 * During translate_one(), pc_tmp is used to determine the instruction
61 * to be executed after base.pc_next - e.g. next sequential instruction
70 /* Information carried about a condition to be evaluated. */
77 struct { TCGv_i64 a
, b
; } s64
;
78 struct { TCGv_i32 a
, b
; } s32
;
82 #ifdef DEBUG_INLINE_BRANCHES
83 static uint64_t inline_branch_hit
[CC_OP_MAX
];
84 static uint64_t inline_branch_miss
[CC_OP_MAX
];
87 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
89 if (!(s
->base
.tb
->flags
& FLAG_MASK_64
)) {
90 if (s
->base
.tb
->flags
& FLAG_MASK_32
) {
91 return pc
| 0x80000000;
97 static TCGv_i64 psw_addr
;
98 static TCGv_i64 psw_mask
;
101 static TCGv_i32 cc_op
;
102 static TCGv_i64 cc_src
;
103 static TCGv_i64 cc_dst
;
104 static TCGv_i64 cc_vr
;
106 static char cpu_reg_names
[32][4];
107 static TCGv_i64 regs
[16];
108 static TCGv_i64 fregs
[16];
110 void s390x_translate_init(void)
114 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
115 offsetof(CPUS390XState
, psw
.addr
),
117 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
118 offsetof(CPUS390XState
, psw
.mask
),
120 gbea
= tcg_global_mem_new_i64(cpu_env
,
121 offsetof(CPUS390XState
, gbea
),
124 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
126 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
128 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
130 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
133 for (i
= 0; i
< 16; i
++) {
134 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
135 regs
[i
] = tcg_global_mem_new(cpu_env
,
136 offsetof(CPUS390XState
, regs
[i
]),
140 for (i
= 0; i
< 16; i
++) {
141 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
142 fregs
[i
] = tcg_global_mem_new(cpu_env
,
143 offsetof(CPUS390XState
, vregs
[i
][0].d
),
144 cpu_reg_names
[i
+ 16]);
148 static TCGv_i64
load_reg(int reg
)
150 TCGv_i64 r
= tcg_temp_new_i64();
151 tcg_gen_mov_i64(r
, regs
[reg
]);
155 static TCGv_i64
load_freg32_i64(int reg
)
157 TCGv_i64 r
= tcg_temp_new_i64();
158 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
162 static void store_reg(int reg
, TCGv_i64 v
)
164 tcg_gen_mov_i64(regs
[reg
], v
);
167 static void store_freg(int reg
, TCGv_i64 v
)
169 tcg_gen_mov_i64(fregs
[reg
], v
);
172 static void store_reg32_i64(int reg
, TCGv_i64 v
)
174 /* 32 bit register writes keep the upper half */
175 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
178 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
180 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
183 static void store_freg32_i64(int reg
, TCGv_i64 v
)
185 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
188 static void return_low128(TCGv_i64 dest
)
190 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
193 static void update_psw_addr(DisasContext
*s
)
196 tcg_gen_movi_i64(psw_addr
, s
->base
.pc_next
);
199 static void per_branch(DisasContext
*s
, bool to_next
)
201 #ifndef CONFIG_USER_ONLY
202 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
204 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
205 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->pc_tmp
) : psw_addr
;
206 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
208 tcg_temp_free_i64(next_pc
);
214 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
215 TCGv_i64 arg1
, TCGv_i64 arg2
)
217 #ifndef CONFIG_USER_ONLY
218 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
219 TCGLabel
*lab
= gen_new_label();
220 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
222 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
223 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
227 TCGv_i64 pc
= tcg_const_i64(s
->base
.pc_next
);
228 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
229 tcg_temp_free_i64(pc
);
234 static void per_breaking_event(DisasContext
*s
)
236 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
239 static void update_cc_op(DisasContext
*s
)
241 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
242 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
246 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
248 return (uint64_t)cpu_lduw_code(env
, pc
);
251 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
253 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
256 static int get_mem_index(DisasContext
*s
)
258 if (!(s
->base
.tb
->flags
& FLAG_MASK_DAT
)) {
262 switch (s
->base
.tb
->flags
& FLAG_MASK_ASC
) {
263 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
264 return MMU_PRIMARY_IDX
;
265 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
266 return MMU_SECONDARY_IDX
;
267 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
275 static void gen_exception(int excp
)
277 TCGv_i32 tmp
= tcg_const_i32(excp
);
278 gen_helper_exception(cpu_env
, tmp
);
279 tcg_temp_free_i32(tmp
);
282 static void gen_program_exception(DisasContext
*s
, int code
)
286 /* Remember what pgm exeption this was. */
287 tmp
= tcg_const_i32(code
);
288 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
289 tcg_temp_free_i32(tmp
);
291 tmp
= tcg_const_i32(s
->ilen
);
292 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
293 tcg_temp_free_i32(tmp
);
301 /* Trigger exception. */
302 gen_exception(EXCP_PGM
);
305 static inline void gen_illegal_opcode(DisasContext
*s
)
307 gen_program_exception(s
, PGM_OPERATION
);
310 static inline void gen_trap(DisasContext
*s
)
314 /* Set DXC to 0xff. */
315 t
= tcg_temp_new_i32();
316 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
317 tcg_gen_ori_i32(t
, t
, 0xff00);
318 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
319 tcg_temp_free_i32(t
);
321 gen_program_exception(s
, PGM_DATA
);
324 #ifndef CONFIG_USER_ONLY
325 static void check_privileged(DisasContext
*s
)
327 if (s
->base
.tb
->flags
& FLAG_MASK_PSTATE
) {
328 gen_program_exception(s
, PGM_PRIVILEGED
);
333 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
335 TCGv_i64 tmp
= tcg_temp_new_i64();
336 bool need_31
= !(s
->base
.tb
->flags
& FLAG_MASK_64
);
338 /* Note that d2 is limited to 20 bits, signed. If we crop negative
339 displacements early we create larger immedate addends. */
341 /* Note that addi optimizes the imm==0 case. */
343 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
344 tcg_gen_addi_i64(tmp
, tmp
, d2
);
346 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
348 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
354 tcg_gen_movi_i64(tmp
, d2
);
357 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
363 static inline bool live_cc_data(DisasContext
*s
)
365 return (s
->cc_op
!= CC_OP_DYNAMIC
366 && s
->cc_op
!= CC_OP_STATIC
370 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
372 if (live_cc_data(s
)) {
373 tcg_gen_discard_i64(cc_src
);
374 tcg_gen_discard_i64(cc_dst
);
375 tcg_gen_discard_i64(cc_vr
);
377 s
->cc_op
= CC_OP_CONST0
+ val
;
380 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
382 if (live_cc_data(s
)) {
383 tcg_gen_discard_i64(cc_src
);
384 tcg_gen_discard_i64(cc_vr
);
386 tcg_gen_mov_i64(cc_dst
, dst
);
390 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
393 if (live_cc_data(s
)) {
394 tcg_gen_discard_i64(cc_vr
);
396 tcg_gen_mov_i64(cc_src
, src
);
397 tcg_gen_mov_i64(cc_dst
, dst
);
401 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
402 TCGv_i64 dst
, TCGv_i64 vr
)
404 tcg_gen_mov_i64(cc_src
, src
);
405 tcg_gen_mov_i64(cc_dst
, dst
);
406 tcg_gen_mov_i64(cc_vr
, vr
);
410 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
412 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
415 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
417 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
420 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
422 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
425 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
427 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
430 /* CC value is in env->cc_op */
431 static void set_cc_static(DisasContext
*s
)
433 if (live_cc_data(s
)) {
434 tcg_gen_discard_i64(cc_src
);
435 tcg_gen_discard_i64(cc_dst
);
436 tcg_gen_discard_i64(cc_vr
);
438 s
->cc_op
= CC_OP_STATIC
;
441 /* calculates cc into cc_op */
442 static void gen_op_calc_cc(DisasContext
*s
)
444 TCGv_i32 local_cc_op
= NULL
;
445 TCGv_i64 dummy
= NULL
;
449 dummy
= tcg_const_i64(0);
463 local_cc_op
= tcg_const_i32(s
->cc_op
);
479 /* s->cc_op is the cc value */
480 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
483 /* env->cc_op already is the cc value */
498 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
503 case CC_OP_LTUGTU_32
:
504 case CC_OP_LTUGTU_64
:
511 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
526 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
529 /* unknown operation - assume 3 arguments and cc_op in env */
530 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
537 tcg_temp_free_i32(local_cc_op
);
540 tcg_temp_free_i64(dummy
);
543 /* We now have cc in cc_op as constant */
547 static bool use_exit_tb(DisasContext
*s
)
549 return s
->base
.singlestep_enabled
||
550 (tb_cflags(s
->base
.tb
) & CF_LAST_IO
) ||
551 (s
->base
.tb
->flags
& FLAG_MASK_PER
);
554 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
556 if (unlikely(use_exit_tb(s
))) {
559 #ifndef CONFIG_USER_ONLY
560 return (dest
& TARGET_PAGE_MASK
) == (s
->base
.tb
->pc
& TARGET_PAGE_MASK
) ||
561 (dest
& TARGET_PAGE_MASK
) == (s
->base
.pc_next
& TARGET_PAGE_MASK
);
567 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
569 #ifdef DEBUG_INLINE_BRANCHES
570 inline_branch_miss
[cc_op
]++;
574 static void account_inline_branch(DisasContext
*s
, int cc_op
)
576 #ifdef DEBUG_INLINE_BRANCHES
577 inline_branch_hit
[cc_op
]++;
581 /* Table of mask values to comparison codes, given a comparison as input.
582 For such, CC=3 should not be possible. */
583 static const TCGCond ltgt_cond
[16] = {
584 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
585 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
586 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
587 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
588 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
589 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
590 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
591 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
594 /* Table of mask values to comparison codes, given a logic op as input.
595 For such, only CC=0 and CC=1 should be possible. */
596 static const TCGCond nz_cond
[16] = {
597 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
598 TCG_COND_NEVER
, TCG_COND_NEVER
,
599 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
600 TCG_COND_NE
, TCG_COND_NE
,
601 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
602 TCG_COND_EQ
, TCG_COND_EQ
,
603 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
604 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
607 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
608 details required to generate a TCG comparison. */
609 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
612 enum cc_op old_cc_op
= s
->cc_op
;
614 if (mask
== 15 || mask
== 0) {
615 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
618 c
->g1
= c
->g2
= true;
623 /* Find the TCG condition for the mask + cc op. */
629 cond
= ltgt_cond
[mask
];
630 if (cond
== TCG_COND_NEVER
) {
633 account_inline_branch(s
, old_cc_op
);
636 case CC_OP_LTUGTU_32
:
637 case CC_OP_LTUGTU_64
:
638 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
639 if (cond
== TCG_COND_NEVER
) {
642 account_inline_branch(s
, old_cc_op
);
646 cond
= nz_cond
[mask
];
647 if (cond
== TCG_COND_NEVER
) {
650 account_inline_branch(s
, old_cc_op
);
665 account_inline_branch(s
, old_cc_op
);
680 account_inline_branch(s
, old_cc_op
);
684 switch (mask
& 0xa) {
685 case 8: /* src == 0 -> no one bit found */
688 case 2: /* src != 0 -> one bit found */
694 account_inline_branch(s
, old_cc_op
);
700 case 8 | 2: /* vr == 0 */
703 case 4 | 1: /* vr != 0 */
706 case 8 | 4: /* no carry -> vr >= src */
709 case 2 | 1: /* carry -> vr < src */
715 account_inline_branch(s
, old_cc_op
);
720 /* Note that CC=0 is impossible; treat it as dont-care. */
722 case 2: /* zero -> op1 == op2 */
725 case 4 | 1: /* !zero -> op1 != op2 */
728 case 4: /* borrow (!carry) -> op1 < op2 */
731 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
737 account_inline_branch(s
, old_cc_op
);
742 /* Calculate cc value. */
747 /* Jump based on CC. We'll load up the real cond below;
748 the assignment here merely avoids a compiler warning. */
749 account_noninline_branch(s
, old_cc_op
);
750 old_cc_op
= CC_OP_STATIC
;
751 cond
= TCG_COND_NEVER
;
755 /* Load up the arguments of the comparison. */
757 c
->g1
= c
->g2
= false;
761 c
->u
.s32
.a
= tcg_temp_new_i32();
762 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
763 c
->u
.s32
.b
= tcg_const_i32(0);
766 case CC_OP_LTUGTU_32
:
769 c
->u
.s32
.a
= tcg_temp_new_i32();
770 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
771 c
->u
.s32
.b
= tcg_temp_new_i32();
772 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
779 c
->u
.s64
.b
= tcg_const_i64(0);
783 case CC_OP_LTUGTU_64
:
787 c
->g1
= c
->g2
= true;
793 c
->u
.s64
.a
= tcg_temp_new_i64();
794 c
->u
.s64
.b
= tcg_const_i64(0);
795 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
800 c
->u
.s32
.a
= tcg_temp_new_i32();
801 c
->u
.s32
.b
= tcg_temp_new_i32();
802 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
803 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
804 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
806 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
813 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
814 c
->u
.s64
.b
= tcg_const_i64(0);
826 case 0x8 | 0x4 | 0x2: /* cc != 3 */
828 c
->u
.s32
.b
= tcg_const_i32(3);
830 case 0x8 | 0x4 | 0x1: /* cc != 2 */
832 c
->u
.s32
.b
= tcg_const_i32(2);
834 case 0x8 | 0x2 | 0x1: /* cc != 1 */
836 c
->u
.s32
.b
= tcg_const_i32(1);
838 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
841 c
->u
.s32
.a
= tcg_temp_new_i32();
842 c
->u
.s32
.b
= tcg_const_i32(0);
843 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
845 case 0x8 | 0x4: /* cc < 2 */
847 c
->u
.s32
.b
= tcg_const_i32(2);
849 case 0x8: /* cc == 0 */
851 c
->u
.s32
.b
= tcg_const_i32(0);
853 case 0x4 | 0x2 | 0x1: /* cc != 0 */
855 c
->u
.s32
.b
= tcg_const_i32(0);
857 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
860 c
->u
.s32
.a
= tcg_temp_new_i32();
861 c
->u
.s32
.b
= tcg_const_i32(0);
862 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
864 case 0x4: /* cc == 1 */
866 c
->u
.s32
.b
= tcg_const_i32(1);
868 case 0x2 | 0x1: /* cc > 1 */
870 c
->u
.s32
.b
= tcg_const_i32(1);
872 case 0x2: /* cc == 2 */
874 c
->u
.s32
.b
= tcg_const_i32(2);
876 case 0x1: /* cc == 3 */
878 c
->u
.s32
.b
= tcg_const_i32(3);
881 /* CC is masked by something else: (8 >> cc) & mask. */
884 c
->u
.s32
.a
= tcg_const_i32(8);
885 c
->u
.s32
.b
= tcg_const_i32(0);
886 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
887 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
898 static void free_compare(DisasCompare
*c
)
902 tcg_temp_free_i64(c
->u
.s64
.a
);
904 tcg_temp_free_i32(c
->u
.s32
.a
);
909 tcg_temp_free_i64(c
->u
.s64
.b
);
911 tcg_temp_free_i32(c
->u
.s32
.b
);
916 /* ====================================================================== */
917 /* Define the insn format enumeration. */
918 #define F0(N) FMT_##N,
919 #define F1(N, X1) F0(N)
920 #define F2(N, X1, X2) F0(N)
921 #define F3(N, X1, X2, X3) F0(N)
922 #define F4(N, X1, X2, X3, X4) F0(N)
923 #define F5(N, X1, X2, X3, X4, X5) F0(N)
926 #include "insn-format.def"
936 /* Define a structure to hold the decoded fields. We'll store each inside
937 an array indexed by an enum. In order to conserve memory, we'll arrange
938 for fields that do not exist at the same time to overlap, thus the "C"
939 for compact. For checking purposes there is an "O" for original index
940 as well that will be applied to availability bitmaps. */
942 enum DisasFieldIndexO
{
965 enum DisasFieldIndexC
{
1000 unsigned presentC
:16;
1001 unsigned int presentO
;
1005 /* This is the way fields are to be accessed out of DisasFields. */
1006 #define have_field(S, F) have_field1((S), FLD_O_##F)
1007 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1009 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1011 return (f
->presentO
>> c
) & 1;
1014 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1015 enum DisasFieldIndexC c
)
1017 assert(have_field1(f
, o
));
1021 /* Describe the layout of each field in each format. */
1022 typedef struct DisasField
{
1024 unsigned int size
:8;
1025 unsigned int type
:2;
1026 unsigned int indexC
:6;
1027 enum DisasFieldIndexO indexO
:8;
1030 typedef struct DisasFormatInfo
{
1031 DisasField op
[NUM_C_FIELD
];
1034 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1035 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1036 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1037 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1038 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1039 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1040 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1041 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1042 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1043 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1044 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1045 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1046 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1047 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1049 #define F0(N) { { } },
1050 #define F1(N, X1) { { X1 } },
1051 #define F2(N, X1, X2) { { X1, X2 } },
1052 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1053 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1054 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1056 static const DisasFormatInfo format_info
[] = {
1057 #include "insn-format.def"
1075 /* Generally, we'll extract operands into this structures, operate upon
1076 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1077 of routines below for more details. */
1079 bool g_out
, g_out2
, g_in1
, g_in2
;
1080 TCGv_i64 out
, out2
, in1
, in2
;
1084 /* Instructions can place constraints on their operands, raising specification
1085 exceptions if they are violated. To make this easy to automate, each "in1",
1086 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1087 of the following, or 0. To make this easy to document, we'll put the
1088 SPEC_<name> defines next to <name>. */
1090 #define SPEC_r1_even 1
1091 #define SPEC_r2_even 2
1092 #define SPEC_r3_even 4
1093 #define SPEC_r1_f128 8
1094 #define SPEC_r2_f128 16
1096 /* Return values from translate_one, indicating the state of the TB. */
1098 /* We are not using a goto_tb (for whatever reason), but have updated
1099 the PC (for whatever reason), so there's no need to do it again on
1101 #define DISAS_PC_UPDATED DISAS_TARGET_0
1103 /* We have emitted one or more goto_tb. No fixup required. */
1104 #define DISAS_GOTO_TB DISAS_TARGET_1
1106 /* We have updated the PC and CC values. */
1107 #define DISAS_PC_CC_UPDATED DISAS_TARGET_2
1109 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1110 updated the PC for the next instruction to be executed. */
1111 #define DISAS_PC_STALE DISAS_TARGET_3
1113 /* We are exiting the TB to the main loop. */
1114 #define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4
1124 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1125 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1126 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1127 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1128 void (*help_cout
)(DisasContext
*, DisasOps
*);
1129 DisasJumpType (*help_op
)(DisasContext
*, DisasOps
*);
1134 /* ====================================================================== */
1135 /* Miscellaneous helpers, used by several operations. */
1137 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1138 DisasOps
*o
, int mask
)
1140 int b2
= get_field(f
, b2
);
1141 int d2
= get_field(f
, d2
);
1144 o
->in2
= tcg_const_i64(d2
& mask
);
1146 o
->in2
= get_address(s
, 0, b2
, d2
);
1147 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1151 static DisasJumpType
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1153 if (dest
== s
->pc_tmp
) {
1154 per_branch(s
, true);
1157 if (use_goto_tb(s
, dest
)) {
1159 per_breaking_event(s
);
1161 tcg_gen_movi_i64(psw_addr
, dest
);
1162 tcg_gen_exit_tb(s
->base
.tb
, 0);
1163 return DISAS_GOTO_TB
;
1165 tcg_gen_movi_i64(psw_addr
, dest
);
1166 per_branch(s
, false);
1167 return DISAS_PC_UPDATED
;
1171 static DisasJumpType
help_branch(DisasContext
*s
, DisasCompare
*c
,
1172 bool is_imm
, int imm
, TCGv_i64 cdest
)
1175 uint64_t dest
= s
->base
.pc_next
+ 2 * imm
;
1178 /* Take care of the special cases first. */
1179 if (c
->cond
== TCG_COND_NEVER
) {
1184 if (dest
== s
->pc_tmp
) {
1185 /* Branch to next. */
1186 per_branch(s
, true);
1190 if (c
->cond
== TCG_COND_ALWAYS
) {
1191 ret
= help_goto_direct(s
, dest
);
1196 /* E.g. bcr %r0 -> no branch. */
1200 if (c
->cond
== TCG_COND_ALWAYS
) {
1201 tcg_gen_mov_i64(psw_addr
, cdest
);
1202 per_branch(s
, false);
1203 ret
= DISAS_PC_UPDATED
;
1208 if (use_goto_tb(s
, s
->pc_tmp
)) {
1209 if (is_imm
&& use_goto_tb(s
, dest
)) {
1210 /* Both exits can use goto_tb. */
1213 lab
= gen_new_label();
1215 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1217 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1220 /* Branch not taken. */
1222 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
1223 tcg_gen_exit_tb(s
->base
.tb
, 0);
1227 per_breaking_event(s
);
1229 tcg_gen_movi_i64(psw_addr
, dest
);
1230 tcg_gen_exit_tb(s
->base
.tb
, 1);
1232 ret
= DISAS_GOTO_TB
;
1234 /* Fallthru can use goto_tb, but taken branch cannot. */
1235 /* Store taken branch destination before the brcond. This
1236 avoids having to allocate a new local temp to hold it.
1237 We'll overwrite this in the not taken case anyway. */
1239 tcg_gen_mov_i64(psw_addr
, cdest
);
1242 lab
= gen_new_label();
1244 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1246 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1249 /* Branch not taken. */
1252 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
1253 tcg_gen_exit_tb(s
->base
.tb
, 0);
1257 tcg_gen_movi_i64(psw_addr
, dest
);
1259 per_breaking_event(s
);
1260 ret
= DISAS_PC_UPDATED
;
1263 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1264 Most commonly we're single-stepping or some other condition that
1265 disables all use of goto_tb. Just update the PC and exit. */
1267 TCGv_i64 next
= tcg_const_i64(s
->pc_tmp
);
1269 cdest
= tcg_const_i64(dest
);
1273 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1275 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1277 TCGv_i32 t0
= tcg_temp_new_i32();
1278 TCGv_i64 t1
= tcg_temp_new_i64();
1279 TCGv_i64 z
= tcg_const_i64(0);
1280 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1281 tcg_gen_extu_i32_i64(t1
, t0
);
1282 tcg_temp_free_i32(t0
);
1283 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1284 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1285 tcg_temp_free_i64(t1
);
1286 tcg_temp_free_i64(z
);
1290 tcg_temp_free_i64(cdest
);
1292 tcg_temp_free_i64(next
);
1294 ret
= DISAS_PC_UPDATED
;
1302 /* ====================================================================== */
1303 /* The operations. These perform the bulk of the work for any insn,
1304 usually after the operands have been loaded and output initialized. */
1306 static DisasJumpType
op_abs(DisasContext
*s
, DisasOps
*o
)
1309 z
= tcg_const_i64(0);
1310 n
= tcg_temp_new_i64();
1311 tcg_gen_neg_i64(n
, o
->in2
);
1312 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1313 tcg_temp_free_i64(n
);
1314 tcg_temp_free_i64(z
);
1318 static DisasJumpType
op_absf32(DisasContext
*s
, DisasOps
*o
)
1320 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1324 static DisasJumpType
op_absf64(DisasContext
*s
, DisasOps
*o
)
1326 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1330 static DisasJumpType
op_absf128(DisasContext
*s
, DisasOps
*o
)
1332 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1333 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1337 static DisasJumpType
op_add(DisasContext
*s
, DisasOps
*o
)
1339 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1343 static DisasJumpType
op_addc(DisasContext
*s
, DisasOps
*o
)
1348 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1350 /* The carry flag is the msb of CC, therefore the branch mask that would
1351 create that comparison is 3. Feeding the generated comparison to
1352 setcond produces the carry flag that we desire. */
1353 disas_jcc(s
, &cmp
, 3);
1354 carry
= tcg_temp_new_i64();
1356 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1358 TCGv_i32 t
= tcg_temp_new_i32();
1359 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1360 tcg_gen_extu_i32_i64(carry
, t
);
1361 tcg_temp_free_i32(t
);
1365 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1366 tcg_temp_free_i64(carry
);
1370 static DisasJumpType
op_asi(DisasContext
*s
, DisasOps
*o
)
1372 o
->in1
= tcg_temp_new_i64();
1374 if (!s390_has_feat(S390_FEAT_STFLE_45
)) {
1375 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1377 /* Perform the atomic addition in memory. */
1378 tcg_gen_atomic_fetch_add_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1382 /* Recompute also for atomic case: needed for setting CC. */
1383 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1385 if (!s390_has_feat(S390_FEAT_STFLE_45
)) {
1386 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1391 static DisasJumpType
op_aeb(DisasContext
*s
, DisasOps
*o
)
1393 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1397 static DisasJumpType
op_adb(DisasContext
*s
, DisasOps
*o
)
1399 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1403 static DisasJumpType
op_axb(DisasContext
*s
, DisasOps
*o
)
1405 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1406 return_low128(o
->out2
);
1410 static DisasJumpType
op_and(DisasContext
*s
, DisasOps
*o
)
1412 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1416 static DisasJumpType
op_andi(DisasContext
*s
, DisasOps
*o
)
1418 int shift
= s
->insn
->data
& 0xff;
1419 int size
= s
->insn
->data
>> 8;
1420 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1423 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1424 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1425 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1427 /* Produce the CC from only the bits manipulated. */
1428 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1429 set_cc_nz_u64(s
, cc_dst
);
1433 static DisasJumpType
op_ni(DisasContext
*s
, DisasOps
*o
)
1435 o
->in1
= tcg_temp_new_i64();
1437 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1438 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1440 /* Perform the atomic operation in memory. */
1441 tcg_gen_atomic_fetch_and_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1445 /* Recompute also for atomic case: needed for setting CC. */
1446 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1448 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1449 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1454 static DisasJumpType
op_bas(DisasContext
*s
, DisasOps
*o
)
1456 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->pc_tmp
));
1458 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1459 per_branch(s
, false);
1460 return DISAS_PC_UPDATED
;
1466 static DisasJumpType
op_basi(DisasContext
*s
, DisasOps
*o
)
1468 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->pc_tmp
));
1469 return help_goto_direct(s
, s
->base
.pc_next
+ 2 * get_field(s
->fields
, i2
));
1472 static DisasJumpType
op_bc(DisasContext
*s
, DisasOps
*o
)
1474 int m1
= get_field(s
->fields
, m1
);
1475 bool is_imm
= have_field(s
->fields
, i2
);
1476 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1479 /* BCR with R2 = 0 causes no branching */
1480 if (have_field(s
->fields
, r2
) && get_field(s
->fields
, r2
) == 0) {
1482 /* Perform serialization */
1483 /* FIXME: check for fast-BCR-serialization facility */
1484 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1487 /* Perform serialization */
1488 /* FIXME: perform checkpoint-synchronisation */
1489 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1494 disas_jcc(s
, &c
, m1
);
1495 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1498 static DisasJumpType
op_bct32(DisasContext
*s
, DisasOps
*o
)
1500 int r1
= get_field(s
->fields
, r1
);
1501 bool is_imm
= have_field(s
->fields
, i2
);
1502 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1506 c
.cond
= TCG_COND_NE
;
1511 t
= tcg_temp_new_i64();
1512 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1513 store_reg32_i64(r1
, t
);
1514 c
.u
.s32
.a
= tcg_temp_new_i32();
1515 c
.u
.s32
.b
= tcg_const_i32(0);
1516 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1517 tcg_temp_free_i64(t
);
1519 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1522 static DisasJumpType
op_bcth(DisasContext
*s
, DisasOps
*o
)
1524 int r1
= get_field(s
->fields
, r1
);
1525 int imm
= get_field(s
->fields
, i2
);
1529 c
.cond
= TCG_COND_NE
;
1534 t
= tcg_temp_new_i64();
1535 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1536 tcg_gen_subi_i64(t
, t
, 1);
1537 store_reg32h_i64(r1
, t
);
1538 c
.u
.s32
.a
= tcg_temp_new_i32();
1539 c
.u
.s32
.b
= tcg_const_i32(0);
1540 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1541 tcg_temp_free_i64(t
);
1543 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1546 static DisasJumpType
op_bct64(DisasContext
*s
, DisasOps
*o
)
1548 int r1
= get_field(s
->fields
, r1
);
1549 bool is_imm
= have_field(s
->fields
, i2
);
1550 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1553 c
.cond
= TCG_COND_NE
;
1558 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1559 c
.u
.s64
.a
= regs
[r1
];
1560 c
.u
.s64
.b
= tcg_const_i64(0);
1562 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1565 static DisasJumpType
op_bx32(DisasContext
*s
, DisasOps
*o
)
1567 int r1
= get_field(s
->fields
, r1
);
1568 int r3
= get_field(s
->fields
, r3
);
1569 bool is_imm
= have_field(s
->fields
, i2
);
1570 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1574 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1579 t
= tcg_temp_new_i64();
1580 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1581 c
.u
.s32
.a
= tcg_temp_new_i32();
1582 c
.u
.s32
.b
= tcg_temp_new_i32();
1583 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1584 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1585 store_reg32_i64(r1
, t
);
1586 tcg_temp_free_i64(t
);
1588 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1591 static DisasJumpType
op_bx64(DisasContext
*s
, DisasOps
*o
)
1593 int r1
= get_field(s
->fields
, r1
);
1594 int r3
= get_field(s
->fields
, r3
);
1595 bool is_imm
= have_field(s
->fields
, i2
);
1596 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1599 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1602 if (r1
== (r3
| 1)) {
1603 c
.u
.s64
.b
= load_reg(r3
| 1);
1606 c
.u
.s64
.b
= regs
[r3
| 1];
1610 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1611 c
.u
.s64
.a
= regs
[r1
];
1614 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1617 static DisasJumpType
op_cj(DisasContext
*s
, DisasOps
*o
)
1619 int imm
, m3
= get_field(s
->fields
, m3
);
1623 c
.cond
= ltgt_cond
[m3
];
1624 if (s
->insn
->data
) {
1625 c
.cond
= tcg_unsigned_cond(c
.cond
);
1627 c
.is_64
= c
.g1
= c
.g2
= true;
1631 is_imm
= have_field(s
->fields
, i4
);
1633 imm
= get_field(s
->fields
, i4
);
1636 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1637 get_field(s
->fields
, d4
));
1640 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1643 static DisasJumpType
op_ceb(DisasContext
*s
, DisasOps
*o
)
1645 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1650 static DisasJumpType
op_cdb(DisasContext
*s
, DisasOps
*o
)
1652 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1657 static DisasJumpType
op_cxb(DisasContext
*s
, DisasOps
*o
)
1659 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1664 static DisasJumpType
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1666 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1667 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1668 tcg_temp_free_i32(m3
);
1669 gen_set_cc_nz_f32(s
, o
->in2
);
1673 static DisasJumpType
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1675 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1676 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1677 tcg_temp_free_i32(m3
);
1678 gen_set_cc_nz_f64(s
, o
->in2
);
1682 static DisasJumpType
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1684 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1685 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1686 tcg_temp_free_i32(m3
);
1687 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1691 static DisasJumpType
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1693 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1694 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1695 tcg_temp_free_i32(m3
);
1696 gen_set_cc_nz_f32(s
, o
->in2
);
1700 static DisasJumpType
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1702 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1703 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1704 tcg_temp_free_i32(m3
);
1705 gen_set_cc_nz_f64(s
, o
->in2
);
1709 static DisasJumpType
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1711 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1712 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1713 tcg_temp_free_i32(m3
);
1714 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1718 static DisasJumpType
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1720 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1721 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1722 tcg_temp_free_i32(m3
);
1723 gen_set_cc_nz_f32(s
, o
->in2
);
1727 static DisasJumpType
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1729 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1730 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1731 tcg_temp_free_i32(m3
);
1732 gen_set_cc_nz_f64(s
, o
->in2
);
1736 static DisasJumpType
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1738 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1739 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1740 tcg_temp_free_i32(m3
);
1741 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1745 static DisasJumpType
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1747 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1748 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1749 tcg_temp_free_i32(m3
);
1750 gen_set_cc_nz_f32(s
, o
->in2
);
1754 static DisasJumpType
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1756 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1757 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1758 tcg_temp_free_i32(m3
);
1759 gen_set_cc_nz_f64(s
, o
->in2
);
1763 static DisasJumpType
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1765 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1766 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1767 tcg_temp_free_i32(m3
);
1768 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1772 static DisasJumpType
op_cegb(DisasContext
*s
, DisasOps
*o
)
1774 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1775 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1776 tcg_temp_free_i32(m3
);
1780 static DisasJumpType
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1782 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1783 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1784 tcg_temp_free_i32(m3
);
1788 static DisasJumpType
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1790 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1791 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1792 tcg_temp_free_i32(m3
);
1793 return_low128(o
->out2
);
1797 static DisasJumpType
op_celgb(DisasContext
*s
, DisasOps
*o
)
1799 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1800 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1801 tcg_temp_free_i32(m3
);
1805 static DisasJumpType
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1807 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1808 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1809 tcg_temp_free_i32(m3
);
1813 static DisasJumpType
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1815 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1816 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1817 tcg_temp_free_i32(m3
);
1818 return_low128(o
->out2
);
1822 static DisasJumpType
op_cksm(DisasContext
*s
, DisasOps
*o
)
1824 int r2
= get_field(s
->fields
, r2
);
1825 TCGv_i64 len
= tcg_temp_new_i64();
1827 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1829 return_low128(o
->out
);
1831 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1832 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1833 tcg_temp_free_i64(len
);
1838 static DisasJumpType
op_clc(DisasContext
*s
, DisasOps
*o
)
1840 int l
= get_field(s
->fields
, l1
);
1845 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1846 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1849 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1850 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1853 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1854 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1857 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1858 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1861 vl
= tcg_const_i32(l
);
1862 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1863 tcg_temp_free_i32(vl
);
1867 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1871 static DisasJumpType
op_clcl(DisasContext
*s
, DisasOps
*o
)
1873 int r1
= get_field(s
->fields
, r1
);
1874 int r2
= get_field(s
->fields
, r2
);
1877 /* r1 and r2 must be even. */
1878 if (r1
& 1 || r2
& 1) {
1879 gen_program_exception(s
, PGM_SPECIFICATION
);
1880 return DISAS_NORETURN
;
1883 t1
= tcg_const_i32(r1
);
1884 t2
= tcg_const_i32(r2
);
1885 gen_helper_clcl(cc_op
, cpu_env
, t1
, t2
);
1886 tcg_temp_free_i32(t1
);
1887 tcg_temp_free_i32(t2
);
1892 static DisasJumpType
op_clcle(DisasContext
*s
, DisasOps
*o
)
1894 int r1
= get_field(s
->fields
, r1
);
1895 int r3
= get_field(s
->fields
, r3
);
1898 /* r1 and r3 must be even. */
1899 if (r1
& 1 || r3
& 1) {
1900 gen_program_exception(s
, PGM_SPECIFICATION
);
1901 return DISAS_NORETURN
;
1904 t1
= tcg_const_i32(r1
);
1905 t3
= tcg_const_i32(r3
);
1906 gen_helper_clcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1907 tcg_temp_free_i32(t1
);
1908 tcg_temp_free_i32(t3
);
1913 static DisasJumpType
op_clclu(DisasContext
*s
, DisasOps
*o
)
1915 int r1
= get_field(s
->fields
, r1
);
1916 int r3
= get_field(s
->fields
, r3
);
1919 /* r1 and r3 must be even. */
1920 if (r1
& 1 || r3
& 1) {
1921 gen_program_exception(s
, PGM_SPECIFICATION
);
1922 return DISAS_NORETURN
;
1925 t1
= tcg_const_i32(r1
);
1926 t3
= tcg_const_i32(r3
);
1927 gen_helper_clclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1928 tcg_temp_free_i32(t1
);
1929 tcg_temp_free_i32(t3
);
1934 static DisasJumpType
op_clm(DisasContext
*s
, DisasOps
*o
)
1936 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1937 TCGv_i32 t1
= tcg_temp_new_i32();
1938 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1939 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1941 tcg_temp_free_i32(t1
);
1942 tcg_temp_free_i32(m3
);
1946 static DisasJumpType
op_clst(DisasContext
*s
, DisasOps
*o
)
1948 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1950 return_low128(o
->in2
);
1954 static DisasJumpType
op_cps(DisasContext
*s
, DisasOps
*o
)
1956 TCGv_i64 t
= tcg_temp_new_i64();
1957 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1958 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1959 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1960 tcg_temp_free_i64(t
);
1964 static DisasJumpType
op_cs(DisasContext
*s
, DisasOps
*o
)
1966 int d2
= get_field(s
->fields
, d2
);
1967 int b2
= get_field(s
->fields
, b2
);
1970 /* Note that in1 = R3 (new value) and
1971 in2 = (zero-extended) R1 (expected value). */
1973 addr
= get_address(s
, 0, b2
, d2
);
1974 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
1975 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
1976 tcg_temp_free_i64(addr
);
1978 /* Are the memory and expected values (un)equal? Note that this setcond
1979 produces the output CC value, thus the NE sense of the test. */
1980 cc
= tcg_temp_new_i64();
1981 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1982 tcg_gen_extrl_i64_i32(cc_op
, cc
);
1983 tcg_temp_free_i64(cc
);
1989 static DisasJumpType
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1991 int r1
= get_field(s
->fields
, r1
);
1992 int r3
= get_field(s
->fields
, r3
);
1993 int d2
= get_field(s
->fields
, d2
);
1994 int b2
= get_field(s
->fields
, b2
);
1996 TCGv_i32 t_r1
, t_r3
;
1998 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1999 addr
= get_address(s
, 0, b2
, d2
);
2000 t_r1
= tcg_const_i32(r1
);
2001 t_r3
= tcg_const_i32(r3
);
2002 if (tb_cflags(s
->base
.tb
) & CF_PARALLEL
) {
2003 gen_helper_cdsg_parallel(cpu_env
, addr
, t_r1
, t_r3
);
2005 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
2007 tcg_temp_free_i64(addr
);
2008 tcg_temp_free_i32(t_r1
);
2009 tcg_temp_free_i32(t_r3
);
2015 static DisasJumpType
op_csst(DisasContext
*s
, DisasOps
*o
)
2017 int r3
= get_field(s
->fields
, r3
);
2018 TCGv_i32 t_r3
= tcg_const_i32(r3
);
2020 if (tb_cflags(s
->base
.tb
) & CF_PARALLEL
) {
2021 gen_helper_csst_parallel(cc_op
, cpu_env
, t_r3
, o
->in1
, o
->in2
);
2023 gen_helper_csst(cc_op
, cpu_env
, t_r3
, o
->in1
, o
->in2
);
2025 tcg_temp_free_i32(t_r3
);
2031 #ifndef CONFIG_USER_ONLY
2032 static DisasJumpType
op_csp(DisasContext
*s
, DisasOps
*o
)
2034 TCGMemOp mop
= s
->insn
->data
;
2035 TCGv_i64 addr
, old
, cc
;
2036 TCGLabel
*lab
= gen_new_label();
2038 /* Note that in1 = R1 (zero-extended expected value),
2039 out = R1 (original reg), out2 = R1+1 (new value). */
2041 check_privileged(s
);
2042 addr
= tcg_temp_new_i64();
2043 old
= tcg_temp_new_i64();
2044 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2045 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2046 get_mem_index(s
), mop
| MO_ALIGN
);
2047 tcg_temp_free_i64(addr
);
2049 /* Are the memory and expected values (un)equal? */
2050 cc
= tcg_temp_new_i64();
2051 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2052 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2054 /* Write back the output now, so that it happens before the
2055 following branch, so that we don't need local temps. */
2056 if ((mop
& MO_SIZE
) == MO_32
) {
2057 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2059 tcg_gen_mov_i64(o
->out
, old
);
2061 tcg_temp_free_i64(old
);
2063 /* If the comparison was equal, and the LSB of R2 was set,
2064 then we need to flush the TLB (for all cpus). */
2065 tcg_gen_xori_i64(cc
, cc
, 1);
2066 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2067 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2068 tcg_temp_free_i64(cc
);
2070 gen_helper_purge(cpu_env
);
2077 static DisasJumpType
op_cvd(DisasContext
*s
, DisasOps
*o
)
2079 TCGv_i64 t1
= tcg_temp_new_i64();
2080 TCGv_i32 t2
= tcg_temp_new_i32();
2081 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2082 gen_helper_cvd(t1
, t2
);
2083 tcg_temp_free_i32(t2
);
2084 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2085 tcg_temp_free_i64(t1
);
2089 static DisasJumpType
op_ct(DisasContext
*s
, DisasOps
*o
)
2091 int m3
= get_field(s
->fields
, m3
);
2092 TCGLabel
*lab
= gen_new_label();
2095 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2096 if (s
->insn
->data
) {
2097 c
= tcg_unsigned_cond(c
);
2099 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2108 static DisasJumpType
op_cuXX(DisasContext
*s
, DisasOps
*o
)
2110 int m3
= get_field(s
->fields
, m3
);
2111 int r1
= get_field(s
->fields
, r1
);
2112 int r2
= get_field(s
->fields
, r2
);
2113 TCGv_i32 tr1
, tr2
, chk
;
2115 /* R1 and R2 must both be even. */
2116 if ((r1
| r2
) & 1) {
2117 gen_program_exception(s
, PGM_SPECIFICATION
);
2118 return DISAS_NORETURN
;
2120 if (!s390_has_feat(S390_FEAT_ETF3_ENH
)) {
2124 tr1
= tcg_const_i32(r1
);
2125 tr2
= tcg_const_i32(r2
);
2126 chk
= tcg_const_i32(m3
);
2128 switch (s
->insn
->data
) {
2130 gen_helper_cu12(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2133 gen_helper_cu14(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2136 gen_helper_cu21(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2139 gen_helper_cu24(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2142 gen_helper_cu41(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2145 gen_helper_cu42(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2148 g_assert_not_reached();
2151 tcg_temp_free_i32(tr1
);
2152 tcg_temp_free_i32(tr2
);
2153 tcg_temp_free_i32(chk
);
2158 #ifndef CONFIG_USER_ONLY
2159 static DisasJumpType
op_diag(DisasContext
*s
, DisasOps
*o
)
2161 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2162 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2163 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2165 check_privileged(s
);
2166 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2168 tcg_temp_free_i32(func_code
);
2169 tcg_temp_free_i32(r3
);
2170 tcg_temp_free_i32(r1
);
2175 static DisasJumpType
op_divs32(DisasContext
*s
, DisasOps
*o
)
2177 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2178 return_low128(o
->out
);
2182 static DisasJumpType
op_divu32(DisasContext
*s
, DisasOps
*o
)
2184 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2185 return_low128(o
->out
);
2189 static DisasJumpType
op_divs64(DisasContext
*s
, DisasOps
*o
)
2191 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2192 return_low128(o
->out
);
2196 static DisasJumpType
op_divu64(DisasContext
*s
, DisasOps
*o
)
2198 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2199 return_low128(o
->out
);
2203 static DisasJumpType
op_deb(DisasContext
*s
, DisasOps
*o
)
2205 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2209 static DisasJumpType
op_ddb(DisasContext
*s
, DisasOps
*o
)
2211 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2215 static DisasJumpType
op_dxb(DisasContext
*s
, DisasOps
*o
)
2217 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2218 return_low128(o
->out2
);
2222 static DisasJumpType
op_ear(DisasContext
*s
, DisasOps
*o
)
2224 int r2
= get_field(s
->fields
, r2
);
2225 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2229 static DisasJumpType
op_ecag(DisasContext
*s
, DisasOps
*o
)
2231 /* No cache information provided. */
2232 tcg_gen_movi_i64(o
->out
, -1);
2236 static DisasJumpType
op_efpc(DisasContext
*s
, DisasOps
*o
)
2238 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2242 static DisasJumpType
op_epsw(DisasContext
*s
, DisasOps
*o
)
2244 int r1
= get_field(s
->fields
, r1
);
2245 int r2
= get_field(s
->fields
, r2
);
2246 TCGv_i64 t
= tcg_temp_new_i64();
2248 /* Note the "subsequently" in the PoO, which implies a defined result
2249 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2250 tcg_gen_shri_i64(t
, psw_mask
, 32);
2251 store_reg32_i64(r1
, t
);
2253 store_reg32_i64(r2
, psw_mask
);
2256 tcg_temp_free_i64(t
);
2260 static DisasJumpType
op_ex(DisasContext
*s
, DisasOps
*o
)
2262 int r1
= get_field(s
->fields
, r1
);
2266 /* Nested EXECUTE is not allowed. */
2267 if (unlikely(s
->ex_value
)) {
2268 gen_program_exception(s
, PGM_EXECUTE
);
2269 return DISAS_NORETURN
;
2276 v1
= tcg_const_i64(0);
2281 ilen
= tcg_const_i32(s
->ilen
);
2282 gen_helper_ex(cpu_env
, ilen
, v1
, o
->in2
);
2283 tcg_temp_free_i32(ilen
);
2286 tcg_temp_free_i64(v1
);
2289 return DISAS_PC_CC_UPDATED
;
2292 static DisasJumpType
op_fieb(DisasContext
*s
, DisasOps
*o
)
2294 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2295 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2296 tcg_temp_free_i32(m3
);
2300 static DisasJumpType
op_fidb(DisasContext
*s
, DisasOps
*o
)
2302 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2303 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2304 tcg_temp_free_i32(m3
);
2308 static DisasJumpType
op_fixb(DisasContext
*s
, DisasOps
*o
)
2310 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2311 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2312 return_low128(o
->out2
);
2313 tcg_temp_free_i32(m3
);
2317 static DisasJumpType
op_flogr(DisasContext
*s
, DisasOps
*o
)
2319 /* We'll use the original input for cc computation, since we get to
2320 compare that against 0, which ought to be better than comparing
2321 the real output against 64. It also lets cc_dst be a convenient
2322 temporary during our computation. */
2323 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2325 /* R1 = IN ? CLZ(IN) : 64. */
2326 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2328 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2329 value by 64, which is undefined. But since the shift is 64 iff the
2330 input is zero, we still get the correct result after and'ing. */
2331 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2332 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2333 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2337 static DisasJumpType
op_icm(DisasContext
*s
, DisasOps
*o
)
2339 int m3
= get_field(s
->fields
, m3
);
2340 int pos
, len
, base
= s
->insn
->data
;
2341 TCGv_i64 tmp
= tcg_temp_new_i64();
2346 /* Effectively a 32-bit load. */
2347 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2354 /* Effectively a 16-bit load. */
2355 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2363 /* Effectively an 8-bit load. */
2364 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2369 pos
= base
+ ctz32(m3
) * 8;
2370 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2371 ccm
= ((1ull << len
) - 1) << pos
;
2375 /* This is going to be a sequence of loads and inserts. */
2376 pos
= base
+ 32 - 8;
2380 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2381 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2382 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2385 m3
= (m3
<< 1) & 0xf;
2391 tcg_gen_movi_i64(tmp
, ccm
);
2392 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2393 tcg_temp_free_i64(tmp
);
2397 static DisasJumpType
op_insi(DisasContext
*s
, DisasOps
*o
)
2399 int shift
= s
->insn
->data
& 0xff;
2400 int size
= s
->insn
->data
>> 8;
2401 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2405 static DisasJumpType
op_ipm(DisasContext
*s
, DisasOps
*o
)
2410 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2412 t1
= tcg_temp_new_i64();
2413 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2414 tcg_gen_shri_i64(t1
, t1
, 36);
2415 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2417 tcg_gen_extu_i32_i64(t1
, cc_op
);
2418 tcg_gen_shli_i64(t1
, t1
, 28);
2419 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2420 tcg_temp_free_i64(t1
);
2424 #ifndef CONFIG_USER_ONLY
2425 static DisasJumpType
op_idte(DisasContext
*s
, DisasOps
*o
)
2429 check_privileged(s
);
2430 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2431 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2433 m4
= tcg_const_i32(0);
2435 gen_helper_idte(cpu_env
, o
->in1
, o
->in2
, m4
);
2436 tcg_temp_free_i32(m4
);
2440 static DisasJumpType
op_ipte(DisasContext
*s
, DisasOps
*o
)
2444 check_privileged(s
);
2445 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2446 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2448 m4
= tcg_const_i32(0);
2450 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
, m4
);
2451 tcg_temp_free_i32(m4
);
2455 static DisasJumpType
op_iske(DisasContext
*s
, DisasOps
*o
)
2457 check_privileged(s
);
2458 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2463 static DisasJumpType
op_msa(DisasContext
*s
, DisasOps
*o
)
2465 int r1
= have_field(s
->fields
, r1
) ? get_field(s
->fields
, r1
) : 0;
2466 int r2
= have_field(s
->fields
, r2
) ? get_field(s
->fields
, r2
) : 0;
2467 int r3
= have_field(s
->fields
, r3
) ? get_field(s
->fields
, r3
) : 0;
2468 TCGv_i32 t_r1
, t_r2
, t_r3
, type
;
2470 switch (s
->insn
->data
) {
2471 case S390_FEAT_TYPE_KMCTR
:
2472 if (r3
& 1 || !r3
) {
2473 gen_program_exception(s
, PGM_SPECIFICATION
);
2474 return DISAS_NORETURN
;
2477 case S390_FEAT_TYPE_PPNO
:
2478 case S390_FEAT_TYPE_KMF
:
2479 case S390_FEAT_TYPE_KMC
:
2480 case S390_FEAT_TYPE_KMO
:
2481 case S390_FEAT_TYPE_KM
:
2482 if (r1
& 1 || !r1
) {
2483 gen_program_exception(s
, PGM_SPECIFICATION
);
2484 return DISAS_NORETURN
;
2487 case S390_FEAT_TYPE_KMAC
:
2488 case S390_FEAT_TYPE_KIMD
:
2489 case S390_FEAT_TYPE_KLMD
:
2490 if (r2
& 1 || !r2
) {
2491 gen_program_exception(s
, PGM_SPECIFICATION
);
2492 return DISAS_NORETURN
;
2495 case S390_FEAT_TYPE_PCKMO
:
2496 case S390_FEAT_TYPE_PCC
:
2499 g_assert_not_reached();
2502 t_r1
= tcg_const_i32(r1
);
2503 t_r2
= tcg_const_i32(r2
);
2504 t_r3
= tcg_const_i32(r3
);
2505 type
= tcg_const_i32(s
->insn
->data
);
2506 gen_helper_msa(cc_op
, cpu_env
, t_r1
, t_r2
, t_r3
, type
);
2508 tcg_temp_free_i32(t_r1
);
2509 tcg_temp_free_i32(t_r2
);
2510 tcg_temp_free_i32(t_r3
);
2511 tcg_temp_free_i32(type
);
2515 static DisasJumpType
op_keb(DisasContext
*s
, DisasOps
*o
)
2517 gen_helper_keb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2522 static DisasJumpType
op_kdb(DisasContext
*s
, DisasOps
*o
)
2524 gen_helper_kdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2529 static DisasJumpType
op_kxb(DisasContext
*s
, DisasOps
*o
)
2531 gen_helper_kxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2536 static DisasJumpType
op_laa(DisasContext
*s
, DisasOps
*o
)
2538 /* The real output is indeed the original value in memory;
2539 recompute the addition for the computation of CC. */
2540 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2541 s
->insn
->data
| MO_ALIGN
);
2542 /* However, we need to recompute the addition for setting CC. */
2543 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2547 static DisasJumpType
op_lan(DisasContext
*s
, DisasOps
*o
)
2549 /* The real output is indeed the original value in memory;
2550 recompute the addition for the computation of CC. */
2551 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2552 s
->insn
->data
| MO_ALIGN
);
2553 /* However, we need to recompute the operation for setting CC. */
2554 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2558 static DisasJumpType
op_lao(DisasContext
*s
, DisasOps
*o
)
2560 /* The real output is indeed the original value in memory;
2561 recompute the addition for the computation of CC. */
2562 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2563 s
->insn
->data
| MO_ALIGN
);
2564 /* However, we need to recompute the operation for setting CC. */
2565 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2569 static DisasJumpType
op_lax(DisasContext
*s
, DisasOps
*o
)
2571 /* The real output is indeed the original value in memory;
2572 recompute the addition for the computation of CC. */
2573 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2574 s
->insn
->data
| MO_ALIGN
);
2575 /* However, we need to recompute the operation for setting CC. */
2576 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2580 static DisasJumpType
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2582 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2586 static DisasJumpType
op_ledb(DisasContext
*s
, DisasOps
*o
)
2588 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2592 static DisasJumpType
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2594 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2598 static DisasJumpType
op_lexb(DisasContext
*s
, DisasOps
*o
)
2600 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2604 static DisasJumpType
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2606 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2607 return_low128(o
->out2
);
2611 static DisasJumpType
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2613 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2614 return_low128(o
->out2
);
2618 static DisasJumpType
op_llgt(DisasContext
*s
, DisasOps
*o
)
2620 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2624 static DisasJumpType
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2626 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2630 static DisasJumpType
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2632 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2636 static DisasJumpType
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2638 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2642 static DisasJumpType
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2644 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2648 static DisasJumpType
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2650 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2654 static DisasJumpType
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2656 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2660 static DisasJumpType
op_ld64(DisasContext
*s
, DisasOps
*o
)
2662 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2666 static DisasJumpType
op_lat(DisasContext
*s
, DisasOps
*o
)
2668 TCGLabel
*lab
= gen_new_label();
2669 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2670 /* The value is stored even in case of trap. */
2671 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2677 static DisasJumpType
op_lgat(DisasContext
*s
, DisasOps
*o
)
2679 TCGLabel
*lab
= gen_new_label();
2680 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2681 /* The value is stored even in case of trap. */
2682 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2688 static DisasJumpType
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2690 TCGLabel
*lab
= gen_new_label();
2691 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2692 /* The value is stored even in case of trap. */
2693 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2699 static DisasJumpType
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2701 TCGLabel
*lab
= gen_new_label();
2702 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2703 /* The value is stored even in case of trap. */
2704 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2710 static DisasJumpType
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2712 TCGLabel
*lab
= gen_new_label();
2713 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2714 /* The value is stored even in case of trap. */
2715 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2721 static DisasJumpType
op_loc(DisasContext
*s
, DisasOps
*o
)
2725 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2728 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2732 TCGv_i32 t32
= tcg_temp_new_i32();
2735 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2738 t
= tcg_temp_new_i64();
2739 tcg_gen_extu_i32_i64(t
, t32
);
2740 tcg_temp_free_i32(t32
);
2742 z
= tcg_const_i64(0);
2743 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2744 tcg_temp_free_i64(t
);
2745 tcg_temp_free_i64(z
);
2751 #ifndef CONFIG_USER_ONLY
2752 static DisasJumpType
op_lctl(DisasContext
*s
, DisasOps
*o
)
2754 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2755 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2756 check_privileged(s
);
2757 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2758 tcg_temp_free_i32(r1
);
2759 tcg_temp_free_i32(r3
);
2760 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2761 return DISAS_PC_STALE_NOCHAIN
;
2764 static DisasJumpType
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2766 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2767 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2768 check_privileged(s
);
2769 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2770 tcg_temp_free_i32(r1
);
2771 tcg_temp_free_i32(r3
);
2772 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2773 return DISAS_PC_STALE_NOCHAIN
;
2776 static DisasJumpType
op_lra(DisasContext
*s
, DisasOps
*o
)
2778 check_privileged(s
);
2779 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2784 static DisasJumpType
op_lpp(DisasContext
*s
, DisasOps
*o
)
2786 check_privileged(s
);
2788 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
2792 static DisasJumpType
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2796 check_privileged(s
);
2797 per_breaking_event(s
);
2799 t1
= tcg_temp_new_i64();
2800 t2
= tcg_temp_new_i64();
2801 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2802 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2803 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2804 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2805 tcg_gen_shli_i64(t1
, t1
, 32);
2806 gen_helper_load_psw(cpu_env
, t1
, t2
);
2807 tcg_temp_free_i64(t1
);
2808 tcg_temp_free_i64(t2
);
2809 return DISAS_NORETURN
;
2812 static DisasJumpType
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2816 check_privileged(s
);
2817 per_breaking_event(s
);
2819 t1
= tcg_temp_new_i64();
2820 t2
= tcg_temp_new_i64();
2821 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2822 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2823 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2824 gen_helper_load_psw(cpu_env
, t1
, t2
);
2825 tcg_temp_free_i64(t1
);
2826 tcg_temp_free_i64(t2
);
2827 return DISAS_NORETURN
;
2831 static DisasJumpType
op_lam(DisasContext
*s
, DisasOps
*o
)
2833 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2834 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2835 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2836 tcg_temp_free_i32(r1
);
2837 tcg_temp_free_i32(r3
);
2841 static DisasJumpType
op_lm32(DisasContext
*s
, DisasOps
*o
)
2843 int r1
= get_field(s
->fields
, r1
);
2844 int r3
= get_field(s
->fields
, r3
);
2847 /* Only one register to read. */
2848 t1
= tcg_temp_new_i64();
2849 if (unlikely(r1
== r3
)) {
2850 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2851 store_reg32_i64(r1
, t1
);
2856 /* First load the values of the first and last registers to trigger
2857 possible page faults. */
2858 t2
= tcg_temp_new_i64();
2859 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2860 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2861 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2862 store_reg32_i64(r1
, t1
);
2863 store_reg32_i64(r3
, t2
);
2865 /* Only two registers to read. */
2866 if (((r1
+ 1) & 15) == r3
) {
2872 /* Then load the remaining registers. Page fault can't occur. */
2874 tcg_gen_movi_i64(t2
, 4);
2877 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2878 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2879 store_reg32_i64(r1
, t1
);
2887 static DisasJumpType
op_lmh(DisasContext
*s
, DisasOps
*o
)
2889 int r1
= get_field(s
->fields
, r1
);
2890 int r3
= get_field(s
->fields
, r3
);
2893 /* Only one register to read. */
2894 t1
= tcg_temp_new_i64();
2895 if (unlikely(r1
== r3
)) {
2896 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2897 store_reg32h_i64(r1
, t1
);
2902 /* First load the values of the first and last registers to trigger
2903 possible page faults. */
2904 t2
= tcg_temp_new_i64();
2905 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2906 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2907 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2908 store_reg32h_i64(r1
, t1
);
2909 store_reg32h_i64(r3
, t2
);
2911 /* Only two registers to read. */
2912 if (((r1
+ 1) & 15) == r3
) {
2918 /* Then load the remaining registers. Page fault can't occur. */
2920 tcg_gen_movi_i64(t2
, 4);
2923 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2924 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2925 store_reg32h_i64(r1
, t1
);
2933 static DisasJumpType
op_lm64(DisasContext
*s
, DisasOps
*o
)
2935 int r1
= get_field(s
->fields
, r1
);
2936 int r3
= get_field(s
->fields
, r3
);
2939 /* Only one register to read. */
2940 if (unlikely(r1
== r3
)) {
2941 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2945 /* First load the values of the first and last registers to trigger
2946 possible page faults. */
2947 t1
= tcg_temp_new_i64();
2948 t2
= tcg_temp_new_i64();
2949 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2950 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2951 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2952 tcg_gen_mov_i64(regs
[r1
], t1
);
2955 /* Only two registers to read. */
2956 if (((r1
+ 1) & 15) == r3
) {
2961 /* Then load the remaining registers. Page fault can't occur. */
2963 tcg_gen_movi_i64(t1
, 8);
2966 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2967 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2974 static DisasJumpType
op_lpd(DisasContext
*s
, DisasOps
*o
)
2977 TCGMemOp mop
= s
->insn
->data
;
2979 /* In a parallel context, stop the world and single step. */
2980 if (tb_cflags(s
->base
.tb
) & CF_PARALLEL
) {
2983 gen_exception(EXCP_ATOMIC
);
2984 return DISAS_NORETURN
;
2987 /* In a serial context, perform the two loads ... */
2988 a1
= get_address(s
, 0, get_field(s
->fields
, b1
), get_field(s
->fields
, d1
));
2989 a2
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
2990 tcg_gen_qemu_ld_i64(o
->out
, a1
, get_mem_index(s
), mop
| MO_ALIGN
);
2991 tcg_gen_qemu_ld_i64(o
->out2
, a2
, get_mem_index(s
), mop
| MO_ALIGN
);
2992 tcg_temp_free_i64(a1
);
2993 tcg_temp_free_i64(a2
);
2995 /* ... and indicate that we performed them while interlocked. */
2996 gen_op_movi_cc(s
, 0);
3000 static DisasJumpType
op_lpq(DisasContext
*s
, DisasOps
*o
)
3002 if (tb_cflags(s
->base
.tb
) & CF_PARALLEL
) {
3003 gen_helper_lpq_parallel(o
->out
, cpu_env
, o
->in2
);
3005 gen_helper_lpq(o
->out
, cpu_env
, o
->in2
);
3007 return_low128(o
->out2
);
3011 #ifndef CONFIG_USER_ONLY
3012 static DisasJumpType
op_lura(DisasContext
*s
, DisasOps
*o
)
3014 check_privileged(s
);
3015 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
3019 static DisasJumpType
op_lurag(DisasContext
*s
, DisasOps
*o
)
3021 check_privileged(s
);
3022 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
3027 static DisasJumpType
op_lzrb(DisasContext
*s
, DisasOps
*o
)
3029 tcg_gen_andi_i64(o
->out
, o
->in2
, -256);
3033 static DisasJumpType
op_mov2(DisasContext
*s
, DisasOps
*o
)
3036 o
->g_out
= o
->g_in2
;
3042 static DisasJumpType
op_mov2e(DisasContext
*s
, DisasOps
*o
)
3044 int b2
= get_field(s
->fields
, b2
);
3045 TCGv ar1
= tcg_temp_new_i64();
3048 o
->g_out
= o
->g_in2
;
3052 switch (s
->base
.tb
->flags
& FLAG_MASK_ASC
) {
3053 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
3054 tcg_gen_movi_i64(ar1
, 0);
3056 case PSW_ASC_ACCREG
>> FLAG_MASK_PSW_SHIFT
:
3057 tcg_gen_movi_i64(ar1
, 1);
3059 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
3061 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
3063 tcg_gen_movi_i64(ar1
, 0);
3066 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
3067 tcg_gen_movi_i64(ar1
, 2);
3071 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
3072 tcg_temp_free_i64(ar1
);
3077 static DisasJumpType
op_movx(DisasContext
*s
, DisasOps
*o
)
3081 o
->g_out
= o
->g_in1
;
3082 o
->g_out2
= o
->g_in2
;
3085 o
->g_in1
= o
->g_in2
= false;
3089 static DisasJumpType
op_mvc(DisasContext
*s
, DisasOps
*o
)
3091 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3092 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
3093 tcg_temp_free_i32(l
);
3097 static DisasJumpType
op_mvcin(DisasContext
*s
, DisasOps
*o
)
3099 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3100 gen_helper_mvcin(cpu_env
, l
, o
->addr1
, o
->in2
);
3101 tcg_temp_free_i32(l
);
3105 static DisasJumpType
op_mvcl(DisasContext
*s
, DisasOps
*o
)
3107 int r1
= get_field(s
->fields
, r1
);
3108 int r2
= get_field(s
->fields
, r2
);
3111 /* r1 and r2 must be even. */
3112 if (r1
& 1 || r2
& 1) {
3113 gen_program_exception(s
, PGM_SPECIFICATION
);
3114 return DISAS_NORETURN
;
3117 t1
= tcg_const_i32(r1
);
3118 t2
= tcg_const_i32(r2
);
3119 gen_helper_mvcl(cc_op
, cpu_env
, t1
, t2
);
3120 tcg_temp_free_i32(t1
);
3121 tcg_temp_free_i32(t2
);
3126 static DisasJumpType
op_mvcle(DisasContext
*s
, DisasOps
*o
)
3128 int r1
= get_field(s
->fields
, r1
);
3129 int r3
= get_field(s
->fields
, r3
);
3132 /* r1 and r3 must be even. */
3133 if (r1
& 1 || r3
& 1) {
3134 gen_program_exception(s
, PGM_SPECIFICATION
);
3135 return DISAS_NORETURN
;
3138 t1
= tcg_const_i32(r1
);
3139 t3
= tcg_const_i32(r3
);
3140 gen_helper_mvcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3141 tcg_temp_free_i32(t1
);
3142 tcg_temp_free_i32(t3
);
3147 static DisasJumpType
op_mvclu(DisasContext
*s
, DisasOps
*o
)
3149 int r1
= get_field(s
->fields
, r1
);
3150 int r3
= get_field(s
->fields
, r3
);
3153 /* r1 and r3 must be even. */
3154 if (r1
& 1 || r3
& 1) {
3155 gen_program_exception(s
, PGM_SPECIFICATION
);
3156 return DISAS_NORETURN
;
3159 t1
= tcg_const_i32(r1
);
3160 t3
= tcg_const_i32(r3
);
3161 gen_helper_mvclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3162 tcg_temp_free_i32(t1
);
3163 tcg_temp_free_i32(t3
);
3168 static DisasJumpType
op_mvcos(DisasContext
*s
, DisasOps
*o
)
3170 int r3
= get_field(s
->fields
, r3
);
3171 gen_helper_mvcos(cc_op
, cpu_env
, o
->addr1
, o
->in2
, regs
[r3
]);
3176 #ifndef CONFIG_USER_ONLY
3177 static DisasJumpType
op_mvcp(DisasContext
*s
, DisasOps
*o
)
3179 int r1
= get_field(s
->fields
, l1
);
3180 check_privileged(s
);
3181 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3186 static DisasJumpType
op_mvcs(DisasContext
*s
, DisasOps
*o
)
3188 int r1
= get_field(s
->fields
, l1
);
3189 check_privileged(s
);
3190 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3196 static DisasJumpType
op_mvn(DisasContext
*s
, DisasOps
*o
)
3198 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3199 gen_helper_mvn(cpu_env
, l
, o
->addr1
, o
->in2
);
3200 tcg_temp_free_i32(l
);
3204 static DisasJumpType
op_mvo(DisasContext
*s
, DisasOps
*o
)
3206 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3207 gen_helper_mvo(cpu_env
, l
, o
->addr1
, o
->in2
);
3208 tcg_temp_free_i32(l
);
3212 static DisasJumpType
op_mvpg(DisasContext
*s
, DisasOps
*o
)
3214 gen_helper_mvpg(cc_op
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3219 static DisasJumpType
op_mvst(DisasContext
*s
, DisasOps
*o
)
3221 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3223 return_low128(o
->in2
);
3227 static DisasJumpType
op_mvz(DisasContext
*s
, DisasOps
*o
)
3229 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3230 gen_helper_mvz(cpu_env
, l
, o
->addr1
, o
->in2
);
3231 tcg_temp_free_i32(l
);
3235 static DisasJumpType
op_mul(DisasContext
*s
, DisasOps
*o
)
3237 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3241 static DisasJumpType
op_mul128(DisasContext
*s
, DisasOps
*o
)
3243 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
3247 static DisasJumpType
op_meeb(DisasContext
*s
, DisasOps
*o
)
3249 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3253 static DisasJumpType
op_mdeb(DisasContext
*s
, DisasOps
*o
)
3255 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3259 static DisasJumpType
op_mdb(DisasContext
*s
, DisasOps
*o
)
3261 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3265 static DisasJumpType
op_mxb(DisasContext
*s
, DisasOps
*o
)
3267 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3268 return_low128(o
->out2
);
3272 static DisasJumpType
op_mxdb(DisasContext
*s
, DisasOps
*o
)
3274 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3275 return_low128(o
->out2
);
3279 static DisasJumpType
op_maeb(DisasContext
*s
, DisasOps
*o
)
3281 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3282 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3283 tcg_temp_free_i64(r3
);
3287 static DisasJumpType
op_madb(DisasContext
*s
, DisasOps
*o
)
3289 int r3
= get_field(s
->fields
, r3
);
3290 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3294 static DisasJumpType
op_mseb(DisasContext
*s
, DisasOps
*o
)
3296 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3297 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3298 tcg_temp_free_i64(r3
);
3302 static DisasJumpType
op_msdb(DisasContext
*s
, DisasOps
*o
)
3304 int r3
= get_field(s
->fields
, r3
);
3305 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3309 static DisasJumpType
op_nabs(DisasContext
*s
, DisasOps
*o
)
3312 z
= tcg_const_i64(0);
3313 n
= tcg_temp_new_i64();
3314 tcg_gen_neg_i64(n
, o
->in2
);
3315 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
3316 tcg_temp_free_i64(n
);
3317 tcg_temp_free_i64(z
);
3321 static DisasJumpType
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
3323 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3327 static DisasJumpType
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
3329 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3333 static DisasJumpType
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
3335 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3336 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3340 static DisasJumpType
op_nc(DisasContext
*s
, DisasOps
*o
)
3342 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3343 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3344 tcg_temp_free_i32(l
);
3349 static DisasJumpType
op_neg(DisasContext
*s
, DisasOps
*o
)
3351 tcg_gen_neg_i64(o
->out
, o
->in2
);
3355 static DisasJumpType
op_negf32(DisasContext
*s
, DisasOps
*o
)
3357 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3361 static DisasJumpType
op_negf64(DisasContext
*s
, DisasOps
*o
)
3363 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3367 static DisasJumpType
op_negf128(DisasContext
*s
, DisasOps
*o
)
3369 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3370 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3374 static DisasJumpType
op_oc(DisasContext
*s
, DisasOps
*o
)
3376 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3377 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3378 tcg_temp_free_i32(l
);
3383 static DisasJumpType
op_or(DisasContext
*s
, DisasOps
*o
)
3385 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3389 static DisasJumpType
op_ori(DisasContext
*s
, DisasOps
*o
)
3391 int shift
= s
->insn
->data
& 0xff;
3392 int size
= s
->insn
->data
>> 8;
3393 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3396 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3397 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3399 /* Produce the CC from only the bits manipulated. */
3400 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3401 set_cc_nz_u64(s
, cc_dst
);
3405 static DisasJumpType
op_oi(DisasContext
*s
, DisasOps
*o
)
3407 o
->in1
= tcg_temp_new_i64();
3409 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
3410 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
3412 /* Perform the atomic operation in memory. */
3413 tcg_gen_atomic_fetch_or_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
3417 /* Recompute also for atomic case: needed for setting CC. */
3418 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3420 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
3421 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
3426 static DisasJumpType
op_pack(DisasContext
*s
, DisasOps
*o
)
3428 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3429 gen_helper_pack(cpu_env
, l
, o
->addr1
, o
->in2
);
3430 tcg_temp_free_i32(l
);
3434 static DisasJumpType
op_pka(DisasContext
*s
, DisasOps
*o
)
3436 int l2
= get_field(s
->fields
, l2
) + 1;
3439 /* The length must not exceed 32 bytes. */
3441 gen_program_exception(s
, PGM_SPECIFICATION
);
3442 return DISAS_NORETURN
;
3444 l
= tcg_const_i32(l2
);
3445 gen_helper_pka(cpu_env
, o
->addr1
, o
->in2
, l
);
3446 tcg_temp_free_i32(l
);
3450 static DisasJumpType
op_pku(DisasContext
*s
, DisasOps
*o
)
3452 int l2
= get_field(s
->fields
, l2
) + 1;
3455 /* The length must be even and should not exceed 64 bytes. */
3456 if ((l2
& 1) || (l2
> 64)) {
3457 gen_program_exception(s
, PGM_SPECIFICATION
);
3458 return DISAS_NORETURN
;
3460 l
= tcg_const_i32(l2
);
3461 gen_helper_pku(cpu_env
, o
->addr1
, o
->in2
, l
);
3462 tcg_temp_free_i32(l
);
3466 static DisasJumpType
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3468 gen_helper_popcnt(o
->out
, o
->in2
);
3472 #ifndef CONFIG_USER_ONLY
3473 static DisasJumpType
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3475 check_privileged(s
);
3476 gen_helper_ptlb(cpu_env
);
3481 static DisasJumpType
op_risbg(DisasContext
*s
, DisasOps
*o
)
3483 int i3
= get_field(s
->fields
, i3
);
3484 int i4
= get_field(s
->fields
, i4
);
3485 int i5
= get_field(s
->fields
, i5
);
3486 int do_zero
= i4
& 0x80;
3487 uint64_t mask
, imask
, pmask
;
3490 /* Adjust the arguments for the specific insn. */
3491 switch (s
->fields
->op2
) {
3492 case 0x55: /* risbg */
3493 case 0x59: /* risbgn */
3498 case 0x5d: /* risbhg */
3501 pmask
= 0xffffffff00000000ull
;
3503 case 0x51: /* risblg */
3506 pmask
= 0x00000000ffffffffull
;
3509 g_assert_not_reached();
3512 /* MASK is the set of bits to be inserted from R2.
3513 Take care for I3/I4 wraparound. */
3516 mask
^= pmask
>> i4
>> 1;
3518 mask
|= ~(pmask
>> i4
>> 1);
3522 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3523 insns, we need to keep the other half of the register. */
3524 imask
= ~mask
| ~pmask
;
3532 if (s
->fields
->op2
== 0x5d) {
3536 /* In some cases we can implement this with extract. */
3537 if (imask
== 0 && pos
== 0 && len
> 0 && len
<= rot
) {
3538 tcg_gen_extract_i64(o
->out
, o
->in2
, 64 - rot
, len
);
3542 /* In some cases we can implement this with deposit. */
3543 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3544 /* Note that we rotate the bits to be inserted to the lsb, not to
3545 the position as described in the PoO. */
3546 rot
= (rot
- pos
) & 63;
3551 /* Rotate the input as necessary. */
3552 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3554 /* Insert the selected bits into the output. */
3557 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3559 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3561 } else if (imask
== 0) {
3562 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3564 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3565 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3566 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3571 static DisasJumpType
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3573 int i3
= get_field(s
->fields
, i3
);
3574 int i4
= get_field(s
->fields
, i4
);
3575 int i5
= get_field(s
->fields
, i5
);
3578 /* If this is a test-only form, arrange to discard the result. */
3580 o
->out
= tcg_temp_new_i64();
3588 /* MASK is the set of bits to be operated on from R2.
3589 Take care for I3/I4 wraparound. */
3592 mask
^= ~0ull >> i4
>> 1;
3594 mask
|= ~(~0ull >> i4
>> 1);
3597 /* Rotate the input as necessary. */
3598 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3601 switch (s
->fields
->op2
) {
3602 case 0x55: /* AND */
3603 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3604 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3607 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3608 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3610 case 0x57: /* XOR */
3611 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3612 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3619 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3620 set_cc_nz_u64(s
, cc_dst
);
3624 static DisasJumpType
op_rev16(DisasContext
*s
, DisasOps
*o
)
3626 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3630 static DisasJumpType
op_rev32(DisasContext
*s
, DisasOps
*o
)
3632 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3636 static DisasJumpType
op_rev64(DisasContext
*s
, DisasOps
*o
)
3638 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3642 static DisasJumpType
op_rll32(DisasContext
*s
, DisasOps
*o
)
3644 TCGv_i32 t1
= tcg_temp_new_i32();
3645 TCGv_i32 t2
= tcg_temp_new_i32();
3646 TCGv_i32 to
= tcg_temp_new_i32();
3647 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3648 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3649 tcg_gen_rotl_i32(to
, t1
, t2
);
3650 tcg_gen_extu_i32_i64(o
->out
, to
);
3651 tcg_temp_free_i32(t1
);
3652 tcg_temp_free_i32(t2
);
3653 tcg_temp_free_i32(to
);
3657 static DisasJumpType
op_rll64(DisasContext
*s
, DisasOps
*o
)
3659 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3663 #ifndef CONFIG_USER_ONLY
3664 static DisasJumpType
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3666 check_privileged(s
);
3667 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3672 static DisasJumpType
op_sacf(DisasContext
*s
, DisasOps
*o
)
3674 check_privileged(s
);
3675 gen_helper_sacf(cpu_env
, o
->in2
);
3676 /* Addressing mode has changed, so end the block. */
3677 return DISAS_PC_STALE
;
3681 static DisasJumpType
op_sam(DisasContext
*s
, DisasOps
*o
)
3683 int sam
= s
->insn
->data
;
3699 /* Bizarre but true, we check the address of the current insn for the
3700 specification exception, not the next to be executed. Thus the PoO
3701 documents that Bad Things Happen two bytes before the end. */
3702 if (s
->base
.pc_next
& ~mask
) {
3703 gen_program_exception(s
, PGM_SPECIFICATION
);
3704 return DISAS_NORETURN
;
3708 tsam
= tcg_const_i64(sam
);
3709 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3710 tcg_temp_free_i64(tsam
);
3712 /* Always exit the TB, since we (may have) changed execution mode. */
3713 return DISAS_PC_STALE
;
3716 static DisasJumpType
op_sar(DisasContext
*s
, DisasOps
*o
)
3718 int r1
= get_field(s
->fields
, r1
);
3719 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3723 static DisasJumpType
op_seb(DisasContext
*s
, DisasOps
*o
)
3725 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3729 static DisasJumpType
op_sdb(DisasContext
*s
, DisasOps
*o
)
3731 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3735 static DisasJumpType
op_sxb(DisasContext
*s
, DisasOps
*o
)
3737 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3738 return_low128(o
->out2
);
3742 static DisasJumpType
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3744 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3748 static DisasJumpType
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3750 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3754 static DisasJumpType
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3756 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3757 return_low128(o
->out2
);
3761 #ifndef CONFIG_USER_ONLY
3762 static DisasJumpType
op_servc(DisasContext
*s
, DisasOps
*o
)
3764 check_privileged(s
);
3765 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3770 static DisasJumpType
op_sigp(DisasContext
*s
, DisasOps
*o
)
3772 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3773 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3774 check_privileged(s
);
3775 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, r3
);
3777 tcg_temp_free_i32(r1
);
3778 tcg_temp_free_i32(r3
);
3783 static DisasJumpType
op_soc(DisasContext
*s
, DisasOps
*o
)
3790 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3792 /* We want to store when the condition is fulfilled, so branch
3793 out when it's not */
3794 c
.cond
= tcg_invert_cond(c
.cond
);
3796 lab
= gen_new_label();
3798 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3800 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3804 r1
= get_field(s
->fields
, r1
);
3805 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3806 switch (s
->insn
->data
) {
3808 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3811 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3813 case 2: /* STOCFH */
3814 h
= tcg_temp_new_i64();
3815 tcg_gen_shri_i64(h
, regs
[r1
], 32);
3816 tcg_gen_qemu_st32(h
, a
, get_mem_index(s
));
3817 tcg_temp_free_i64(h
);
3820 g_assert_not_reached();
3822 tcg_temp_free_i64(a
);
3828 static DisasJumpType
op_sla(DisasContext
*s
, DisasOps
*o
)
3830 uint64_t sign
= 1ull << s
->insn
->data
;
3831 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3832 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3833 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3834 /* The arithmetic left shift is curious in that it does not affect
3835 the sign bit. Copy that over from the source unchanged. */
3836 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3837 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3838 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3842 static DisasJumpType
op_sll(DisasContext
*s
, DisasOps
*o
)
3844 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3848 static DisasJumpType
op_sra(DisasContext
*s
, DisasOps
*o
)
3850 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3854 static DisasJumpType
op_srl(DisasContext
*s
, DisasOps
*o
)
3856 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3860 static DisasJumpType
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3862 gen_helper_sfpc(cpu_env
, o
->in2
);
3866 static DisasJumpType
op_sfas(DisasContext
*s
, DisasOps
*o
)
3868 gen_helper_sfas(cpu_env
, o
->in2
);
3872 static DisasJumpType
op_srnm(DisasContext
*s
, DisasOps
*o
)
3874 int b2
= get_field(s
->fields
, b2
);
3875 int d2
= get_field(s
->fields
, d2
);
3876 TCGv_i64 t1
= tcg_temp_new_i64();
3877 TCGv_i64 t2
= tcg_temp_new_i64();
3880 switch (s
->fields
->op2
) {
3881 case 0x99: /* SRNM */
3884 case 0xb8: /* SRNMB */
3887 case 0xb9: /* SRNMT */
3893 mask
= (1 << len
) - 1;
3895 /* Insert the value into the appropriate field of the FPC. */
3897 tcg_gen_movi_i64(t1
, d2
& mask
);
3899 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3900 tcg_gen_andi_i64(t1
, t1
, mask
);
3902 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3903 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3904 tcg_temp_free_i64(t1
);
3906 /* Then install the new FPC to set the rounding mode in fpu_status. */
3907 gen_helper_sfpc(cpu_env
, t2
);
3908 tcg_temp_free_i64(t2
);
3912 static DisasJumpType
op_spm(DisasContext
*s
, DisasOps
*o
)
3914 tcg_gen_extrl_i64_i32(cc_op
, o
->in1
);
3915 tcg_gen_extract_i32(cc_op
, cc_op
, 28, 2);
3918 tcg_gen_shri_i64(o
->in1
, o
->in1
, 24);
3919 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in1
, PSW_SHIFT_MASK_PM
, 4);
3923 static DisasJumpType
op_ectg(DisasContext
*s
, DisasOps
*o
)
3925 int b1
= get_field(s
->fields
, b1
);
3926 int d1
= get_field(s
->fields
, d1
);
3927 int b2
= get_field(s
->fields
, b2
);
3928 int d2
= get_field(s
->fields
, d2
);
3929 int r3
= get_field(s
->fields
, r3
);
3930 TCGv_i64 tmp
= tcg_temp_new_i64();
3932 /* fetch all operands first */
3933 o
->in1
= tcg_temp_new_i64();
3934 tcg_gen_addi_i64(o
->in1
, regs
[b1
], d1
);
3935 o
->in2
= tcg_temp_new_i64();
3936 tcg_gen_addi_i64(o
->in2
, regs
[b2
], d2
);
3937 o
->addr1
= get_address(s
, 0, r3
, 0);
3939 /* load the third operand into r3 before modifying anything */
3940 tcg_gen_qemu_ld64(regs
[r3
], o
->addr1
, get_mem_index(s
));
3942 /* subtract CPU timer from first operand and store in GR0 */
3943 gen_helper_stpt(tmp
, cpu_env
);
3944 tcg_gen_sub_i64(regs
[0], o
->in1
, tmp
);
3946 /* store second operand in GR1 */
3947 tcg_gen_mov_i64(regs
[1], o
->in2
);
3949 tcg_temp_free_i64(tmp
);
3953 #ifndef CONFIG_USER_ONLY
3954 static DisasJumpType
op_spka(DisasContext
*s
, DisasOps
*o
)
3956 check_privileged(s
);
3957 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3958 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
, 4);
3962 static DisasJumpType
op_sske(DisasContext
*s
, DisasOps
*o
)
3964 check_privileged(s
);
3965 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3969 static DisasJumpType
op_ssm(DisasContext
*s
, DisasOps
*o
)
3971 check_privileged(s
);
3972 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3973 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3974 return DISAS_PC_STALE_NOCHAIN
;
3977 static DisasJumpType
op_stap(DisasContext
*s
, DisasOps
*o
)
3979 check_privileged(s
);
3980 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, core_id
));
3984 static DisasJumpType
op_stck(DisasContext
*s
, DisasOps
*o
)
3986 gen_helper_stck(o
->out
, cpu_env
);
3987 /* ??? We don't implement clock states. */
3988 gen_op_movi_cc(s
, 0);
3992 static DisasJumpType
op_stcke(DisasContext
*s
, DisasOps
*o
)
3994 TCGv_i64 c1
= tcg_temp_new_i64();
3995 TCGv_i64 c2
= tcg_temp_new_i64();
3996 TCGv_i64 todpr
= tcg_temp_new_i64();
3997 gen_helper_stck(c1
, cpu_env
);
3998 /* 16 bit value store in an uint32_t (only valid bits set) */
3999 tcg_gen_ld32u_i64(todpr
, cpu_env
, offsetof(CPUS390XState
, todpr
));
4000 /* Shift the 64-bit value into its place as a zero-extended
4001 104-bit value. Note that "bit positions 64-103 are always
4002 non-zero so that they compare differently to STCK"; we set
4003 the least significant bit to 1. */
4004 tcg_gen_shli_i64(c2
, c1
, 56);
4005 tcg_gen_shri_i64(c1
, c1
, 8);
4006 tcg_gen_ori_i64(c2
, c2
, 0x10000);
4007 tcg_gen_or_i64(c2
, c2
, todpr
);
4008 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
4009 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
4010 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
4011 tcg_temp_free_i64(c1
);
4012 tcg_temp_free_i64(c2
);
4013 tcg_temp_free_i64(todpr
);
4014 /* ??? We don't implement clock states. */
4015 gen_op_movi_cc(s
, 0);
4019 static DisasJumpType
op_sck(DisasContext
*s
, DisasOps
*o
)
4021 check_privileged(s
);
4022 tcg_gen_qemu_ld_i64(o
->in1
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
4023 gen_helper_sck(cc_op
, cpu_env
, o
->in1
);
4028 static DisasJumpType
op_sckc(DisasContext
*s
, DisasOps
*o
)
4030 check_privileged(s
);
4031 gen_helper_sckc(cpu_env
, o
->in2
);
4035 static DisasJumpType
op_sckpf(DisasContext
*s
, DisasOps
*o
)
4037 check_privileged(s
);
4038 gen_helper_sckpf(cpu_env
, regs
[0]);
4042 static DisasJumpType
op_stckc(DisasContext
*s
, DisasOps
*o
)
4044 check_privileged(s
);
4045 gen_helper_stckc(o
->out
, cpu_env
);
4049 static DisasJumpType
op_stctg(DisasContext
*s
, DisasOps
*o
)
4051 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4052 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4053 check_privileged(s
);
4054 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
4055 tcg_temp_free_i32(r1
);
4056 tcg_temp_free_i32(r3
);
4060 static DisasJumpType
op_stctl(DisasContext
*s
, DisasOps
*o
)
4062 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4063 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4064 check_privileged(s
);
4065 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
4066 tcg_temp_free_i32(r1
);
4067 tcg_temp_free_i32(r3
);
4071 static DisasJumpType
op_stidp(DisasContext
*s
, DisasOps
*o
)
4073 check_privileged(s
);
4074 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpuid
));
4078 static DisasJumpType
op_spt(DisasContext
*s
, DisasOps
*o
)
4080 check_privileged(s
);
4081 gen_helper_spt(cpu_env
, o
->in2
);
4085 static DisasJumpType
op_stfl(DisasContext
*s
, DisasOps
*o
)
4087 check_privileged(s
);
4088 gen_helper_stfl(cpu_env
);
4092 static DisasJumpType
op_stpt(DisasContext
*s
, DisasOps
*o
)
4094 check_privileged(s
);
4095 gen_helper_stpt(o
->out
, cpu_env
);
4099 static DisasJumpType
op_stsi(DisasContext
*s
, DisasOps
*o
)
4101 check_privileged(s
);
4102 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
4107 static DisasJumpType
op_spx(DisasContext
*s
, DisasOps
*o
)
4109 check_privileged(s
);
4110 gen_helper_spx(cpu_env
, o
->in2
);
4114 static DisasJumpType
op_xsch(DisasContext
*s
, DisasOps
*o
)
4116 check_privileged(s
);
4117 gen_helper_xsch(cpu_env
, regs
[1]);
4122 static DisasJumpType
op_csch(DisasContext
*s
, DisasOps
*o
)
4124 check_privileged(s
);
4125 gen_helper_csch(cpu_env
, regs
[1]);
4130 static DisasJumpType
op_hsch(DisasContext
*s
, DisasOps
*o
)
4132 check_privileged(s
);
4133 gen_helper_hsch(cpu_env
, regs
[1]);
4138 static DisasJumpType
op_msch(DisasContext
*s
, DisasOps
*o
)
4140 check_privileged(s
);
4141 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
4146 static DisasJumpType
op_rchp(DisasContext
*s
, DisasOps
*o
)
4148 check_privileged(s
);
4149 gen_helper_rchp(cpu_env
, regs
[1]);
4154 static DisasJumpType
op_rsch(DisasContext
*s
, DisasOps
*o
)
4156 check_privileged(s
);
4157 gen_helper_rsch(cpu_env
, regs
[1]);
4162 static DisasJumpType
op_sal(DisasContext
*s
, DisasOps
*o
)
4164 check_privileged(s
);
4165 gen_helper_sal(cpu_env
, regs
[1]);
4169 static DisasJumpType
op_schm(DisasContext
*s
, DisasOps
*o
)
4171 check_privileged(s
);
4172 gen_helper_schm(cpu_env
, regs
[1], regs
[2], o
->in2
);
4176 static DisasJumpType
op_siga(DisasContext
*s
, DisasOps
*o
)
4178 check_privileged(s
);
4179 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4180 gen_op_movi_cc(s
, 3);
4184 static DisasJumpType
op_stcps(DisasContext
*s
, DisasOps
*o
)
4186 check_privileged(s
);
4187 /* The instruction is suppressed if not provided. */
4191 static DisasJumpType
op_ssch(DisasContext
*s
, DisasOps
*o
)
4193 check_privileged(s
);
4194 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
4199 static DisasJumpType
op_stsch(DisasContext
*s
, DisasOps
*o
)
4201 check_privileged(s
);
4202 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
4207 static DisasJumpType
op_stcrw(DisasContext
*s
, DisasOps
*o
)
4209 check_privileged(s
);
4210 gen_helper_stcrw(cpu_env
, o
->in2
);
4215 static DisasJumpType
op_tpi(DisasContext
*s
, DisasOps
*o
)
4217 check_privileged(s
);
4218 gen_helper_tpi(cc_op
, cpu_env
, o
->addr1
);
4223 static DisasJumpType
op_tsch(DisasContext
*s
, DisasOps
*o
)
4225 check_privileged(s
);
4226 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
4231 static DisasJumpType
op_chsc(DisasContext
*s
, DisasOps
*o
)
4233 check_privileged(s
);
4234 gen_helper_chsc(cpu_env
, o
->in2
);
4239 static DisasJumpType
op_stpx(DisasContext
*s
, DisasOps
*o
)
4241 check_privileged(s
);
4242 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
4243 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
4247 static DisasJumpType
op_stnosm(DisasContext
*s
, DisasOps
*o
)
4249 uint64_t i2
= get_field(s
->fields
, i2
);
4252 check_privileged(s
);
4254 /* It is important to do what the instruction name says: STORE THEN.
4255 If we let the output hook perform the store then if we fault and
4256 restart, we'll have the wrong SYSTEM MASK in place. */
4257 t
= tcg_temp_new_i64();
4258 tcg_gen_shri_i64(t
, psw_mask
, 56);
4259 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
4260 tcg_temp_free_i64(t
);
4262 if (s
->fields
->op
== 0xac) {
4263 tcg_gen_andi_i64(psw_mask
, psw_mask
,
4264 (i2
<< 56) | 0x00ffffffffffffffull
);
4266 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
4269 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4270 return DISAS_PC_STALE_NOCHAIN
;
4273 static DisasJumpType
op_stura(DisasContext
*s
, DisasOps
*o
)
4275 check_privileged(s
);
4276 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
4280 static DisasJumpType
op_sturg(DisasContext
*s
, DisasOps
*o
)
4282 check_privileged(s
);
4283 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
4288 static DisasJumpType
op_stfle(DisasContext
*s
, DisasOps
*o
)
4290 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
4295 static DisasJumpType
op_st8(DisasContext
*s
, DisasOps
*o
)
4297 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
4301 static DisasJumpType
op_st16(DisasContext
*s
, DisasOps
*o
)
4303 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
4307 static DisasJumpType
op_st32(DisasContext
*s
, DisasOps
*o
)
4309 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
4313 static DisasJumpType
op_st64(DisasContext
*s
, DisasOps
*o
)
4315 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
4319 static DisasJumpType
op_stam(DisasContext
*s
, DisasOps
*o
)
4321 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4322 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4323 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
4324 tcg_temp_free_i32(r1
);
4325 tcg_temp_free_i32(r3
);
4329 static DisasJumpType
op_stcm(DisasContext
*s
, DisasOps
*o
)
4331 int m3
= get_field(s
->fields
, m3
);
4332 int pos
, base
= s
->insn
->data
;
4333 TCGv_i64 tmp
= tcg_temp_new_i64();
4335 pos
= base
+ ctz32(m3
) * 8;
4338 /* Effectively a 32-bit store. */
4339 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4340 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
4346 /* Effectively a 16-bit store. */
4347 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4348 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
4355 /* Effectively an 8-bit store. */
4356 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4357 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4361 /* This is going to be a sequence of shifts and stores. */
4362 pos
= base
+ 32 - 8;
4365 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4366 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4367 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
4369 m3
= (m3
<< 1) & 0xf;
4374 tcg_temp_free_i64(tmp
);
4378 static DisasJumpType
op_stm(DisasContext
*s
, DisasOps
*o
)
4380 int r1
= get_field(s
->fields
, r1
);
4381 int r3
= get_field(s
->fields
, r3
);
4382 int size
= s
->insn
->data
;
4383 TCGv_i64 tsize
= tcg_const_i64(size
);
4387 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
4389 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
4394 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
4398 tcg_temp_free_i64(tsize
);
4402 static DisasJumpType
op_stmh(DisasContext
*s
, DisasOps
*o
)
4404 int r1
= get_field(s
->fields
, r1
);
4405 int r3
= get_field(s
->fields
, r3
);
4406 TCGv_i64 t
= tcg_temp_new_i64();
4407 TCGv_i64 t4
= tcg_const_i64(4);
4408 TCGv_i64 t32
= tcg_const_i64(32);
4411 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
4412 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
4416 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
4420 tcg_temp_free_i64(t
);
4421 tcg_temp_free_i64(t4
);
4422 tcg_temp_free_i64(t32
);
4426 static DisasJumpType
op_stpq(DisasContext
*s
, DisasOps
*o
)
4428 if (tb_cflags(s
->base
.tb
) & CF_PARALLEL
) {
4429 gen_helper_stpq_parallel(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4431 gen_helper_stpq(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4436 static DisasJumpType
op_srst(DisasContext
*s
, DisasOps
*o
)
4438 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4439 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4441 gen_helper_srst(cpu_env
, r1
, r2
);
4443 tcg_temp_free_i32(r1
);
4444 tcg_temp_free_i32(r2
);
4449 static DisasJumpType
op_srstu(DisasContext
*s
, DisasOps
*o
)
4451 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4452 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4454 gen_helper_srstu(cpu_env
, r1
, r2
);
4456 tcg_temp_free_i32(r1
);
4457 tcg_temp_free_i32(r2
);
4462 static DisasJumpType
op_sub(DisasContext
*s
, DisasOps
*o
)
4464 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4468 static DisasJumpType
op_subb(DisasContext
*s
, DisasOps
*o
)
4473 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4475 /* The !borrow flag is the msb of CC. Since we want the inverse of
4476 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4477 disas_jcc(s
, &cmp
, 8 | 4);
4478 borrow
= tcg_temp_new_i64();
4480 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
4482 TCGv_i32 t
= tcg_temp_new_i32();
4483 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
4484 tcg_gen_extu_i32_i64(borrow
, t
);
4485 tcg_temp_free_i32(t
);
4489 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
4490 tcg_temp_free_i64(borrow
);
4494 static DisasJumpType
op_svc(DisasContext
*s
, DisasOps
*o
)
4501 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
4502 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4503 tcg_temp_free_i32(t
);
4505 t
= tcg_const_i32(s
->ilen
);
4506 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4507 tcg_temp_free_i32(t
);
4509 gen_exception(EXCP_SVC
);
4510 return DISAS_NORETURN
;
4513 static DisasJumpType
op_tam(DisasContext
*s
, DisasOps
*o
)
4517 cc
|= (s
->base
.tb
->flags
& FLAG_MASK_64
) ? 2 : 0;
4518 cc
|= (s
->base
.tb
->flags
& FLAG_MASK_32
) ? 1 : 0;
4519 gen_op_movi_cc(s
, cc
);
4523 static DisasJumpType
op_tceb(DisasContext
*s
, DisasOps
*o
)
4525 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4530 static DisasJumpType
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4532 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4537 static DisasJumpType
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4539 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4544 #ifndef CONFIG_USER_ONLY
4546 static DisasJumpType
op_testblock(DisasContext
*s
, DisasOps
*o
)
4548 check_privileged(s
);
4549 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4554 static DisasJumpType
op_tprot(DisasContext
*s
, DisasOps
*o
)
4556 gen_helper_tprot(cc_op
, cpu_env
, o
->addr1
, o
->in2
);
4563 static DisasJumpType
op_tp(DisasContext
*s
, DisasOps
*o
)
4565 TCGv_i32 l1
= tcg_const_i32(get_field(s
->fields
, l1
) + 1);
4566 gen_helper_tp(cc_op
, cpu_env
, o
->addr1
, l1
);
4567 tcg_temp_free_i32(l1
);
4572 static DisasJumpType
op_tr(DisasContext
*s
, DisasOps
*o
)
4574 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4575 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4576 tcg_temp_free_i32(l
);
4581 static DisasJumpType
op_tre(DisasContext
*s
, DisasOps
*o
)
4583 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4584 return_low128(o
->out2
);
4589 static DisasJumpType
op_trt(DisasContext
*s
, DisasOps
*o
)
4591 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4592 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4593 tcg_temp_free_i32(l
);
4598 static DisasJumpType
op_trtr(DisasContext
*s
, DisasOps
*o
)
4600 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4601 gen_helper_trtr(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4602 tcg_temp_free_i32(l
);
4607 static DisasJumpType
op_trXX(DisasContext
*s
, DisasOps
*o
)
4609 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4610 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4611 TCGv_i32 sizes
= tcg_const_i32(s
->insn
->opc
& 3);
4612 TCGv_i32 tst
= tcg_temp_new_i32();
4613 int m3
= get_field(s
->fields
, m3
);
4615 if (!s390_has_feat(S390_FEAT_ETF2_ENH
)) {
4619 tcg_gen_movi_i32(tst
, -1);
4621 tcg_gen_extrl_i64_i32(tst
, regs
[0]);
4622 if (s
->insn
->opc
& 3) {
4623 tcg_gen_ext8u_i32(tst
, tst
);
4625 tcg_gen_ext16u_i32(tst
, tst
);
4628 gen_helper_trXX(cc_op
, cpu_env
, r1
, r2
, tst
, sizes
);
4630 tcg_temp_free_i32(r1
);
4631 tcg_temp_free_i32(r2
);
4632 tcg_temp_free_i32(sizes
);
4633 tcg_temp_free_i32(tst
);
4638 static DisasJumpType
op_ts(DisasContext
*s
, DisasOps
*o
)
4640 TCGv_i32 t1
= tcg_const_i32(0xff);
4641 tcg_gen_atomic_xchg_i32(t1
, o
->in2
, t1
, get_mem_index(s
), MO_UB
);
4642 tcg_gen_extract_i32(cc_op
, t1
, 7, 1);
4643 tcg_temp_free_i32(t1
);
4648 static DisasJumpType
op_unpk(DisasContext
*s
, DisasOps
*o
)
4650 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4651 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4652 tcg_temp_free_i32(l
);
4656 static DisasJumpType
op_unpka(DisasContext
*s
, DisasOps
*o
)
4658 int l1
= get_field(s
->fields
, l1
) + 1;
4661 /* The length must not exceed 32 bytes. */
4663 gen_program_exception(s
, PGM_SPECIFICATION
);
4664 return DISAS_NORETURN
;
4666 l
= tcg_const_i32(l1
);
4667 gen_helper_unpka(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4668 tcg_temp_free_i32(l
);
4673 static DisasJumpType
op_unpku(DisasContext
*s
, DisasOps
*o
)
4675 int l1
= get_field(s
->fields
, l1
) + 1;
4678 /* The length must be even and should not exceed 64 bytes. */
4679 if ((l1
& 1) || (l1
> 64)) {
4680 gen_program_exception(s
, PGM_SPECIFICATION
);
4681 return DISAS_NORETURN
;
4683 l
= tcg_const_i32(l1
);
4684 gen_helper_unpku(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4685 tcg_temp_free_i32(l
);
4691 static DisasJumpType
op_xc(DisasContext
*s
, DisasOps
*o
)
4693 int d1
= get_field(s
->fields
, d1
);
4694 int d2
= get_field(s
->fields
, d2
);
4695 int b1
= get_field(s
->fields
, b1
);
4696 int b2
= get_field(s
->fields
, b2
);
4697 int l
= get_field(s
->fields
, l1
);
4700 o
->addr1
= get_address(s
, 0, b1
, d1
);
4702 /* If the addresses are identical, this is a store/memset of zero. */
4703 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4704 o
->in2
= tcg_const_i64(0);
4708 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4711 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4715 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4718 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4722 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4725 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4729 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4731 gen_op_movi_cc(s
, 0);
4735 /* But in general we'll defer to a helper. */
4736 o
->in2
= get_address(s
, 0, b2
, d2
);
4737 t32
= tcg_const_i32(l
);
4738 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4739 tcg_temp_free_i32(t32
);
4744 static DisasJumpType
op_xor(DisasContext
*s
, DisasOps
*o
)
4746 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4750 static DisasJumpType
op_xori(DisasContext
*s
, DisasOps
*o
)
4752 int shift
= s
->insn
->data
& 0xff;
4753 int size
= s
->insn
->data
>> 8;
4754 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4757 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4758 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4760 /* Produce the CC from only the bits manipulated. */
4761 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4762 set_cc_nz_u64(s
, cc_dst
);
4766 static DisasJumpType
op_xi(DisasContext
*s
, DisasOps
*o
)
4768 o
->in1
= tcg_temp_new_i64();
4770 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
4771 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
4773 /* Perform the atomic operation in memory. */
4774 tcg_gen_atomic_fetch_xor_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
4778 /* Recompute also for atomic case: needed for setting CC. */
4779 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4781 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
4782 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
4787 static DisasJumpType
op_zero(DisasContext
*s
, DisasOps
*o
)
4789 o
->out
= tcg_const_i64(0);
4793 static DisasJumpType
op_zero2(DisasContext
*s
, DisasOps
*o
)
4795 o
->out
= tcg_const_i64(0);
4801 #ifndef CONFIG_USER_ONLY
4802 static DisasJumpType
op_clp(DisasContext
*s
, DisasOps
*o
)
4804 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4806 check_privileged(s
);
4807 gen_helper_clp(cpu_env
, r2
);
4808 tcg_temp_free_i32(r2
);
4813 static DisasJumpType
op_pcilg(DisasContext
*s
, DisasOps
*o
)
4815 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4816 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4818 check_privileged(s
);
4819 gen_helper_pcilg(cpu_env
, r1
, r2
);
4820 tcg_temp_free_i32(r1
);
4821 tcg_temp_free_i32(r2
);
4826 static DisasJumpType
op_pcistg(DisasContext
*s
, DisasOps
*o
)
4828 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4829 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4831 check_privileged(s
);
4832 gen_helper_pcistg(cpu_env
, r1
, r2
);
4833 tcg_temp_free_i32(r1
);
4834 tcg_temp_free_i32(r2
);
4839 static DisasJumpType
op_stpcifc(DisasContext
*s
, DisasOps
*o
)
4841 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4842 TCGv_i32 ar
= tcg_const_i32(get_field(s
->fields
, b2
));
4844 check_privileged(s
);
4845 gen_helper_stpcifc(cpu_env
, r1
, o
->addr1
, ar
);
4846 tcg_temp_free_i32(ar
);
4847 tcg_temp_free_i32(r1
);
4852 static DisasJumpType
op_sic(DisasContext
*s
, DisasOps
*o
)
4854 check_privileged(s
);
4855 gen_helper_sic(cpu_env
, o
->in1
, o
->in2
);
4859 static DisasJumpType
op_rpcit(DisasContext
*s
, DisasOps
*o
)
4861 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4862 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4864 check_privileged(s
);
4865 gen_helper_rpcit(cpu_env
, r1
, r2
);
4866 tcg_temp_free_i32(r1
);
4867 tcg_temp_free_i32(r2
);
4872 static DisasJumpType
op_pcistb(DisasContext
*s
, DisasOps
*o
)
4874 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4875 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4876 TCGv_i32 ar
= tcg_const_i32(get_field(s
->fields
, b2
));
4878 check_privileged(s
);
4879 gen_helper_pcistb(cpu_env
, r1
, r3
, o
->addr1
, ar
);
4880 tcg_temp_free_i32(ar
);
4881 tcg_temp_free_i32(r1
);
4882 tcg_temp_free_i32(r3
);
4887 static DisasJumpType
op_mpcifc(DisasContext
*s
, DisasOps
*o
)
4889 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4890 TCGv_i32 ar
= tcg_const_i32(get_field(s
->fields
, b2
));
4892 check_privileged(s
);
4893 gen_helper_mpcifc(cpu_env
, r1
, o
->addr1
, ar
);
4894 tcg_temp_free_i32(ar
);
4895 tcg_temp_free_i32(r1
);
4901 /* ====================================================================== */
4902 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4903 the original inputs), update the various cc data structures in order to
4904 be able to compute the new condition code. */
4906 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4908 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4911 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4913 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
4916 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
4918 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
4921 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4923 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4926 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4928 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4931 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4933 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4936 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4938 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4941 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4943 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
4946 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
4948 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
4951 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4953 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4956 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4958 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4961 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4963 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4966 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4968 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4971 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4973 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4976 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4978 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4981 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4983 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4986 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4988 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4991 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4993 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4996 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4998 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
5001 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
5003 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
5004 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
5007 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
5009 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
5012 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
5014 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
5017 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
5019 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
5022 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
5024 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
5027 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
5029 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
5032 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
5034 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
5037 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
5039 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
5042 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
5044 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
5047 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
5049 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
5052 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
5054 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
5057 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
5059 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
5062 /* ====================================================================== */
5063 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5064 with the TCG register to which we will write. Used in combination with
5065 the "wout" generators, in some cases we need a new temporary, and in
5066 some cases we can write to a TCG global. */
5068 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5070 o
->out
= tcg_temp_new_i64();
5072 #define SPEC_prep_new 0
5074 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5076 o
->out
= tcg_temp_new_i64();
5077 o
->out2
= tcg_temp_new_i64();
5079 #define SPEC_prep_new_P 0
5081 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5083 o
->out
= regs
[get_field(f
, r1
)];
5086 #define SPEC_prep_r1 0
5088 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5090 int r1
= get_field(f
, r1
);
5092 o
->out2
= regs
[r1
+ 1];
5093 o
->g_out
= o
->g_out2
= true;
5095 #define SPEC_prep_r1_P SPEC_r1_even
5097 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5099 o
->out
= fregs
[get_field(f
, r1
)];
5102 #define SPEC_prep_f1 0
5104 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5106 int r1
= get_field(f
, r1
);
5108 o
->out2
= fregs
[r1
+ 2];
5109 o
->g_out
= o
->g_out2
= true;
5111 #define SPEC_prep_x1 SPEC_r1_f128
5113 /* ====================================================================== */
5114 /* The "Write OUTput" generators. These generally perform some non-trivial
5115 copy of data to TCG globals, or to main memory. The trivial cases are
5116 generally handled by having a "prep" generator install the TCG global
5117 as the destination of the operation. */
5119 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5121 store_reg(get_field(f
, r1
), o
->out
);
5123 #define SPEC_wout_r1 0
5125 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5127 int r1
= get_field(f
, r1
);
5128 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
5130 #define SPEC_wout_r1_8 0
5132 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5134 int r1
= get_field(f
, r1
);
5135 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
5137 #define SPEC_wout_r1_16 0
5139 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5141 store_reg32_i64(get_field(f
, r1
), o
->out
);
5143 #define SPEC_wout_r1_32 0
5145 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5147 store_reg32h_i64(get_field(f
, r1
), o
->out
);
5149 #define SPEC_wout_r1_32h 0
5151 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5153 int r1
= get_field(f
, r1
);
5154 store_reg32_i64(r1
, o
->out
);
5155 store_reg32_i64(r1
+ 1, o
->out2
);
5157 #define SPEC_wout_r1_P32 SPEC_r1_even
5159 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5161 int r1
= get_field(f
, r1
);
5162 store_reg32_i64(r1
+ 1, o
->out
);
5163 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
5164 store_reg32_i64(r1
, o
->out
);
5166 #define SPEC_wout_r1_D32 SPEC_r1_even
5168 static void wout_r3_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5170 int r3
= get_field(f
, r3
);
5171 store_reg32_i64(r3
, o
->out
);
5172 store_reg32_i64(r3
+ 1, o
->out2
);
5174 #define SPEC_wout_r3_P32 SPEC_r3_even
5176 static void wout_r3_P64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5178 int r3
= get_field(f
, r3
);
5179 store_reg(r3
, o
->out
);
5180 store_reg(r3
+ 1, o
->out2
);
5182 #define SPEC_wout_r3_P64 SPEC_r3_even
5184 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5186 store_freg32_i64(get_field(f
, r1
), o
->out
);
5188 #define SPEC_wout_e1 0
5190 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5192 store_freg(get_field(f
, r1
), o
->out
);
5194 #define SPEC_wout_f1 0
5196 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5198 int f1
= get_field(s
->fields
, r1
);
5199 store_freg(f1
, o
->out
);
5200 store_freg(f1
+ 2, o
->out2
);
5202 #define SPEC_wout_x1 SPEC_r1_f128
5204 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5206 if (get_field(f
, r1
) != get_field(f
, r2
)) {
5207 store_reg32_i64(get_field(f
, r1
), o
->out
);
5210 #define SPEC_wout_cond_r1r2_32 0
5212 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5214 if (get_field(f
, r1
) != get_field(f
, r2
)) {
5215 store_freg32_i64(get_field(f
, r1
), o
->out
);
5218 #define SPEC_wout_cond_e1e2 0
5220 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5222 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
5224 #define SPEC_wout_m1_8 0
5226 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5228 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
5230 #define SPEC_wout_m1_16 0
5232 #ifndef CONFIG_USER_ONLY
5233 static void wout_m1_16a(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5235 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEUW
| MO_ALIGN
);
5237 #define SPEC_wout_m1_16a 0
5240 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5242 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
5244 #define SPEC_wout_m1_32 0
5246 #ifndef CONFIG_USER_ONLY
5247 static void wout_m1_32a(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5249 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEUL
| MO_ALIGN
);
5251 #define SPEC_wout_m1_32a 0
5254 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5256 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
5258 #define SPEC_wout_m1_64 0
5260 #ifndef CONFIG_USER_ONLY
5261 static void wout_m1_64a(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5263 tcg_gen_qemu_st_i64(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
5265 #define SPEC_wout_m1_64a 0
5268 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5270 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
5272 #define SPEC_wout_m2_32 0
5274 static void wout_in2_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5276 store_reg(get_field(f
, r1
), o
->in2
);
5278 #define SPEC_wout_in2_r1 0
5280 static void wout_in2_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5282 store_reg32_i64(get_field(f
, r1
), o
->in2
);
5284 #define SPEC_wout_in2_r1_32 0
5286 /* ====================================================================== */
5287 /* The "INput 1" generators. These load the first operand to an insn. */
5289 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5291 o
->in1
= load_reg(get_field(f
, r1
));
5293 #define SPEC_in1_r1 0
5295 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5297 o
->in1
= regs
[get_field(f
, r1
)];
5300 #define SPEC_in1_r1_o 0
5302 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5304 o
->in1
= tcg_temp_new_i64();
5305 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
5307 #define SPEC_in1_r1_32s 0
5309 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5311 o
->in1
= tcg_temp_new_i64();
5312 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
5314 #define SPEC_in1_r1_32u 0
5316 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5318 o
->in1
= tcg_temp_new_i64();
5319 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
5321 #define SPEC_in1_r1_sr32 0
5323 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5325 o
->in1
= load_reg(get_field(f
, r1
) + 1);
5327 #define SPEC_in1_r1p1 SPEC_r1_even
5329 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5331 o
->in1
= tcg_temp_new_i64();
5332 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
5334 #define SPEC_in1_r1p1_32s SPEC_r1_even
5336 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5338 o
->in1
= tcg_temp_new_i64();
5339 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
5341 #define SPEC_in1_r1p1_32u SPEC_r1_even
5343 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5345 int r1
= get_field(f
, r1
);
5346 o
->in1
= tcg_temp_new_i64();
5347 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
5349 #define SPEC_in1_r1_D32 SPEC_r1_even
5351 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5353 o
->in1
= load_reg(get_field(f
, r2
));
5355 #define SPEC_in1_r2 0
5357 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5359 o
->in1
= tcg_temp_new_i64();
5360 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
5362 #define SPEC_in1_r2_sr32 0
5364 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5366 o
->in1
= load_reg(get_field(f
, r3
));
5368 #define SPEC_in1_r3 0
5370 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5372 o
->in1
= regs
[get_field(f
, r3
)];
5375 #define SPEC_in1_r3_o 0
5377 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5379 o
->in1
= tcg_temp_new_i64();
5380 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5382 #define SPEC_in1_r3_32s 0
5384 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5386 o
->in1
= tcg_temp_new_i64();
5387 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5389 #define SPEC_in1_r3_32u 0
5391 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5393 int r3
= get_field(f
, r3
);
5394 o
->in1
= tcg_temp_new_i64();
5395 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
5397 #define SPEC_in1_r3_D32 SPEC_r3_even
5399 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5401 o
->in1
= load_freg32_i64(get_field(f
, r1
));
5403 #define SPEC_in1_e1 0
5405 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5407 o
->in1
= fregs
[get_field(f
, r1
)];
5410 #define SPEC_in1_f1_o 0
5412 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5414 int r1
= get_field(f
, r1
);
5416 o
->out2
= fregs
[r1
+ 2];
5417 o
->g_out
= o
->g_out2
= true;
5419 #define SPEC_in1_x1_o SPEC_r1_f128
5421 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5423 o
->in1
= fregs
[get_field(f
, r3
)];
5426 #define SPEC_in1_f3_o 0
5428 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5430 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
5432 #define SPEC_in1_la1 0
5434 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5436 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5437 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5439 #define SPEC_in1_la2 0
5441 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5444 o
->in1
= tcg_temp_new_i64();
5445 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
5447 #define SPEC_in1_m1_8u 0
5449 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5452 o
->in1
= tcg_temp_new_i64();
5453 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
5455 #define SPEC_in1_m1_16s 0
5457 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5460 o
->in1
= tcg_temp_new_i64();
5461 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
5463 #define SPEC_in1_m1_16u 0
5465 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5468 o
->in1
= tcg_temp_new_i64();
5469 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
5471 #define SPEC_in1_m1_32s 0
5473 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5476 o
->in1
= tcg_temp_new_i64();
5477 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
5479 #define SPEC_in1_m1_32u 0
5481 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5484 o
->in1
= tcg_temp_new_i64();
5485 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
5487 #define SPEC_in1_m1_64 0
5489 /* ====================================================================== */
5490 /* The "INput 2" generators. These load the second operand to an insn. */
5492 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5494 o
->in2
= regs
[get_field(f
, r1
)];
5497 #define SPEC_in2_r1_o 0
5499 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5501 o
->in2
= tcg_temp_new_i64();
5502 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5504 #define SPEC_in2_r1_16u 0
5506 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5508 o
->in2
= tcg_temp_new_i64();
5509 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5511 #define SPEC_in2_r1_32u 0
5513 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5515 int r1
= get_field(f
, r1
);
5516 o
->in2
= tcg_temp_new_i64();
5517 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
5519 #define SPEC_in2_r1_D32 SPEC_r1_even
5521 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5523 o
->in2
= load_reg(get_field(f
, r2
));
5525 #define SPEC_in2_r2 0
5527 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5529 o
->in2
= regs
[get_field(f
, r2
)];
5532 #define SPEC_in2_r2_o 0
5534 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5536 int r2
= get_field(f
, r2
);
5538 o
->in2
= load_reg(r2
);
5541 #define SPEC_in2_r2_nz 0
5543 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5545 o
->in2
= tcg_temp_new_i64();
5546 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5548 #define SPEC_in2_r2_8s 0
5550 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5552 o
->in2
= tcg_temp_new_i64();
5553 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5555 #define SPEC_in2_r2_8u 0
5557 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5559 o
->in2
= tcg_temp_new_i64();
5560 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5562 #define SPEC_in2_r2_16s 0
5564 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5566 o
->in2
= tcg_temp_new_i64();
5567 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5569 #define SPEC_in2_r2_16u 0
5571 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5573 o
->in2
= load_reg(get_field(f
, r3
));
5575 #define SPEC_in2_r3 0
5577 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5579 o
->in2
= tcg_temp_new_i64();
5580 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
5582 #define SPEC_in2_r3_sr32 0
5584 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5586 o
->in2
= tcg_temp_new_i64();
5587 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5589 #define SPEC_in2_r2_32s 0
5591 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5593 o
->in2
= tcg_temp_new_i64();
5594 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5596 #define SPEC_in2_r2_32u 0
5598 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5600 o
->in2
= tcg_temp_new_i64();
5601 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
5603 #define SPEC_in2_r2_sr32 0
5605 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5607 o
->in2
= load_freg32_i64(get_field(f
, r2
));
5609 #define SPEC_in2_e2 0
5611 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5613 o
->in2
= fregs
[get_field(f
, r2
)];
5616 #define SPEC_in2_f2_o 0
5618 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5620 int r2
= get_field(f
, r2
);
5622 o
->in2
= fregs
[r2
+ 2];
5623 o
->g_in1
= o
->g_in2
= true;
5625 #define SPEC_in2_x2_o SPEC_r2_f128
5627 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5629 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
5631 #define SPEC_in2_ra2 0
5633 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5635 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5636 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5638 #define SPEC_in2_a2 0
5640 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5642 o
->in2
= tcg_const_i64(s
->base
.pc_next
+ (int64_t)get_field(f
, i2
) * 2);
5644 #define SPEC_in2_ri2 0
5646 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5648 help_l2_shift(s
, f
, o
, 31);
5650 #define SPEC_in2_sh32 0
5652 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5654 help_l2_shift(s
, f
, o
, 63);
5656 #define SPEC_in2_sh64 0
5658 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5661 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
5663 #define SPEC_in2_m2_8u 0
5665 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5668 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
5670 #define SPEC_in2_m2_16s 0
5672 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5675 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5677 #define SPEC_in2_m2_16u 0
5679 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5682 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5684 #define SPEC_in2_m2_32s 0
5686 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5689 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5691 #define SPEC_in2_m2_32u 0
5693 #ifndef CONFIG_USER_ONLY
5694 static void in2_m2_32ua(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5697 tcg_gen_qemu_ld_tl(o
->in2
, o
->in2
, get_mem_index(s
), MO_TEUL
| MO_ALIGN
);
5699 #define SPEC_in2_m2_32ua 0
5702 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5705 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5707 #define SPEC_in2_m2_64 0
5709 #ifndef CONFIG_USER_ONLY
5710 static void in2_m2_64a(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5713 tcg_gen_qemu_ld_i64(o
->in2
, o
->in2
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
5715 #define SPEC_in2_m2_64a 0
5718 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5721 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5723 #define SPEC_in2_mri2_16u 0
5725 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5728 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5730 #define SPEC_in2_mri2_32s 0
5732 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5735 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5737 #define SPEC_in2_mri2_32u 0
5739 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5742 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5744 #define SPEC_in2_mri2_64 0
5746 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5748 o
->in2
= tcg_const_i64(get_field(f
, i2
));
5750 #define SPEC_in2_i2 0
5752 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5754 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
5756 #define SPEC_in2_i2_8u 0
5758 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5760 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
5762 #define SPEC_in2_i2_16u 0
5764 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5766 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
5768 #define SPEC_in2_i2_32u 0
5770 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5772 uint64_t i2
= (uint16_t)get_field(f
, i2
);
5773 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5775 #define SPEC_in2_i2_16u_shl 0
5777 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5779 uint64_t i2
= (uint32_t)get_field(f
, i2
);
5780 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5782 #define SPEC_in2_i2_32u_shl 0
5784 #ifndef CONFIG_USER_ONLY
5785 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5787 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
5789 #define SPEC_in2_insn 0
5792 /* ====================================================================== */
5794 /* Find opc within the table of insns. This is formulated as a switch
5795 statement so that (1) we get compile-time notice of cut-paste errors
5796 for duplicated opcodes, and (2) the compiler generates the binary
5797 search tree, rather than us having to post-process the table. */
/* C() is D() with a zero per-insn data word. */
5799 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5800 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
/* First expansion of insn-data.def: build the insn_* enumerators. */
5802 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5804 enum DisasInsnEnum
{
5805 #include "insn-data.def"
/* Second expansion: build the DisasInsn initializers (helper pointers
 * and the specification-check mask).
 * NOTE(review): several initializer fields of this macro appear to have
 * been elided from this extract -- compare against upstream. */
5809 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5813 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5815 .help_in1 = in1_##I1, \
5816 .help_in2 = in2_##I2, \
5817 .help_prep = prep_##P, \
5818 .help_wout = wout_##W, \
5819 .help_cout = cout_##CC, \
5820 .help_op = op_##OP, \
5824 /* Allow 0 to be used for NULL in the table below. */
5832 #define SPEC_in1_0 0
5833 #define SPEC_in2_0 0
5834 #define SPEC_prep_0 0
5835 #define SPEC_wout_0 0
5837 /* Give smaller names to the various facilities. */
5838 #define FAC_Z S390_FEAT_ZARCH
5839 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5840 #define FAC_DFP S390_FEAT_DFP
5841 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5842 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5843 #define FAC_EE S390_FEAT_EXECUTE_EXT
5844 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5845 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5846 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5847 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5848 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5849 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5850 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5851 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5852 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5853 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5854 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5855 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
5856 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5857 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5858 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5859 #define FAC_SFLE S390_FEAT_STFLE
5860 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5861 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5862 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5863 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5864 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5865 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
5866 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
5867 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5868 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
5869 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
5870 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
5871 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
5872 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
5873 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
5874 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
5875 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
/* The actual per-insn info table, filled from insn-data.def. */
5877 static const DisasInsn insn_info
[] = {
5878 #include "insn-data.def"
/* Third expansion: the opcode -> &insn_info[] switch in lookup_opc.
 * NOTE(review): the switch statement wrapping this expansion is not
 * visible in this extract. */
5882 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5883 case OPC: return &insn_info[insn_ ## NM];
5885 static const DisasInsn
*lookup_opc(uint16_t opc
)
5888 #include "insn-data.def"
5897 /* Extract a field from the insn. The INSN should be left-aligned in
5898 the uint64_t so that we can more easily utilize the big-bit-endian
5899 definitions we extract from the Principals of Operation. */
/* NOTE(review): the local declarations of r and m, the switch head on
 * f->type, the signed-extension combine step and the case breaks are not
 * visible in this extract -- confirm against upstream. */
5901 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5909 /* Zero extract the field from the insn. */
5910 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5912 /* Sign-extend, or un-swap the field as necessary. */
5914 case 0: /* unsigned */
5916 case 1: /* signed */
5917 assert(f
->size
<= 32);
/* m = sign-bit mask used for the (r ^ m) - m sign-extension trick */
5918 m
= 1u << (f
->size
- 1);
5921 case 2: /* dl+dh split, signed 20 bit. */
5922 r
= ((int8_t)r
<< 12) | (r
>> 8);
5928 /* Validate that the "compressed" encoding we selected above is valid.
5929 I.e. we havn't make two different original fields overlap. */
5930 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
/* record which compressed/original field slots are now populated */
5931 o
->presentC
|= 1 << f
->indexC
;
5932 o
->presentO
|= 1 << f
->indexO
;
/* store the extracted value into its compressed slot */
5934 o
->c
[f
->indexC
] = r
;
5937 /* Lookup the insn at the current PC, extracting the operands into O and
5938 returning the info struct for the insn. Returns NULL for invalid insn. */
/* NOTE(review): several structural lines (local declarations, the ilen
 * switch, the op switch head, break statements and closing braces) are
 * not visible in this extract -- confirm against upstream. */
5940 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5943 uint64_t insn
, pc
= s
->base
.pc_next
;
5945 const DisasInsn
*info
;
/* EXECUTE in progress: the target insn was stashed in env->ex_value. */
5947 if (unlikely(s
->ex_value
)) {
5948 /* Drop the EX data now, so that it's clear on exception paths. */
5949 TCGv_i64 zero
= tcg_const_i64(0);
5950 tcg_gen_st_i64(zero
, cpu_env
, offsetof(CPUS390XState
, ex_value
));
5951 tcg_temp_free_i64(zero
);
5953 /* Extract the values saved by EXECUTE. */
5954 insn
= s
->ex_value
& 0xffffffffffff0000ull
;
5955 ilen
= s
->ex_value
& 0xf;
/* Normal path: read the first halfword and derive the insn length. */
5958 insn
= ld_code2(env
, pc
);
5959 op
= (insn
>> 8) & 0xff;
5960 ilen
= get_ilen(op
);
/* 4-byte insns: left-align in the 64-bit container. */
5966 insn
= ld_code4(env
, pc
) << 32;
/* 6-byte insns: combine the two 4-byte reads, left-aligned. */
5969 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
5972 g_assert_not_reached();
/* pc_tmp = address of the next sequential instruction. */
5975 s
->pc_tmp
= s
->base
.pc_next
+ ilen
;
5978 /* We can't actually determine the insn format until we've looked up
5979 the full insn opcode. Which we can't do without locating the
5980 secondary opcode. Assume by default that OP2 is at bit 40; for
5981 those smaller insns that don't actually have a secondary opcode
5982 this will correctly result in OP2 = 0. */
5988 case 0xb2: /* S, RRF, RRE, IE */
5989 case 0xb3: /* RRE, RRD, RRF */
5990 case 0xb9: /* RRE, RRF */
5991 case 0xe5: /* SSE, SIL */
5992 op2
= (insn
<< 8) >> 56;
5996 case 0xc0: /* RIL */
5997 case 0xc2: /* RIL */
5998 case 0xc4: /* RIL */
5999 case 0xc6: /* RIL */
6000 case 0xc8: /* SSF */
6001 case 0xcc: /* RIL */
6002 op2
= (insn
<< 12) >> 60;
/* Formats with no secondary opcode at all. */
6004 case 0xc5: /* MII */
6005 case 0xc7: /* SMI */
6006 case 0xd0 ... 0xdf: /* SS */
6012 case 0xee ... 0xf3: /* SS */
6013 case 0xf8 ... 0xfd: /* SS */
/* Default: OP2 at bit 40. */
6017 op2
= (insn
<< 40) >> 56;
/* Clear the operand struct before filling in fields. */
6021 memset(f
, 0, sizeof(*f
));
6026 /* Lookup the instruction. */
6027 info
= lookup_opc(op
<< 8 | op2
);
6029 /* If we found it, extract the operands. */
6031 DisasFormat fmt
= info
->fmt
;
6034 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
6035 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
/* translate_one: decode and translate a single guest instruction,
 * driving the per-insn helper pipeline: in1 -> in2 -> prep -> op ->
 * wout -> cout, then freeing any non-global temporaries.
 * NOTE(review): several structural lines (locals f and o, NULL checks,
 * the excp dispatch, closing braces, the final return) are not visible
 * in this extract -- confirm against upstream. */
6041 static DisasJumpType
translate_one(CPUS390XState
*env
, DisasContext
*s
)
6043 const DisasInsn
*insn
;
6044 DisasJumpType ret
= DISAS_NEXT
;
6048 /* Search for the insn in the table. */
6049 insn
= extract_insn(env
, s
, &f
);
6051 /* Not found means unimplemented/illegal opcode. */
6053 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
6055 gen_illegal_opcode(s
);
6056 return DISAS_NORETURN
;
/* PER instruction-fetch event tracing (system emulation only). */
6059 #ifndef CONFIG_USER_ONLY
6060 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
6061 TCGv_i64 addr
= tcg_const_i64(s
->base
.pc_next
);
6062 gen_helper_per_ifetch(cpu_env
, addr
);
6063 tcg_temp_free_i64(addr
);
6067 /* Check for insn specification exceptions. */
6069 int spec
= insn
->spec
, excp
= 0, r
;
/* r1 must be even (register-pair operands). */
6071 if (spec
& SPEC_r1_even
) {
6072 r
= get_field(&f
, r1
);
6074 excp
= PGM_SPECIFICATION
;
6077 if (spec
& SPEC_r2_even
) {
6078 r
= get_field(&f
, r2
);
6080 excp
= PGM_SPECIFICATION
;
6083 if (spec
& SPEC_r3_even
) {
6084 r
= get_field(&f
, r3
);
6086 excp
= PGM_SPECIFICATION
;
/* r1/r2 must designate a valid 128-bit float register pair. */
6089 if (spec
& SPEC_r1_f128
) {
6090 r
= get_field(&f
, r1
);
6092 excp
= PGM_SPECIFICATION
;
6095 if (spec
& SPEC_r2_f128
) {
6096 r
= get_field(&f
, r2
);
6098 excp
= PGM_SPECIFICATION
;
/* Any violation raises a specification program exception. */
6102 gen_program_exception(s
, excp
);
6103 return DISAS_NORETURN
;
6107 /* Set up the strutures we use to communicate with the helpers. */
6110 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
6117 /* Implement the instruction. */
6118 if (insn
->help_in1
) {
6119 insn
->help_in1(s
, &f
, &o
);
6121 if (insn
->help_in2
) {
6122 insn
->help_in2(s
, &f
, &o
);
6124 if (insn
->help_prep
) {
6125 insn
->help_prep(s
, &f
, &o
);
6127 if (insn
->help_op
) {
6128 ret
= insn
->help_op(s
, &o
);
6130 if (insn
->help_wout
) {
6131 insn
->help_wout(s
, &f
, &o
);
6133 if (insn
->help_cout
) {
6134 insn
->help_cout(s
, &o
);
6137 /* Free any temporaries created by the helpers. */
6138 if (o
.out
&& !o
.g_out
) {
6139 tcg_temp_free_i64(o
.out
);
6141 if (o
.out2
&& !o
.g_out2
) {
6142 tcg_temp_free_i64(o
.out2
);
6144 if (o
.in1
&& !o
.g_in1
) {
6145 tcg_temp_free_i64(o
.in1
);
6147 if (o
.in2
&& !o
.g_in2
) {
6148 tcg_temp_free_i64(o
.in2
);
6151 tcg_temp_free_i64(o
.addr1
);
/* PER: check for a possible exception after the insn (system only). */
6154 #ifndef CONFIG_USER_ONLY
6155 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
6156 /* An exception might be triggered, save PSW if not already done. */
6157 if (ret
== DISAS_NEXT
|| ret
== DISAS_PC_STALE
) {
6158 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
6161 /* Call the helper to check for a possible PER exception. */
6162 gen_helper_per_check_exception(cpu_env
);
6166 /* Advance to the next instruction. */
6167 s
->base
.pc_next
= s
->pc_tmp
;
6171 static void s390x_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
6173 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6176 if (!(dc
->base
.tb
->flags
& FLAG_MASK_64
)) {
6177 dc
->base
.pc_first
&= 0x7fffffff;
6178 dc
->base
.pc_next
= dc
->base
.pc_first
;
6181 dc
->cc_op
= CC_OP_DYNAMIC
;
6182 dc
->ex_value
= dc
->base
.tb
->cs_base
;
6183 dc
->do_debug
= dc
->base
.singlestep_enabled
;
/* Translator hook invoked at the start of each TB.
 * NOTE(review): no body is visible in this extract -- presumably an
 * empty hook; confirm against upstream. */
6186 static void s390x_tr_tb_start(DisasContextBase
*db
, CPUState
*cs
)
6190 static void s390x_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
6192 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6194 tcg_gen_insn_start(dc
->base
.pc_next
, dc
->cc_op
);
6197 static bool s390x_tr_breakpoint_check(DisasContextBase
*dcbase
, CPUState
*cs
,
6198 const CPUBreakpoint
*bp
)
6200 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6202 dc
->base
.is_jmp
= DISAS_PC_STALE
;
6203 dc
->do_debug
= true;
6204 /* The address covered by the breakpoint must be included in
6205 [tb->pc, tb->pc + tb->size) in order to for it to be
6206 properly cleared -- thus we increment the PC here so that
6207 the logic setting tb->size does the right thing. */
6208 dc
->base
.pc_next
+= 2;
6212 static void s390x_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
6214 CPUS390XState
*env
= cs
->env_ptr
;
6215 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6217 dc
->base
.is_jmp
= translate_one(env
, dc
);
6218 if (dc
->base
.is_jmp
== DISAS_NEXT
) {
6219 uint64_t page_start
;
6221 page_start
= dc
->base
.pc_first
& TARGET_PAGE_MASK
;
6222 if (dc
->base
.pc_next
- page_start
>= TARGET_PAGE_SIZE
|| dc
->ex_value
) {
6223 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
/* Translator hook: emit the TB epilogue according to how translation
 * ended.
 * NOTE(review): several case labels, break statements and the if/else
 * heads of the exit dispatch are not visible in this extract -- confirm
 * against upstream before relying on this listing. */
6228 static void s390x_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
6230 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
)
;
6232 switch (dc
->base
.is_jmp
) {
/* Exit already emitted; nothing more to do. */
6234 case DISAS_NORETURN
:
/* psw_addr is stale: rewrite it before leaving the TB. */
6236 case DISAS_TOO_MANY
:
6237 case DISAS_PC_STALE
:
6238 case DISAS_PC_STALE_NOCHAIN
:
6239 update_psw_addr(dc
);
6241 case DISAS_PC_UPDATED
:
6242 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6243 cc op type is in env */
6246 case DISAS_PC_CC_UPDATED
:
6247 /* Exit the TB, either by raising a debug exception or by return. */
6249 gen_exception(EXCP_DEBUG
);
6250 } else if (use_exit_tb(dc
) ||
6251 dc
->base
.is_jmp
== DISAS_PC_STALE_NOCHAIN
) {
6252 tcg_gen_exit_tb(NULL
, 0);
/* Otherwise chain to the next TB via the jump cache. */
6254 tcg_gen_lookup_and_goto_ptr();
6258 g_assert_not_reached();
/* Translator hook: log the guest code being translated.
 * NOTE(review): the else that separates the EXECUTE branch from the
 * normal disassembly branch is not visible in this extract -- the two
 * log paths are presumably mutually exclusive; confirm against
 * upstream. */
6262 static void s390x_tr_disas_log(const DisasContextBase
*dcbase
, CPUState
*cs
)
6264 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
)
;
/* While executing an EXECUTE target, the bytes live in ex_value, not
 * guest memory, so only the raw value can be logged. */
6266 if (unlikely(dc
->ex_value
)) {
6267 /* ??? Unfortunately log_target_disas can't use host memory. */
6268 qemu_log("IN: EXECUTE %016" PRIx64
, dc
->ex_value
);
/* Normal path: symbolize and disassemble the TB from guest memory. */
6270 qemu_log("IN: %s\n", lookup_symbol(dc
->base
.pc_first
));
6271 log_target_disas(cs
, dc
->base
.pc_first
, dc
->base
.tb
->size
);
6275 static const TranslatorOps s390x_tr_ops
= {
6276 .init_disas_context
= s390x_tr_init_disas_context
,
6277 .tb_start
= s390x_tr_tb_start
,
6278 .insn_start
= s390x_tr_insn_start
,
6279 .breakpoint_check
= s390x_tr_breakpoint_check
,
6280 .translate_insn
= s390x_tr_translate_insn
,
6281 .tb_stop
= s390x_tr_tb_stop
,
6282 .disas_log
= s390x_tr_disas_log
,
6285 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
)
6289 translator_loop(&s390x_tr_ops
, &dc
.base
, cs
, tb
);
6292 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
6295 int cc_op
= data
[1];
6296 env
->psw
.addr
= data
[0];
6297 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {