4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
40 #include "exec/gen-icount.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
44 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext
;
50 typedef struct DisasInsn DisasInsn
;
51 typedef struct DisasFields DisasFields
;
54 struct TranslationBlock
*tb
;
55 const DisasInsn
*insn
;
61 bool singlestep_enabled
;
64 /* Information carried about a condition to be evaluated. */
71 struct { TCGv_i64 a
, b
; } s64
;
72 struct { TCGv_i32 a
, b
; } s32
;
76 /* is_jmp field values */
77 #define DISAS_EXCP DISAS_TARGET_0
79 #ifdef DEBUG_INLINE_BRANCHES
80 static uint64_t inline_branch_hit
[CC_OP_MAX
];
81 static uint64_t inline_branch_miss
[CC_OP_MAX
];
84 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
86 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
87 if (s
->tb
->flags
& FLAG_MASK_32
) {
88 return pc
| 0x80000000;
94 static TCGv_i64 psw_addr
;
95 static TCGv_i64 psw_mask
;
98 static TCGv_i32 cc_op
;
99 static TCGv_i64 cc_src
;
100 static TCGv_i64 cc_dst
;
101 static TCGv_i64 cc_vr
;
103 static char cpu_reg_names
[32][4];
104 static TCGv_i64 regs
[16];
105 static TCGv_i64 fregs
[16];
107 void s390x_translate_init(void)
111 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
112 offsetof(CPUS390XState
, psw
.addr
),
114 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
115 offsetof(CPUS390XState
, psw
.mask
),
117 gbea
= tcg_global_mem_new_i64(cpu_env
,
118 offsetof(CPUS390XState
, gbea
),
121 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
123 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
125 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
127 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
130 for (i
= 0; i
< 16; i
++) {
131 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
132 regs
[i
] = tcg_global_mem_new(cpu_env
,
133 offsetof(CPUS390XState
, regs
[i
]),
137 for (i
= 0; i
< 16; i
++) {
138 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
139 fregs
[i
] = tcg_global_mem_new(cpu_env
,
140 offsetof(CPUS390XState
, vregs
[i
][0].d
),
141 cpu_reg_names
[i
+ 16]);
145 static TCGv_i64
load_reg(int reg
)
147 TCGv_i64 r
= tcg_temp_new_i64();
148 tcg_gen_mov_i64(r
, regs
[reg
]);
152 static TCGv_i64
load_freg32_i64(int reg
)
154 TCGv_i64 r
= tcg_temp_new_i64();
155 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
159 static void store_reg(int reg
, TCGv_i64 v
)
161 tcg_gen_mov_i64(regs
[reg
], v
);
164 static void store_freg(int reg
, TCGv_i64 v
)
166 tcg_gen_mov_i64(fregs
[reg
], v
);
169 static void store_reg32_i64(int reg
, TCGv_i64 v
)
171 /* 32 bit register writes keep the upper half */
172 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
175 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
177 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
180 static void store_freg32_i64(int reg
, TCGv_i64 v
)
182 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
185 static void return_low128(TCGv_i64 dest
)
187 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
190 static void update_psw_addr(DisasContext
*s
)
193 tcg_gen_movi_i64(psw_addr
, s
->pc
);
196 static void per_branch(DisasContext
*s
, bool to_next
)
198 #ifndef CONFIG_USER_ONLY
199 tcg_gen_movi_i64(gbea
, s
->pc
);
201 if (s
->tb
->flags
& FLAG_MASK_PER
) {
202 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->next_pc
) : psw_addr
;
203 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
205 tcg_temp_free_i64(next_pc
);
211 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
212 TCGv_i64 arg1
, TCGv_i64 arg2
)
214 #ifndef CONFIG_USER_ONLY
215 if (s
->tb
->flags
& FLAG_MASK_PER
) {
216 TCGLabel
*lab
= gen_new_label();
217 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
219 tcg_gen_movi_i64(gbea
, s
->pc
);
220 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
224 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
225 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
226 tcg_temp_free_i64(pc
);
231 static void per_breaking_event(DisasContext
*s
)
233 tcg_gen_movi_i64(gbea
, s
->pc
);
236 static void update_cc_op(DisasContext
*s
)
238 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
239 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
243 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
245 return (uint64_t)cpu_lduw_code(env
, pc
);
248 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
250 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
253 static int get_mem_index(DisasContext
*s
)
255 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
256 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
258 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
260 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
268 static void gen_exception(int excp
)
270 TCGv_i32 tmp
= tcg_const_i32(excp
);
271 gen_helper_exception(cpu_env
, tmp
);
272 tcg_temp_free_i32(tmp
);
275 static void gen_program_exception(DisasContext
*s
, int code
)
279 /* Remember what pgm exeption this was. */
280 tmp
= tcg_const_i32(code
);
281 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
282 tcg_temp_free_i32(tmp
);
284 tmp
= tcg_const_i32(s
->ilen
);
285 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
286 tcg_temp_free_i32(tmp
);
294 /* Trigger exception. */
295 gen_exception(EXCP_PGM
);
298 static inline void gen_illegal_opcode(DisasContext
*s
)
300 gen_program_exception(s
, PGM_OPERATION
);
303 static inline void gen_trap(DisasContext
*s
)
307 /* Set DXC to 0xff. */
308 t
= tcg_temp_new_i32();
309 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
310 tcg_gen_ori_i32(t
, t
, 0xff00);
311 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
312 tcg_temp_free_i32(t
);
314 gen_program_exception(s
, PGM_DATA
);
317 #ifndef CONFIG_USER_ONLY
318 static void check_privileged(DisasContext
*s
)
320 if (s
->tb
->flags
& FLAG_MASK_PSTATE
) {
321 gen_program_exception(s
, PGM_PRIVILEGED
);
326 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
328 TCGv_i64 tmp
= tcg_temp_new_i64();
329 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
331 /* Note that d2 is limited to 20 bits, signed. If we crop negative
332 displacements early we create larger immedate addends. */
334 /* Note that addi optimizes the imm==0 case. */
336 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
337 tcg_gen_addi_i64(tmp
, tmp
, d2
);
339 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
341 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
347 tcg_gen_movi_i64(tmp
, d2
);
350 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
356 static inline bool live_cc_data(DisasContext
*s
)
358 return (s
->cc_op
!= CC_OP_DYNAMIC
359 && s
->cc_op
!= CC_OP_STATIC
363 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
365 if (live_cc_data(s
)) {
366 tcg_gen_discard_i64(cc_src
);
367 tcg_gen_discard_i64(cc_dst
);
368 tcg_gen_discard_i64(cc_vr
);
370 s
->cc_op
= CC_OP_CONST0
+ val
;
373 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
375 if (live_cc_data(s
)) {
376 tcg_gen_discard_i64(cc_src
);
377 tcg_gen_discard_i64(cc_vr
);
379 tcg_gen_mov_i64(cc_dst
, dst
);
383 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
386 if (live_cc_data(s
)) {
387 tcg_gen_discard_i64(cc_vr
);
389 tcg_gen_mov_i64(cc_src
, src
);
390 tcg_gen_mov_i64(cc_dst
, dst
);
394 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
395 TCGv_i64 dst
, TCGv_i64 vr
)
397 tcg_gen_mov_i64(cc_src
, src
);
398 tcg_gen_mov_i64(cc_dst
, dst
);
399 tcg_gen_mov_i64(cc_vr
, vr
);
403 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
405 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
408 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
410 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
413 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
415 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
418 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
420 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
423 /* CC value is in env->cc_op */
424 static void set_cc_static(DisasContext
*s
)
426 if (live_cc_data(s
)) {
427 tcg_gen_discard_i64(cc_src
);
428 tcg_gen_discard_i64(cc_dst
);
429 tcg_gen_discard_i64(cc_vr
);
431 s
->cc_op
= CC_OP_STATIC
;
434 /* calculates cc into cc_op */
435 static void gen_op_calc_cc(DisasContext
*s
)
437 TCGv_i32 local_cc_op
= NULL
;
438 TCGv_i64 dummy
= NULL
;
442 dummy
= tcg_const_i64(0);
456 local_cc_op
= tcg_const_i32(s
->cc_op
);
472 /* s->cc_op is the cc value */
473 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
476 /* env->cc_op already is the cc value */
491 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
496 case CC_OP_LTUGTU_32
:
497 case CC_OP_LTUGTU_64
:
504 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
519 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
522 /* unknown operation - assume 3 arguments and cc_op in env */
523 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
530 tcg_temp_free_i32(local_cc_op
);
533 tcg_temp_free_i64(dummy
);
536 /* We now have cc in cc_op as constant */
540 static bool use_exit_tb(DisasContext
*s
)
542 return (s
->singlestep_enabled
||
543 (tb_cflags(s
->tb
) & CF_LAST_IO
) ||
544 (s
->tb
->flags
& FLAG_MASK_PER
));
547 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
549 if (unlikely(use_exit_tb(s
))) {
552 #ifndef CONFIG_USER_ONLY
553 return (dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
554 (dest
& TARGET_PAGE_MASK
) == (s
->pc
& TARGET_PAGE_MASK
);
560 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
562 #ifdef DEBUG_INLINE_BRANCHES
563 inline_branch_miss
[cc_op
]++;
567 static void account_inline_branch(DisasContext
*s
, int cc_op
)
569 #ifdef DEBUG_INLINE_BRANCHES
570 inline_branch_hit
[cc_op
]++;
574 /* Table of mask values to comparison codes, given a comparison as input.
575 For such, CC=3 should not be possible. */
576 static const TCGCond ltgt_cond
[16] = {
577 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
578 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
579 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
580 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
581 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
582 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
583 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
584 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
587 /* Table of mask values to comparison codes, given a logic op as input.
588 For such, only CC=0 and CC=1 should be possible. */
589 static const TCGCond nz_cond
[16] = {
590 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
591 TCG_COND_NEVER
, TCG_COND_NEVER
,
592 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
593 TCG_COND_NE
, TCG_COND_NE
,
594 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
595 TCG_COND_EQ
, TCG_COND_EQ
,
596 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
597 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
600 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
601 details required to generate a TCG comparison. */
602 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
605 enum cc_op old_cc_op
= s
->cc_op
;
607 if (mask
== 15 || mask
== 0) {
608 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
611 c
->g1
= c
->g2
= true;
616 /* Find the TCG condition for the mask + cc op. */
622 cond
= ltgt_cond
[mask
];
623 if (cond
== TCG_COND_NEVER
) {
626 account_inline_branch(s
, old_cc_op
);
629 case CC_OP_LTUGTU_32
:
630 case CC_OP_LTUGTU_64
:
631 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
632 if (cond
== TCG_COND_NEVER
) {
635 account_inline_branch(s
, old_cc_op
);
639 cond
= nz_cond
[mask
];
640 if (cond
== TCG_COND_NEVER
) {
643 account_inline_branch(s
, old_cc_op
);
658 account_inline_branch(s
, old_cc_op
);
673 account_inline_branch(s
, old_cc_op
);
677 switch (mask
& 0xa) {
678 case 8: /* src == 0 -> no one bit found */
681 case 2: /* src != 0 -> one bit found */
687 account_inline_branch(s
, old_cc_op
);
693 case 8 | 2: /* vr == 0 */
696 case 4 | 1: /* vr != 0 */
699 case 8 | 4: /* no carry -> vr >= src */
702 case 2 | 1: /* carry -> vr < src */
708 account_inline_branch(s
, old_cc_op
);
713 /* Note that CC=0 is impossible; treat it as dont-care. */
715 case 2: /* zero -> op1 == op2 */
718 case 4 | 1: /* !zero -> op1 != op2 */
721 case 4: /* borrow (!carry) -> op1 < op2 */
724 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
730 account_inline_branch(s
, old_cc_op
);
735 /* Calculate cc value. */
740 /* Jump based on CC. We'll load up the real cond below;
741 the assignment here merely avoids a compiler warning. */
742 account_noninline_branch(s
, old_cc_op
);
743 old_cc_op
= CC_OP_STATIC
;
744 cond
= TCG_COND_NEVER
;
748 /* Load up the arguments of the comparison. */
750 c
->g1
= c
->g2
= false;
754 c
->u
.s32
.a
= tcg_temp_new_i32();
755 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
756 c
->u
.s32
.b
= tcg_const_i32(0);
759 case CC_OP_LTUGTU_32
:
762 c
->u
.s32
.a
= tcg_temp_new_i32();
763 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
764 c
->u
.s32
.b
= tcg_temp_new_i32();
765 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
772 c
->u
.s64
.b
= tcg_const_i64(0);
776 case CC_OP_LTUGTU_64
:
780 c
->g1
= c
->g2
= true;
786 c
->u
.s64
.a
= tcg_temp_new_i64();
787 c
->u
.s64
.b
= tcg_const_i64(0);
788 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
793 c
->u
.s32
.a
= tcg_temp_new_i32();
794 c
->u
.s32
.b
= tcg_temp_new_i32();
795 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
796 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
797 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
799 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
806 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
807 c
->u
.s64
.b
= tcg_const_i64(0);
819 case 0x8 | 0x4 | 0x2: /* cc != 3 */
821 c
->u
.s32
.b
= tcg_const_i32(3);
823 case 0x8 | 0x4 | 0x1: /* cc != 2 */
825 c
->u
.s32
.b
= tcg_const_i32(2);
827 case 0x8 | 0x2 | 0x1: /* cc != 1 */
829 c
->u
.s32
.b
= tcg_const_i32(1);
831 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
834 c
->u
.s32
.a
= tcg_temp_new_i32();
835 c
->u
.s32
.b
= tcg_const_i32(0);
836 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
838 case 0x8 | 0x4: /* cc < 2 */
840 c
->u
.s32
.b
= tcg_const_i32(2);
842 case 0x8: /* cc == 0 */
844 c
->u
.s32
.b
= tcg_const_i32(0);
846 case 0x4 | 0x2 | 0x1: /* cc != 0 */
848 c
->u
.s32
.b
= tcg_const_i32(0);
850 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
853 c
->u
.s32
.a
= tcg_temp_new_i32();
854 c
->u
.s32
.b
= tcg_const_i32(0);
855 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
857 case 0x4: /* cc == 1 */
859 c
->u
.s32
.b
= tcg_const_i32(1);
861 case 0x2 | 0x1: /* cc > 1 */
863 c
->u
.s32
.b
= tcg_const_i32(1);
865 case 0x2: /* cc == 2 */
867 c
->u
.s32
.b
= tcg_const_i32(2);
869 case 0x1: /* cc == 3 */
871 c
->u
.s32
.b
= tcg_const_i32(3);
874 /* CC is masked by something else: (8 >> cc) & mask. */
877 c
->u
.s32
.a
= tcg_const_i32(8);
878 c
->u
.s32
.b
= tcg_const_i32(0);
879 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
880 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
891 static void free_compare(DisasCompare
*c
)
895 tcg_temp_free_i64(c
->u
.s64
.a
);
897 tcg_temp_free_i32(c
->u
.s32
.a
);
902 tcg_temp_free_i64(c
->u
.s64
.b
);
904 tcg_temp_free_i32(c
->u
.s32
.b
);
909 /* ====================================================================== */
910 /* Define the insn format enumeration. */
911 #define F0(N) FMT_##N,
912 #define F1(N, X1) F0(N)
913 #define F2(N, X1, X2) F0(N)
914 #define F3(N, X1, X2, X3) F0(N)
915 #define F4(N, X1, X2, X3, X4) F0(N)
916 #define F5(N, X1, X2, X3, X4, X5) F0(N)
919 #include "insn-format.def"
929 /* Define a structure to hold the decoded fields. We'll store each inside
930 an array indexed by an enum. In order to conserve memory, we'll arrange
931 for fields that do not exist at the same time to overlap, thus the "C"
932 for compact. For checking purposes there is an "O" for original index
933 as well that will be applied to availability bitmaps. */
935 enum DisasFieldIndexO
{
958 enum DisasFieldIndexC
{
993 unsigned presentC
:16;
994 unsigned int presentO
;
998 /* This is the way fields are to be accessed out of DisasFields. */
999 #define have_field(S, F) have_field1((S), FLD_O_##F)
1000 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1002 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1004 return (f
->presentO
>> c
) & 1;
1007 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1008 enum DisasFieldIndexC c
)
1010 assert(have_field1(f
, o
));
1014 /* Describe the layout of each field in each format. */
1015 typedef struct DisasField
{
1017 unsigned int size
:8;
1018 unsigned int type
:2;
1019 unsigned int indexC
:6;
1020 enum DisasFieldIndexO indexO
:8;
1023 typedef struct DisasFormatInfo
{
1024 DisasField op
[NUM_C_FIELD
];
1027 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1028 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1029 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1030 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1031 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1032 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1033 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1034 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1035 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1036 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1037 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1038 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1039 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1040 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1042 #define F0(N) { { } },
1043 #define F1(N, X1) { { X1 } },
1044 #define F2(N, X1, X2) { { X1, X2 } },
1045 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1046 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1047 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1049 static const DisasFormatInfo format_info
[] = {
1050 #include "insn-format.def"
1068 /* Generally, we'll extract operands into this structures, operate upon
1069 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1070 of routines below for more details. */
1072 bool g_out
, g_out2
, g_in1
, g_in2
;
1073 TCGv_i64 out
, out2
, in1
, in2
;
1077 /* Instructions can place constraints on their operands, raising specification
1078 exceptions if they are violated. To make this easy to automate, each "in1",
1079 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1080 of the following, or 0. To make this easy to document, we'll put the
1081 SPEC_<name> defines next to <name>. */
1083 #define SPEC_r1_even 1
1084 #define SPEC_r2_even 2
1085 #define SPEC_r3_even 4
1086 #define SPEC_r1_f128 8
1087 #define SPEC_r2_f128 16
1089 /* Return values from translate_one, indicating the state of the TB. */
1091 /* Continue the TB. */
1093 /* We have emitted one or more goto_tb. No fixup required. */
1095 /* We are not using a goto_tb (for whatever reason), but have updated
1096 the PC (for whatever reason), so there's no need to do it again on
1099 /* We have updated the PC and CC values. */
1101 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1102 updated the PC for the next instruction to be executed. */
1104 /* We are exiting the TB to the main loop. */
1105 EXIT_PC_STALE_NOCHAIN
,
1106 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1107 No following code will be executed. */
1119 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1120 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1121 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1122 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1123 void (*help_cout
)(DisasContext
*, DisasOps
*);
1124 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1129 /* ====================================================================== */
1130 /* Miscellaneous helpers, used by several operations. */
1132 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1133 DisasOps
*o
, int mask
)
1135 int b2
= get_field(f
, b2
);
1136 int d2
= get_field(f
, d2
);
1139 o
->in2
= tcg_const_i64(d2
& mask
);
1141 o
->in2
= get_address(s
, 0, b2
, d2
);
1142 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1146 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1148 if (dest
== s
->next_pc
) {
1149 per_branch(s
, true);
1152 if (use_goto_tb(s
, dest
)) {
1154 per_breaking_event(s
);
1156 tcg_gen_movi_i64(psw_addr
, dest
);
1157 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1158 return EXIT_GOTO_TB
;
1160 tcg_gen_movi_i64(psw_addr
, dest
);
1161 per_branch(s
, false);
1162 return EXIT_PC_UPDATED
;
1166 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1167 bool is_imm
, int imm
, TCGv_i64 cdest
)
1170 uint64_t dest
= s
->pc
+ 2 * imm
;
1173 /* Take care of the special cases first. */
1174 if (c
->cond
== TCG_COND_NEVER
) {
1179 if (dest
== s
->next_pc
) {
1180 /* Branch to next. */
1181 per_branch(s
, true);
1185 if (c
->cond
== TCG_COND_ALWAYS
) {
1186 ret
= help_goto_direct(s
, dest
);
1191 /* E.g. bcr %r0 -> no branch. */
1195 if (c
->cond
== TCG_COND_ALWAYS
) {
1196 tcg_gen_mov_i64(psw_addr
, cdest
);
1197 per_branch(s
, false);
1198 ret
= EXIT_PC_UPDATED
;
1203 if (use_goto_tb(s
, s
->next_pc
)) {
1204 if (is_imm
&& use_goto_tb(s
, dest
)) {
1205 /* Both exits can use goto_tb. */
1208 lab
= gen_new_label();
1210 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1212 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1215 /* Branch not taken. */
1217 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1218 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1222 per_breaking_event(s
);
1224 tcg_gen_movi_i64(psw_addr
, dest
);
1225 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1229 /* Fallthru can use goto_tb, but taken branch cannot. */
1230 /* Store taken branch destination before the brcond. This
1231 avoids having to allocate a new local temp to hold it.
1232 We'll overwrite this in the not taken case anyway. */
1234 tcg_gen_mov_i64(psw_addr
, cdest
);
1237 lab
= gen_new_label();
1239 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1241 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1244 /* Branch not taken. */
1247 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1248 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1252 tcg_gen_movi_i64(psw_addr
, dest
);
1254 per_breaking_event(s
);
1255 ret
= EXIT_PC_UPDATED
;
1258 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1259 Most commonly we're single-stepping or some other condition that
1260 disables all use of goto_tb. Just update the PC and exit. */
1262 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1264 cdest
= tcg_const_i64(dest
);
1268 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1270 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1272 TCGv_i32 t0
= tcg_temp_new_i32();
1273 TCGv_i64 t1
= tcg_temp_new_i64();
1274 TCGv_i64 z
= tcg_const_i64(0);
1275 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1276 tcg_gen_extu_i32_i64(t1
, t0
);
1277 tcg_temp_free_i32(t0
);
1278 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1279 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1280 tcg_temp_free_i64(t1
);
1281 tcg_temp_free_i64(z
);
1285 tcg_temp_free_i64(cdest
);
1287 tcg_temp_free_i64(next
);
1289 ret
= EXIT_PC_UPDATED
;
1297 /* ====================================================================== */
1298 /* The operations. These perform the bulk of the work for any insn,
1299 usually after the operands have been loaded and output initialized. */
1301 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1304 z
= tcg_const_i64(0);
1305 n
= tcg_temp_new_i64();
1306 tcg_gen_neg_i64(n
, o
->in2
);
1307 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1308 tcg_temp_free_i64(n
);
1309 tcg_temp_free_i64(z
);
1313 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1315 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1319 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1321 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1325 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1327 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1328 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1332 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1334 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1338 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1343 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1345 /* The carry flag is the msb of CC, therefore the branch mask that would
1346 create that comparison is 3. Feeding the generated comparison to
1347 setcond produces the carry flag that we desire. */
1348 disas_jcc(s
, &cmp
, 3);
1349 carry
= tcg_temp_new_i64();
1351 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1353 TCGv_i32 t
= tcg_temp_new_i32();
1354 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1355 tcg_gen_extu_i32_i64(carry
, t
);
1356 tcg_temp_free_i32(t
);
1360 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1361 tcg_temp_free_i64(carry
);
1365 static ExitStatus
op_asi(DisasContext
*s
, DisasOps
*o
)
1367 o
->in1
= tcg_temp_new_i64();
1369 if (!s390_has_feat(S390_FEAT_STFLE_45
)) {
1370 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1372 /* Perform the atomic addition in memory. */
1373 tcg_gen_atomic_fetch_add_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1377 /* Recompute also for atomic case: needed for setting CC. */
1378 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1380 if (!s390_has_feat(S390_FEAT_STFLE_45
)) {
1381 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1386 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1388 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1392 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1394 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1398 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1400 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1401 return_low128(o
->out2
);
1405 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1407 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1411 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1413 int shift
= s
->insn
->data
& 0xff;
1414 int size
= s
->insn
->data
>> 8;
1415 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1418 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1419 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1420 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1422 /* Produce the CC from only the bits manipulated. */
1423 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1424 set_cc_nz_u64(s
, cc_dst
);
1428 static ExitStatus
op_ni(DisasContext
*s
, DisasOps
*o
)
1430 o
->in1
= tcg_temp_new_i64();
1432 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1433 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1435 /* Perform the atomic operation in memory. */
1436 tcg_gen_atomic_fetch_and_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1440 /* Recompute also for atomic case: needed for setting CC. */
1441 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1443 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1444 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1449 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1451 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1453 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1454 per_branch(s
, false);
1455 return EXIT_PC_UPDATED
;
1461 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1463 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1464 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1467 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1469 int m1
= get_field(s
->fields
, m1
);
1470 bool is_imm
= have_field(s
->fields
, i2
);
1471 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1474 /* BCR with R2 = 0 causes no branching */
1475 if (have_field(s
->fields
, r2
) && get_field(s
->fields
, r2
) == 0) {
1477 /* Perform serialization */
1478 /* FIXME: check for fast-BCR-serialization facility */
1479 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1482 /* Perform serialization */
1483 /* FIXME: perform checkpoint-synchronisation */
1484 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1489 disas_jcc(s
, &c
, m1
);
1490 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1493 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1495 int r1
= get_field(s
->fields
, r1
);
1496 bool is_imm
= have_field(s
->fields
, i2
);
1497 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1501 c
.cond
= TCG_COND_NE
;
1506 t
= tcg_temp_new_i64();
1507 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1508 store_reg32_i64(r1
, t
);
1509 c
.u
.s32
.a
= tcg_temp_new_i32();
1510 c
.u
.s32
.b
= tcg_const_i32(0);
1511 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1512 tcg_temp_free_i64(t
);
1514 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1517 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1519 int r1
= get_field(s
->fields
, r1
);
1520 int imm
= get_field(s
->fields
, i2
);
1524 c
.cond
= TCG_COND_NE
;
1529 t
= tcg_temp_new_i64();
1530 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1531 tcg_gen_subi_i64(t
, t
, 1);
1532 store_reg32h_i64(r1
, t
);
1533 c
.u
.s32
.a
= tcg_temp_new_i32();
1534 c
.u
.s32
.b
= tcg_const_i32(0);
1535 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1536 tcg_temp_free_i64(t
);
1538 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1541 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1543 int r1
= get_field(s
->fields
, r1
);
1544 bool is_imm
= have_field(s
->fields
, i2
);
1545 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1548 c
.cond
= TCG_COND_NE
;
1553 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1554 c
.u
.s64
.a
= regs
[r1
];
1555 c
.u
.s64
.b
= tcg_const_i64(0);
1557 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1560 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1562 int r1
= get_field(s
->fields
, r1
);
1563 int r3
= get_field(s
->fields
, r3
);
1564 bool is_imm
= have_field(s
->fields
, i2
);
1565 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1569 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1574 t
= tcg_temp_new_i64();
1575 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1576 c
.u
.s32
.a
= tcg_temp_new_i32();
1577 c
.u
.s32
.b
= tcg_temp_new_i32();
1578 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1579 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1580 store_reg32_i64(r1
, t
);
1581 tcg_temp_free_i64(t
);
1583 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1586 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1588 int r1
= get_field(s
->fields
, r1
);
1589 int r3
= get_field(s
->fields
, r3
);
1590 bool is_imm
= have_field(s
->fields
, i2
);
1591 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1594 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1597 if (r1
== (r3
| 1)) {
1598 c
.u
.s64
.b
= load_reg(r3
| 1);
1601 c
.u
.s64
.b
= regs
[r3
| 1];
1605 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1606 c
.u
.s64
.a
= regs
[r1
];
1609 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1612 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1614 int imm
, m3
= get_field(s
->fields
, m3
);
1618 c
.cond
= ltgt_cond
[m3
];
1619 if (s
->insn
->data
) {
1620 c
.cond
= tcg_unsigned_cond(c
.cond
);
1622 c
.is_64
= c
.g1
= c
.g2
= true;
1626 is_imm
= have_field(s
->fields
, i4
);
1628 imm
= get_field(s
->fields
, i4
);
1631 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1632 get_field(s
->fields
, d4
));
1635 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1638 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1640 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1645 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1647 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1652 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1654 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1659 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1661 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1662 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1663 tcg_temp_free_i32(m3
);
1664 gen_set_cc_nz_f32(s
, o
->in2
);
1668 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1670 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1671 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1672 tcg_temp_free_i32(m3
);
1673 gen_set_cc_nz_f64(s
, o
->in2
);
1677 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1679 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1680 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1681 tcg_temp_free_i32(m3
);
1682 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1686 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1688 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1689 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1690 tcg_temp_free_i32(m3
);
1691 gen_set_cc_nz_f32(s
, o
->in2
);
1695 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1697 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1698 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1699 tcg_temp_free_i32(m3
);
1700 gen_set_cc_nz_f64(s
, o
->in2
);
1704 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1706 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1707 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1708 tcg_temp_free_i32(m3
);
1709 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1713 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1715 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1716 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1717 tcg_temp_free_i32(m3
);
1718 gen_set_cc_nz_f32(s
, o
->in2
);
1722 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1724 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1725 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1726 tcg_temp_free_i32(m3
);
1727 gen_set_cc_nz_f64(s
, o
->in2
);
1731 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1733 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1734 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1735 tcg_temp_free_i32(m3
);
1736 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1740 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1742 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1743 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1744 tcg_temp_free_i32(m3
);
1745 gen_set_cc_nz_f32(s
, o
->in2
);
1749 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1751 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1752 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1753 tcg_temp_free_i32(m3
);
1754 gen_set_cc_nz_f64(s
, o
->in2
);
1758 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1760 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1761 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1762 tcg_temp_free_i32(m3
);
1763 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1767 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1769 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1770 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1771 tcg_temp_free_i32(m3
);
1775 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1777 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1778 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1779 tcg_temp_free_i32(m3
);
1783 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1785 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1786 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1787 tcg_temp_free_i32(m3
);
1788 return_low128(o
->out2
);
1792 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1794 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1795 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1796 tcg_temp_free_i32(m3
);
1800 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1802 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1803 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1804 tcg_temp_free_i32(m3
);
1808 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1810 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1811 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1812 tcg_temp_free_i32(m3
);
1813 return_low128(o
->out2
);
1817 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1819 int r2
= get_field(s
->fields
, r2
);
1820 TCGv_i64 len
= tcg_temp_new_i64();
1822 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1824 return_low128(o
->out
);
1826 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1827 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1828 tcg_temp_free_i64(len
);
1833 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1835 int l
= get_field(s
->fields
, l1
);
1840 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1841 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1844 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1845 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1848 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1849 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1852 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1853 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1856 vl
= tcg_const_i32(l
);
1857 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1858 tcg_temp_free_i32(vl
);
1862 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1866 static ExitStatus
op_clcl(DisasContext
*s
, DisasOps
*o
)
1868 int r1
= get_field(s
->fields
, r1
);
1869 int r2
= get_field(s
->fields
, r2
);
1872 /* r1 and r2 must be even. */
1873 if (r1
& 1 || r2
& 1) {
1874 gen_program_exception(s
, PGM_SPECIFICATION
);
1875 return EXIT_NORETURN
;
1878 t1
= tcg_const_i32(r1
);
1879 t2
= tcg_const_i32(r2
);
1880 gen_helper_clcl(cc_op
, cpu_env
, t1
, t2
);
1881 tcg_temp_free_i32(t1
);
1882 tcg_temp_free_i32(t2
);
1887 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1889 int r1
= get_field(s
->fields
, r1
);
1890 int r3
= get_field(s
->fields
, r3
);
1893 /* r1 and r3 must be even. */
1894 if (r1
& 1 || r3
& 1) {
1895 gen_program_exception(s
, PGM_SPECIFICATION
);
1896 return EXIT_NORETURN
;
1899 t1
= tcg_const_i32(r1
);
1900 t3
= tcg_const_i32(r3
);
1901 gen_helper_clcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1902 tcg_temp_free_i32(t1
);
1903 tcg_temp_free_i32(t3
);
1908 static ExitStatus
op_clclu(DisasContext
*s
, DisasOps
*o
)
1910 int r1
= get_field(s
->fields
, r1
);
1911 int r3
= get_field(s
->fields
, r3
);
1914 /* r1 and r3 must be even. */
1915 if (r1
& 1 || r3
& 1) {
1916 gen_program_exception(s
, PGM_SPECIFICATION
);
1917 return EXIT_NORETURN
;
1920 t1
= tcg_const_i32(r1
);
1921 t3
= tcg_const_i32(r3
);
1922 gen_helper_clclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1923 tcg_temp_free_i32(t1
);
1924 tcg_temp_free_i32(t3
);
1929 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1931 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1932 TCGv_i32 t1
= tcg_temp_new_i32();
1933 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1934 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1936 tcg_temp_free_i32(t1
);
1937 tcg_temp_free_i32(m3
);
1941 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1943 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1945 return_low128(o
->in2
);
1949 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1951 TCGv_i64 t
= tcg_temp_new_i64();
1952 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1953 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1954 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1955 tcg_temp_free_i64(t
);
1959 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1961 int d2
= get_field(s
->fields
, d2
);
1962 int b2
= get_field(s
->fields
, b2
);
1965 /* Note that in1 = R3 (new value) and
1966 in2 = (zero-extended) R1 (expected value). */
1968 addr
= get_address(s
, 0, b2
, d2
);
1969 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
1970 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
1971 tcg_temp_free_i64(addr
);
1973 /* Are the memory and expected values (un)equal? Note that this setcond
1974 produces the output CC value, thus the NE sense of the test. */
1975 cc
= tcg_temp_new_i64();
1976 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1977 tcg_gen_extrl_i64_i32(cc_op
, cc
);
1978 tcg_temp_free_i64(cc
);
1984 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1986 int r1
= get_field(s
->fields
, r1
);
1987 int r3
= get_field(s
->fields
, r3
);
1988 int d2
= get_field(s
->fields
, d2
);
1989 int b2
= get_field(s
->fields
, b2
);
1991 TCGv_i32 t_r1
, t_r3
;
1993 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1994 addr
= get_address(s
, 0, b2
, d2
);
1995 t_r1
= tcg_const_i32(r1
);
1996 t_r3
= tcg_const_i32(r3
);
1997 if (tb_cflags(s
->tb
) & CF_PARALLEL
) {
1998 gen_helper_cdsg_parallel(cpu_env
, addr
, t_r1
, t_r3
);
2000 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
2002 tcg_temp_free_i64(addr
);
2003 tcg_temp_free_i32(t_r1
);
2004 tcg_temp_free_i32(t_r3
);
2010 static ExitStatus
op_csst(DisasContext
*s
, DisasOps
*o
)
2012 int r3
= get_field(s
->fields
, r3
);
2013 TCGv_i32 t_r3
= tcg_const_i32(r3
);
2015 if (tb_cflags(s
->tb
) & CF_PARALLEL
) {
2016 gen_helper_csst_parallel(cc_op
, cpu_env
, t_r3
, o
->in1
, o
->in2
);
2018 gen_helper_csst(cc_op
, cpu_env
, t_r3
, o
->in1
, o
->in2
);
2020 tcg_temp_free_i32(t_r3
);
2026 #ifndef CONFIG_USER_ONLY
2027 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
2029 TCGMemOp mop
= s
->insn
->data
;
2030 TCGv_i64 addr
, old
, cc
;
2031 TCGLabel
*lab
= gen_new_label();
2033 /* Note that in1 = R1 (zero-extended expected value),
2034 out = R1 (original reg), out2 = R1+1 (new value). */
2036 check_privileged(s
);
2037 addr
= tcg_temp_new_i64();
2038 old
= tcg_temp_new_i64();
2039 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2040 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2041 get_mem_index(s
), mop
| MO_ALIGN
);
2042 tcg_temp_free_i64(addr
);
2044 /* Are the memory and expected values (un)equal? */
2045 cc
= tcg_temp_new_i64();
2046 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2047 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2049 /* Write back the output now, so that it happens before the
2050 following branch, so that we don't need local temps. */
2051 if ((mop
& MO_SIZE
) == MO_32
) {
2052 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2054 tcg_gen_mov_i64(o
->out
, old
);
2056 tcg_temp_free_i64(old
);
2058 /* If the comparison was equal, and the LSB of R2 was set,
2059 then we need to flush the TLB (for all cpus). */
2060 tcg_gen_xori_i64(cc
, cc
, 1);
2061 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2062 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2063 tcg_temp_free_i64(cc
);
2065 gen_helper_purge(cpu_env
);
2072 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2074 TCGv_i64 t1
= tcg_temp_new_i64();
2075 TCGv_i32 t2
= tcg_temp_new_i32();
2076 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2077 gen_helper_cvd(t1
, t2
);
2078 tcg_temp_free_i32(t2
);
2079 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2080 tcg_temp_free_i64(t1
);
2084 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2086 int m3
= get_field(s
->fields
, m3
);
2087 TCGLabel
*lab
= gen_new_label();
2090 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2091 if (s
->insn
->data
) {
2092 c
= tcg_unsigned_cond(c
);
2094 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2103 static ExitStatus
op_cuXX(DisasContext
*s
, DisasOps
*o
)
2105 int m3
= get_field(s
->fields
, m3
);
2106 int r1
= get_field(s
->fields
, r1
);
2107 int r2
= get_field(s
->fields
, r2
);
2108 TCGv_i32 tr1
, tr2
, chk
;
2110 /* R1 and R2 must both be even. */
2111 if ((r1
| r2
) & 1) {
2112 gen_program_exception(s
, PGM_SPECIFICATION
);
2113 return EXIT_NORETURN
;
2115 if (!s390_has_feat(S390_FEAT_ETF3_ENH
)) {
2119 tr1
= tcg_const_i32(r1
);
2120 tr2
= tcg_const_i32(r2
);
2121 chk
= tcg_const_i32(m3
);
2123 switch (s
->insn
->data
) {
2125 gen_helper_cu12(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2128 gen_helper_cu14(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2131 gen_helper_cu21(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2134 gen_helper_cu24(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2137 gen_helper_cu41(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2140 gen_helper_cu42(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2143 g_assert_not_reached();
2146 tcg_temp_free_i32(tr1
);
2147 tcg_temp_free_i32(tr2
);
2148 tcg_temp_free_i32(chk
);
2153 #ifndef CONFIG_USER_ONLY
2154 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2156 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2157 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2158 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2160 check_privileged(s
);
2161 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2163 tcg_temp_free_i32(func_code
);
2164 tcg_temp_free_i32(r3
);
2165 tcg_temp_free_i32(r1
);
2170 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2172 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2173 return_low128(o
->out
);
2177 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2179 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2180 return_low128(o
->out
);
2184 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2186 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2187 return_low128(o
->out
);
2191 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2193 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2194 return_low128(o
->out
);
2198 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2200 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2204 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2206 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2210 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2212 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2213 return_low128(o
->out2
);
2217 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2219 int r2
= get_field(s
->fields
, r2
);
2220 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2224 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2226 /* No cache information provided. */
2227 tcg_gen_movi_i64(o
->out
, -1);
2231 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2233 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2237 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2239 int r1
= get_field(s
->fields
, r1
);
2240 int r2
= get_field(s
->fields
, r2
);
2241 TCGv_i64 t
= tcg_temp_new_i64();
2243 /* Note the "subsequently" in the PoO, which implies a defined result
2244 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2245 tcg_gen_shri_i64(t
, psw_mask
, 32);
2246 store_reg32_i64(r1
, t
);
2248 store_reg32_i64(r2
, psw_mask
);
2251 tcg_temp_free_i64(t
);
2255 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2257 int r1
= get_field(s
->fields
, r1
);
2261 /* Nested EXECUTE is not allowed. */
2262 if (unlikely(s
->ex_value
)) {
2263 gen_program_exception(s
, PGM_EXECUTE
);
2264 return EXIT_NORETURN
;
2271 v1
= tcg_const_i64(0);
2276 ilen
= tcg_const_i32(s
->ilen
);
2277 gen_helper_ex(cpu_env
, ilen
, v1
, o
->in2
);
2278 tcg_temp_free_i32(ilen
);
2281 tcg_temp_free_i64(v1
);
2284 return EXIT_PC_CC_UPDATED
;
2287 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2289 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2290 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2291 tcg_temp_free_i32(m3
);
2295 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2297 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2298 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2299 tcg_temp_free_i32(m3
);
2303 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2305 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2306 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2307 return_low128(o
->out2
);
2308 tcg_temp_free_i32(m3
);
2312 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2314 /* We'll use the original input for cc computation, since we get to
2315 compare that against 0, which ought to be better than comparing
2316 the real output against 64. It also lets cc_dst be a convenient
2317 temporary during our computation. */
2318 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2320 /* R1 = IN ? CLZ(IN) : 64. */
2321 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2323 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2324 value by 64, which is undefined. But since the shift is 64 iff the
2325 input is zero, we still get the correct result after and'ing. */
2326 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2327 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2328 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2332 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2334 int m3
= get_field(s
->fields
, m3
);
2335 int pos
, len
, base
= s
->insn
->data
;
2336 TCGv_i64 tmp
= tcg_temp_new_i64();
2341 /* Effectively a 32-bit load. */
2342 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2349 /* Effectively a 16-bit load. */
2350 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2358 /* Effectively an 8-bit load. */
2359 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2364 pos
= base
+ ctz32(m3
) * 8;
2365 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2366 ccm
= ((1ull << len
) - 1) << pos
;
2370 /* This is going to be a sequence of loads and inserts. */
2371 pos
= base
+ 32 - 8;
2375 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2376 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2377 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2380 m3
= (m3
<< 1) & 0xf;
2386 tcg_gen_movi_i64(tmp
, ccm
);
2387 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2388 tcg_temp_free_i64(tmp
);
2392 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2394 int shift
= s
->insn
->data
& 0xff;
2395 int size
= s
->insn
->data
>> 8;
2396 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2400 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2405 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2407 t1
= tcg_temp_new_i64();
2408 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2409 tcg_gen_shri_i64(t1
, t1
, 36);
2410 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2412 tcg_gen_extu_i32_i64(t1
, cc_op
);
2413 tcg_gen_shli_i64(t1
, t1
, 28);
2414 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2415 tcg_temp_free_i64(t1
);
2419 #ifndef CONFIG_USER_ONLY
2420 static ExitStatus
op_idte(DisasContext
*s
, DisasOps
*o
)
2424 check_privileged(s
);
2425 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2426 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2428 m4
= tcg_const_i32(0);
2430 gen_helper_idte(cpu_env
, o
->in1
, o
->in2
, m4
);
2431 tcg_temp_free_i32(m4
);
2435 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2439 check_privileged(s
);
2440 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2441 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2443 m4
= tcg_const_i32(0);
2445 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
, m4
);
2446 tcg_temp_free_i32(m4
);
2450 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2452 check_privileged(s
);
2453 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2458 static ExitStatus
op_msa(DisasContext
*s
, DisasOps
*o
)
2460 int r1
= have_field(s
->fields
, r1
) ? get_field(s
->fields
, r1
) : 0;
2461 int r2
= have_field(s
->fields
, r2
) ? get_field(s
->fields
, r2
) : 0;
2462 int r3
= have_field(s
->fields
, r3
) ? get_field(s
->fields
, r3
) : 0;
2463 TCGv_i32 t_r1
, t_r2
, t_r3
, type
;
2465 switch (s
->insn
->data
) {
2466 case S390_FEAT_TYPE_KMCTR
:
2467 if (r3
& 1 || !r3
) {
2468 gen_program_exception(s
, PGM_SPECIFICATION
);
2469 return EXIT_NORETURN
;
2472 case S390_FEAT_TYPE_PPNO
:
2473 case S390_FEAT_TYPE_KMF
:
2474 case S390_FEAT_TYPE_KMC
:
2475 case S390_FEAT_TYPE_KMO
:
2476 case S390_FEAT_TYPE_KM
:
2477 if (r1
& 1 || !r1
) {
2478 gen_program_exception(s
, PGM_SPECIFICATION
);
2479 return EXIT_NORETURN
;
2482 case S390_FEAT_TYPE_KMAC
:
2483 case S390_FEAT_TYPE_KIMD
:
2484 case S390_FEAT_TYPE_KLMD
:
2485 if (r2
& 1 || !r2
) {
2486 gen_program_exception(s
, PGM_SPECIFICATION
);
2487 return EXIT_NORETURN
;
2490 case S390_FEAT_TYPE_PCKMO
:
2491 case S390_FEAT_TYPE_PCC
:
2494 g_assert_not_reached();
2497 t_r1
= tcg_const_i32(r1
);
2498 t_r2
= tcg_const_i32(r2
);
2499 t_r3
= tcg_const_i32(r3
);
2500 type
= tcg_const_i32(s
->insn
->data
);
2501 gen_helper_msa(cc_op
, cpu_env
, t_r1
, t_r2
, t_r3
, type
);
2503 tcg_temp_free_i32(t_r1
);
2504 tcg_temp_free_i32(t_r2
);
2505 tcg_temp_free_i32(t_r3
);
2506 tcg_temp_free_i32(type
);
2510 static ExitStatus
op_keb(DisasContext
*s
, DisasOps
*o
)
2512 gen_helper_keb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2517 static ExitStatus
op_kdb(DisasContext
*s
, DisasOps
*o
)
2519 gen_helper_kdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2524 static ExitStatus
op_kxb(DisasContext
*s
, DisasOps
*o
)
2526 gen_helper_kxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2531 static ExitStatus
op_laa(DisasContext
*s
, DisasOps
*o
)
2533 /* The real output is indeed the original value in memory;
2534 recompute the addition for the computation of CC. */
2535 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2536 s
->insn
->data
| MO_ALIGN
);
2537 /* However, we need to recompute the addition for setting CC. */
2538 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2542 static ExitStatus
op_lan(DisasContext
*s
, DisasOps
*o
)
2544 /* The real output is indeed the original value in memory;
2545 recompute the addition for the computation of CC. */
2546 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2547 s
->insn
->data
| MO_ALIGN
);
2548 /* However, we need to recompute the operation for setting CC. */
2549 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2553 static ExitStatus
op_lao(DisasContext
*s
, DisasOps
*o
)
2555 /* The real output is indeed the original value in memory;
2556 recompute the addition for the computation of CC. */
2557 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2558 s
->insn
->data
| MO_ALIGN
);
2559 /* However, we need to recompute the operation for setting CC. */
2560 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2564 static ExitStatus
op_lax(DisasContext
*s
, DisasOps
*o
)
2566 /* The real output is indeed the original value in memory;
2567 recompute the addition for the computation of CC. */
2568 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2569 s
->insn
->data
| MO_ALIGN
);
2570 /* However, we need to recompute the operation for setting CC. */
2571 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2575 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2577 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2581 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2583 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2587 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2589 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2593 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2595 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2599 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2601 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2602 return_low128(o
->out2
);
2606 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2608 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2609 return_low128(o
->out2
);
2613 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2615 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2619 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2621 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2625 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2627 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2631 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2633 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2637 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2639 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2643 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2645 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2649 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2651 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2655 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2657 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2661 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2663 TCGLabel
*lab
= gen_new_label();
2664 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2665 /* The value is stored even in case of trap. */
2666 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2672 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2674 TCGLabel
*lab
= gen_new_label();
2675 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2676 /* The value is stored even in case of trap. */
2677 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2683 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2685 TCGLabel
*lab
= gen_new_label();
2686 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2687 /* The value is stored even in case of trap. */
2688 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2694 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2696 TCGLabel
*lab
= gen_new_label();
2697 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2698 /* The value is stored even in case of trap. */
2699 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2705 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2707 TCGLabel
*lab
= gen_new_label();
2708 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2709 /* The value is stored even in case of trap. */
2710 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2716 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2720 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2723 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2727 TCGv_i32 t32
= tcg_temp_new_i32();
2730 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2733 t
= tcg_temp_new_i64();
2734 tcg_gen_extu_i32_i64(t
, t32
);
2735 tcg_temp_free_i32(t32
);
2737 z
= tcg_const_i64(0);
2738 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2739 tcg_temp_free_i64(t
);
2740 tcg_temp_free_i64(z
);
2746 #ifndef CONFIG_USER_ONLY
2747 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2749 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2750 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2751 check_privileged(s
);
2752 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2753 tcg_temp_free_i32(r1
);
2754 tcg_temp_free_i32(r3
);
2755 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2756 return EXIT_PC_STALE_NOCHAIN
;
2759 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2761 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2762 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2763 check_privileged(s
);
2764 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2765 tcg_temp_free_i32(r1
);
2766 tcg_temp_free_i32(r3
);
2767 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2768 return EXIT_PC_STALE_NOCHAIN
;
2771 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2773 check_privileged(s
);
2774 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2779 static ExitStatus
op_lpp(DisasContext
*s
, DisasOps
*o
)
2781 check_privileged(s
);
2783 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
2787 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2791 check_privileged(s
);
2792 per_breaking_event(s
);
2794 t1
= tcg_temp_new_i64();
2795 t2
= tcg_temp_new_i64();
2796 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2797 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2798 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2799 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2800 tcg_gen_shli_i64(t1
, t1
, 32);
2801 gen_helper_load_psw(cpu_env
, t1
, t2
);
2802 tcg_temp_free_i64(t1
);
2803 tcg_temp_free_i64(t2
);
2804 return EXIT_NORETURN
;
2807 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2811 check_privileged(s
);
2812 per_breaking_event(s
);
2814 t1
= tcg_temp_new_i64();
2815 t2
= tcg_temp_new_i64();
2816 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2817 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2818 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2819 gen_helper_load_psw(cpu_env
, t1
, t2
);
2820 tcg_temp_free_i64(t1
);
2821 tcg_temp_free_i64(t2
);
2822 return EXIT_NORETURN
;
2826 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2828 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2829 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2830 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2831 tcg_temp_free_i32(r1
);
2832 tcg_temp_free_i32(r3
);
2836 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2838 int r1
= get_field(s
->fields
, r1
);
2839 int r3
= get_field(s
->fields
, r3
);
2842 /* Only one register to read. */
2843 t1
= tcg_temp_new_i64();
2844 if (unlikely(r1
== r3
)) {
2845 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2846 store_reg32_i64(r1
, t1
);
2851 /* First load the values of the first and last registers to trigger
2852 possible page faults. */
2853 t2
= tcg_temp_new_i64();
2854 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2855 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2856 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2857 store_reg32_i64(r1
, t1
);
2858 store_reg32_i64(r3
, t2
);
2860 /* Only two registers to read. */
2861 if (((r1
+ 1) & 15) == r3
) {
2867 /* Then load the remaining registers. Page fault can't occur. */
2869 tcg_gen_movi_i64(t2
, 4);
2872 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2873 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2874 store_reg32_i64(r1
, t1
);
2882 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2884 int r1
= get_field(s
->fields
, r1
);
2885 int r3
= get_field(s
->fields
, r3
);
2888 /* Only one register to read. */
2889 t1
= tcg_temp_new_i64();
2890 if (unlikely(r1
== r3
)) {
2891 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2892 store_reg32h_i64(r1
, t1
);
2897 /* First load the values of the first and last registers to trigger
2898 possible page faults. */
2899 t2
= tcg_temp_new_i64();
2900 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2901 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2902 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2903 store_reg32h_i64(r1
, t1
);
2904 store_reg32h_i64(r3
, t2
);
2906 /* Only two registers to read. */
2907 if (((r1
+ 1) & 15) == r3
) {
2913 /* Then load the remaining registers. Page fault can't occur. */
2915 tcg_gen_movi_i64(t2
, 4);
2918 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2919 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2920 store_reg32h_i64(r1
, t1
);
2928 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2930 int r1
= get_field(s
->fields
, r1
);
2931 int r3
= get_field(s
->fields
, r3
);
2934 /* Only one register to read. */
2935 if (unlikely(r1
== r3
)) {
2936 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2940 /* First load the values of the first and last registers to trigger
2941 possible page faults. */
2942 t1
= tcg_temp_new_i64();
2943 t2
= tcg_temp_new_i64();
2944 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2945 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2946 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2947 tcg_gen_mov_i64(regs
[r1
], t1
);
2950 /* Only two registers to read. */
2951 if (((r1
+ 1) & 15) == r3
) {
2956 /* Then load the remaining registers. Page fault can't occur. */
2958 tcg_gen_movi_i64(t1
, 8);
2961 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2962 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2969 static ExitStatus
op_lpd(DisasContext
*s
, DisasOps
*o
)
2972 TCGMemOp mop
= s
->insn
->data
;
2974 /* In a parallel context, stop the world and single step. */
2975 if (tb_cflags(s
->tb
) & CF_PARALLEL
) {
2978 gen_exception(EXCP_ATOMIC
);
2979 return EXIT_NORETURN
;
2982 /* In a serial context, perform the two loads ... */
2983 a1
= get_address(s
, 0, get_field(s
->fields
, b1
), get_field(s
->fields
, d1
));
2984 a2
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
2985 tcg_gen_qemu_ld_i64(o
->out
, a1
, get_mem_index(s
), mop
| MO_ALIGN
);
2986 tcg_gen_qemu_ld_i64(o
->out2
, a2
, get_mem_index(s
), mop
| MO_ALIGN
);
2987 tcg_temp_free_i64(a1
);
2988 tcg_temp_free_i64(a2
);
2990 /* ... and indicate that we performed them while interlocked. */
2991 gen_op_movi_cc(s
, 0);
2995 static ExitStatus
op_lpq(DisasContext
*s
, DisasOps
*o
)
2997 if (tb_cflags(s
->tb
) & CF_PARALLEL
) {
2998 gen_helper_lpq_parallel(o
->out
, cpu_env
, o
->in2
);
3000 gen_helper_lpq(o
->out
, cpu_env
, o
->in2
);
3002 return_low128(o
->out2
);
3006 #ifndef CONFIG_USER_ONLY
3007 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
3009 check_privileged(s
);
3010 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
3014 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
3016 check_privileged(s
);
3017 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
3022 static ExitStatus
op_lzrb(DisasContext
*s
, DisasOps
*o
)
3024 tcg_gen_andi_i64(o
->out
, o
->in2
, -256);
3028 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
3031 o
->g_out
= o
->g_in2
;
3037 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
3039 int b2
= get_field(s
->fields
, b2
);
3040 TCGv ar1
= tcg_temp_new_i64();
3043 o
->g_out
= o
->g_in2
;
3047 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
3048 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
3049 tcg_gen_movi_i64(ar1
, 0);
3051 case PSW_ASC_ACCREG
>> FLAG_MASK_PSW_SHIFT
:
3052 tcg_gen_movi_i64(ar1
, 1);
3054 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
3056 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
3058 tcg_gen_movi_i64(ar1
, 0);
3061 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
3062 tcg_gen_movi_i64(ar1
, 2);
3066 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
3067 tcg_temp_free_i64(ar1
);
3072 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
3076 o
->g_out
= o
->g_in1
;
3077 o
->g_out2
= o
->g_in2
;
3080 o
->g_in1
= o
->g_in2
= false;
3084 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
3086 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3087 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
3088 tcg_temp_free_i32(l
);
3092 static ExitStatus
op_mvcin(DisasContext
*s
, DisasOps
*o
)
3094 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3095 gen_helper_mvcin(cpu_env
, l
, o
->addr1
, o
->in2
);
3096 tcg_temp_free_i32(l
);
3100 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
3102 int r1
= get_field(s
->fields
, r1
);
3103 int r2
= get_field(s
->fields
, r2
);
3106 /* r1 and r2 must be even. */
3107 if (r1
& 1 || r2
& 1) {
3108 gen_program_exception(s
, PGM_SPECIFICATION
);
3109 return EXIT_NORETURN
;
3112 t1
= tcg_const_i32(r1
);
3113 t2
= tcg_const_i32(r2
);
3114 gen_helper_mvcl(cc_op
, cpu_env
, t1
, t2
);
3115 tcg_temp_free_i32(t1
);
3116 tcg_temp_free_i32(t2
);
3121 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
3123 int r1
= get_field(s
->fields
, r1
);
3124 int r3
= get_field(s
->fields
, r3
);
3127 /* r1 and r3 must be even. */
3128 if (r1
& 1 || r3
& 1) {
3129 gen_program_exception(s
, PGM_SPECIFICATION
);
3130 return EXIT_NORETURN
;
3133 t1
= tcg_const_i32(r1
);
3134 t3
= tcg_const_i32(r3
);
3135 gen_helper_mvcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3136 tcg_temp_free_i32(t1
);
3137 tcg_temp_free_i32(t3
);
3142 static ExitStatus
op_mvclu(DisasContext
*s
, DisasOps
*o
)
3144 int r1
= get_field(s
->fields
, r1
);
3145 int r3
= get_field(s
->fields
, r3
);
3148 /* r1 and r3 must be even. */
3149 if (r1
& 1 || r3
& 1) {
3150 gen_program_exception(s
, PGM_SPECIFICATION
);
3151 return EXIT_NORETURN
;
3154 t1
= tcg_const_i32(r1
);
3155 t3
= tcg_const_i32(r3
);
3156 gen_helper_mvclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3157 tcg_temp_free_i32(t1
);
3158 tcg_temp_free_i32(t3
);
3163 static ExitStatus
op_mvcos(DisasContext
*s
, DisasOps
*o
)
3165 int r3
= get_field(s
->fields
, r3
);
3166 gen_helper_mvcos(cc_op
, cpu_env
, o
->addr1
, o
->in2
, regs
[r3
]);
3171 #ifndef CONFIG_USER_ONLY
3172 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
3174 int r1
= get_field(s
->fields
, l1
);
3175 check_privileged(s
);
3176 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3181 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
3183 int r1
= get_field(s
->fields
, l1
);
3184 check_privileged(s
);
3185 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3191 static ExitStatus
op_mvn(DisasContext
*s
, DisasOps
*o
)
3193 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3194 gen_helper_mvn(cpu_env
, l
, o
->addr1
, o
->in2
);
3195 tcg_temp_free_i32(l
);
3199 static ExitStatus
op_mvo(DisasContext
*s
, DisasOps
*o
)
3201 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3202 gen_helper_mvo(cpu_env
, l
, o
->addr1
, o
->in2
);
3203 tcg_temp_free_i32(l
);
3207 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
3209 gen_helper_mvpg(cc_op
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3214 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
3216 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3218 return_low128(o
->in2
);
3222 static ExitStatus
op_mvz(DisasContext
*s
, DisasOps
*o
)
3224 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3225 gen_helper_mvz(cpu_env
, l
, o
->addr1
, o
->in2
);
3226 tcg_temp_free_i32(l
);
3230 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
3232 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3236 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
3238 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
3242 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
3244 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3248 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
3250 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3254 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
3256 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3260 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
3262 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3263 return_low128(o
->out2
);
3267 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
3269 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3270 return_low128(o
->out2
);
3274 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
3276 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3277 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3278 tcg_temp_free_i64(r3
);
3282 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
3284 int r3
= get_field(s
->fields
, r3
);
3285 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3289 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
3291 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3292 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3293 tcg_temp_free_i64(r3
);
3297 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
3299 int r3
= get_field(s
->fields
, r3
);
3300 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3304 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
3307 z
= tcg_const_i64(0);
3308 n
= tcg_temp_new_i64();
3309 tcg_gen_neg_i64(n
, o
->in2
);
3310 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
3311 tcg_temp_free_i64(n
);
3312 tcg_temp_free_i64(z
);
3316 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
3318 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3322 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
3324 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3328 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
3330 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3331 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3335 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
3337 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3338 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3339 tcg_temp_free_i32(l
);
3344 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3346 tcg_gen_neg_i64(o
->out
, o
->in2
);
3350 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
3352 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3356 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
3358 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3362 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
3364 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3365 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3369 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3371 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3372 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3373 tcg_temp_free_i32(l
);
3378 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3380 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3384 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3386 int shift
= s
->insn
->data
& 0xff;
3387 int size
= s
->insn
->data
>> 8;
3388 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3391 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3392 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3394 /* Produce the CC from only the bits manipulated. */
3395 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3396 set_cc_nz_u64(s
, cc_dst
);
3400 static ExitStatus
op_oi(DisasContext
*s
, DisasOps
*o
)
3402 o
->in1
= tcg_temp_new_i64();
3404 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
3405 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
3407 /* Perform the atomic operation in memory. */
3408 tcg_gen_atomic_fetch_or_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
3412 /* Recompute also for atomic case: needed for setting CC. */
3413 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3415 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
3416 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
3421 static ExitStatus
op_pack(DisasContext
*s
, DisasOps
*o
)
3423 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3424 gen_helper_pack(cpu_env
, l
, o
->addr1
, o
->in2
);
3425 tcg_temp_free_i32(l
);
3429 static ExitStatus
op_pka(DisasContext
*s
, DisasOps
*o
)
3431 int l2
= get_field(s
->fields
, l2
) + 1;
3434 /* The length must not exceed 32 bytes. */
3436 gen_program_exception(s
, PGM_SPECIFICATION
);
3437 return EXIT_NORETURN
;
3439 l
= tcg_const_i32(l2
);
3440 gen_helper_pka(cpu_env
, o
->addr1
, o
->in2
, l
);
3441 tcg_temp_free_i32(l
);
3445 static ExitStatus
op_pku(DisasContext
*s
, DisasOps
*o
)
3447 int l2
= get_field(s
->fields
, l2
) + 1;
3450 /* The length must be even and should not exceed 64 bytes. */
3451 if ((l2
& 1) || (l2
> 64)) {
3452 gen_program_exception(s
, PGM_SPECIFICATION
);
3453 return EXIT_NORETURN
;
3455 l
= tcg_const_i32(l2
);
3456 gen_helper_pku(cpu_env
, o
->addr1
, o
->in2
, l
);
3457 tcg_temp_free_i32(l
);
3461 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3463 gen_helper_popcnt(o
->out
, o
->in2
);
3467 #ifndef CONFIG_USER_ONLY
3468 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3470 check_privileged(s
);
3471 gen_helper_ptlb(cpu_env
);
3476 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3478 int i3
= get_field(s
->fields
, i3
);
3479 int i4
= get_field(s
->fields
, i4
);
3480 int i5
= get_field(s
->fields
, i5
);
3481 int do_zero
= i4
& 0x80;
3482 uint64_t mask
, imask
, pmask
;
3485 /* Adjust the arguments for the specific insn. */
3486 switch (s
->fields
->op2
) {
3487 case 0x55: /* risbg */
3488 case 0x59: /* risbgn */
3493 case 0x5d: /* risbhg */
3496 pmask
= 0xffffffff00000000ull
;
3498 case 0x51: /* risblg */
3501 pmask
= 0x00000000ffffffffull
;
3504 g_assert_not_reached();
3507 /* MASK is the set of bits to be inserted from R2.
3508 Take care for I3/I4 wraparound. */
3511 mask
^= pmask
>> i4
>> 1;
3513 mask
|= ~(pmask
>> i4
>> 1);
3517 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3518 insns, we need to keep the other half of the register. */
3519 imask
= ~mask
| ~pmask
;
3527 if (s
->fields
->op2
== 0x5d) {
3531 /* In some cases we can implement this with extract. */
3532 if (imask
== 0 && pos
== 0 && len
> 0 && len
<= rot
) {
3533 tcg_gen_extract_i64(o
->out
, o
->in2
, 64 - rot
, len
);
3537 /* In some cases we can implement this with deposit. */
3538 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3539 /* Note that we rotate the bits to be inserted to the lsb, not to
3540 the position as described in the PoO. */
3541 rot
= (rot
- pos
) & 63;
3546 /* Rotate the input as necessary. */
3547 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3549 /* Insert the selected bits into the output. */
3552 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3554 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3556 } else if (imask
== 0) {
3557 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3559 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3560 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3561 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3566 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3568 int i3
= get_field(s
->fields
, i3
);
3569 int i4
= get_field(s
->fields
, i4
);
3570 int i5
= get_field(s
->fields
, i5
);
3573 /* If this is a test-only form, arrange to discard the result. */
3575 o
->out
= tcg_temp_new_i64();
3583 /* MASK is the set of bits to be operated on from R2.
3584 Take care for I3/I4 wraparound. */
3587 mask
^= ~0ull >> i4
>> 1;
3589 mask
|= ~(~0ull >> i4
>> 1);
3592 /* Rotate the input as necessary. */
3593 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3596 switch (s
->fields
->op2
) {
3597 case 0x55: /* AND */
3598 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3599 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3602 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3603 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3605 case 0x57: /* XOR */
3606 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3607 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3614 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3615 set_cc_nz_u64(s
, cc_dst
);
3619 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
3621 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3625 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
3627 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3631 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
3633 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3637 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3639 TCGv_i32 t1
= tcg_temp_new_i32();
3640 TCGv_i32 t2
= tcg_temp_new_i32();
3641 TCGv_i32 to
= tcg_temp_new_i32();
3642 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3643 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3644 tcg_gen_rotl_i32(to
, t1
, t2
);
3645 tcg_gen_extu_i32_i64(o
->out
, to
);
3646 tcg_temp_free_i32(t1
);
3647 tcg_temp_free_i32(t2
);
3648 tcg_temp_free_i32(to
);
3652 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3654 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3658 #ifndef CONFIG_USER_ONLY
3659 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3661 check_privileged(s
);
3662 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3667 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
3669 check_privileged(s
);
3670 gen_helper_sacf(cpu_env
, o
->in2
);
3671 /* Addressing mode has changed, so end the block. */
3672 return EXIT_PC_STALE
;
3676 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3678 int sam
= s
->insn
->data
;
3694 /* Bizarre but true, we check the address of the current insn for the
3695 specification exception, not the next to be executed. Thus the PoO
3696 documents that Bad Things Happen two bytes before the end. */
3697 if (s
->pc
& ~mask
) {
3698 gen_program_exception(s
, PGM_SPECIFICATION
);
3699 return EXIT_NORETURN
;
3703 tsam
= tcg_const_i64(sam
);
3704 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3705 tcg_temp_free_i64(tsam
);
3707 /* Always exit the TB, since we (may have) changed execution mode. */
3708 return EXIT_PC_STALE
;
3711 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3713 int r1
= get_field(s
->fields
, r1
);
3714 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3718 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3720 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3724 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3726 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3730 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3732 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3733 return_low128(o
->out2
);
3737 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3739 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3743 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3745 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3749 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3751 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3752 return_low128(o
->out2
);
3756 #ifndef CONFIG_USER_ONLY
3757 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3759 check_privileged(s
);
3760 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3765 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3767 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3768 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3769 check_privileged(s
);
3770 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, r3
);
3772 tcg_temp_free_i32(r1
);
3773 tcg_temp_free_i32(r3
);
3778 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3785 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3787 /* We want to store when the condition is fulfilled, so branch
3788 out when it's not */
3789 c
.cond
= tcg_invert_cond(c
.cond
);
3791 lab
= gen_new_label();
3793 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3795 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3799 r1
= get_field(s
->fields
, r1
);
3800 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3801 switch (s
->insn
->data
) {
3803 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3806 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3808 case 2: /* STOCFH */
3809 h
= tcg_temp_new_i64();
3810 tcg_gen_shri_i64(h
, regs
[r1
], 32);
3811 tcg_gen_qemu_st32(h
, a
, get_mem_index(s
));
3812 tcg_temp_free_i64(h
);
3815 g_assert_not_reached();
3817 tcg_temp_free_i64(a
);
3823 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3825 uint64_t sign
= 1ull << s
->insn
->data
;
3826 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3827 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3828 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3829 /* The arithmetic left shift is curious in that it does not affect
3830 the sign bit. Copy that over from the source unchanged. */
3831 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3832 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3833 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3837 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3839 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3843 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3845 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3849 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3851 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3855 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3857 gen_helper_sfpc(cpu_env
, o
->in2
);
3861 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3863 gen_helper_sfas(cpu_env
, o
->in2
);
3867 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3869 int b2
= get_field(s
->fields
, b2
);
3870 int d2
= get_field(s
->fields
, d2
);
3871 TCGv_i64 t1
= tcg_temp_new_i64();
3872 TCGv_i64 t2
= tcg_temp_new_i64();
3875 switch (s
->fields
->op2
) {
3876 case 0x99: /* SRNM */
3879 case 0xb8: /* SRNMB */
3882 case 0xb9: /* SRNMT */
3888 mask
= (1 << len
) - 1;
3890 /* Insert the value into the appropriate field of the FPC. */
3892 tcg_gen_movi_i64(t1
, d2
& mask
);
3894 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3895 tcg_gen_andi_i64(t1
, t1
, mask
);
3897 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3898 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3899 tcg_temp_free_i64(t1
);
3901 /* Then install the new FPC to set the rounding mode in fpu_status. */
3902 gen_helper_sfpc(cpu_env
, t2
);
3903 tcg_temp_free_i64(t2
);
3907 static ExitStatus
op_spm(DisasContext
*s
, DisasOps
*o
)
3909 tcg_gen_extrl_i64_i32(cc_op
, o
->in1
);
3910 tcg_gen_extract_i32(cc_op
, cc_op
, 28, 2);
3913 tcg_gen_shri_i64(o
->in1
, o
->in1
, 24);
3914 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in1
, PSW_SHIFT_MASK_PM
, 4);
3918 static ExitStatus
op_ectg(DisasContext
*s
, DisasOps
*o
)
3920 int b1
= get_field(s
->fields
, b1
);
3921 int d1
= get_field(s
->fields
, d1
);
3922 int b2
= get_field(s
->fields
, b2
);
3923 int d2
= get_field(s
->fields
, d2
);
3924 int r3
= get_field(s
->fields
, r3
);
3925 TCGv_i64 tmp
= tcg_temp_new_i64();
3927 /* fetch all operands first */
3928 o
->in1
= tcg_temp_new_i64();
3929 tcg_gen_addi_i64(o
->in1
, regs
[b1
], d1
);
3930 o
->in2
= tcg_temp_new_i64();
3931 tcg_gen_addi_i64(o
->in2
, regs
[b2
], d2
);
3932 o
->addr1
= get_address(s
, 0, r3
, 0);
3934 /* load the third operand into r3 before modifying anything */
3935 tcg_gen_qemu_ld64(regs
[r3
], o
->addr1
, get_mem_index(s
));
3937 /* subtract CPU timer from first operand and store in GR0 */
3938 gen_helper_stpt(tmp
, cpu_env
);
3939 tcg_gen_sub_i64(regs
[0], o
->in1
, tmp
);
3941 /* store second operand in GR1 */
3942 tcg_gen_mov_i64(regs
[1], o
->in2
);
3944 tcg_temp_free_i64(tmp
);
3948 #ifndef CONFIG_USER_ONLY
3949 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3951 check_privileged(s
);
3952 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3953 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
, 4);
3957 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3959 check_privileged(s
);
3960 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3964 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3966 check_privileged(s
);
3967 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3968 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3969 return EXIT_PC_STALE_NOCHAIN
;
3972 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3974 check_privileged(s
);
3975 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, core_id
));
3979 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3981 gen_helper_stck(o
->out
, cpu_env
);
3982 /* ??? We don't implement clock states. */
3983 gen_op_movi_cc(s
, 0);
3987 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3989 TCGv_i64 c1
= tcg_temp_new_i64();
3990 TCGv_i64 c2
= tcg_temp_new_i64();
3991 TCGv_i64 todpr
= tcg_temp_new_i64();
3992 gen_helper_stck(c1
, cpu_env
);
3993 /* 16 bit value store in an uint32_t (only valid bits set) */
3994 tcg_gen_ld32u_i64(todpr
, cpu_env
, offsetof(CPUS390XState
, todpr
));
3995 /* Shift the 64-bit value into its place as a zero-extended
3996 104-bit value. Note that "bit positions 64-103 are always
3997 non-zero so that they compare differently to STCK"; we set
3998 the least significant bit to 1. */
3999 tcg_gen_shli_i64(c2
, c1
, 56);
4000 tcg_gen_shri_i64(c1
, c1
, 8);
4001 tcg_gen_ori_i64(c2
, c2
, 0x10000);
4002 tcg_gen_or_i64(c2
, c2
, todpr
);
4003 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
4004 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
4005 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
4006 tcg_temp_free_i64(c1
);
4007 tcg_temp_free_i64(c2
);
4008 tcg_temp_free_i64(todpr
);
4009 /* ??? We don't implement clock states. */
4010 gen_op_movi_cc(s
, 0);
4014 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
4016 check_privileged(s
);
4017 gen_helper_sckc(cpu_env
, o
->in2
);
4021 static ExitStatus
op_sckpf(DisasContext
*s
, DisasOps
*o
)
4023 check_privileged(s
);
4024 gen_helper_sckpf(cpu_env
, regs
[0]);
4028 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
4030 check_privileged(s
);
4031 gen_helper_stckc(o
->out
, cpu_env
);
4035 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
4037 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4038 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4039 check_privileged(s
);
4040 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
4041 tcg_temp_free_i32(r1
);
4042 tcg_temp_free_i32(r3
);
4046 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
4048 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4049 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4050 check_privileged(s
);
4051 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
4052 tcg_temp_free_i32(r1
);
4053 tcg_temp_free_i32(r3
);
4057 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
4059 check_privileged(s
);
4060 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpuid
));
4061 tcg_gen_qemu_st_i64(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
4065 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
4067 check_privileged(s
);
4068 gen_helper_spt(cpu_env
, o
->in2
);
4072 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
4074 check_privileged(s
);
4075 gen_helper_stfl(cpu_env
);
4079 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
4081 check_privileged(s
);
4082 gen_helper_stpt(o
->out
, cpu_env
);
4086 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
4088 check_privileged(s
);
4089 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
4094 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
4096 check_privileged(s
);
4097 gen_helper_spx(cpu_env
, o
->in2
);
4101 static ExitStatus
op_xsch(DisasContext
*s
, DisasOps
*o
)
4103 check_privileged(s
);
4104 gen_helper_xsch(cpu_env
, regs
[1]);
4109 static ExitStatus
op_csch(DisasContext
*s
, DisasOps
*o
)
4111 check_privileged(s
);
4112 gen_helper_csch(cpu_env
, regs
[1]);
4117 static ExitStatus
op_hsch(DisasContext
*s
, DisasOps
*o
)
4119 check_privileged(s
);
4120 gen_helper_hsch(cpu_env
, regs
[1]);
4125 static ExitStatus
op_msch(DisasContext
*s
, DisasOps
*o
)
4127 check_privileged(s
);
4128 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
4133 static ExitStatus
op_rchp(DisasContext
*s
, DisasOps
*o
)
4135 check_privileged(s
);
4136 gen_helper_rchp(cpu_env
, regs
[1]);
4141 static ExitStatus
op_rsch(DisasContext
*s
, DisasOps
*o
)
4143 check_privileged(s
);
4144 gen_helper_rsch(cpu_env
, regs
[1]);
4149 static ExitStatus
op_sal(DisasContext
*s
, DisasOps
*o
)
4151 check_privileged(s
);
4152 gen_helper_sal(cpu_env
, regs
[1]);
4156 static ExitStatus
op_schm(DisasContext
*s
, DisasOps
*o
)
4158 check_privileged(s
);
4159 gen_helper_schm(cpu_env
, regs
[1], regs
[2], o
->in2
);
4163 static ExitStatus
op_siga(DisasContext
*s
, DisasOps
*o
)
4165 check_privileged(s
);
4166 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4167 gen_op_movi_cc(s
, 3);
4171 static ExitStatus
op_stcps(DisasContext
*s
, DisasOps
*o
)
4173 check_privileged(s
);
4174 /* The instruction is suppressed if not provided. */
4178 static ExitStatus
op_ssch(DisasContext
*s
, DisasOps
*o
)
4180 check_privileged(s
);
4181 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
4186 static ExitStatus
op_stsch(DisasContext
*s
, DisasOps
*o
)
4188 check_privileged(s
);
4189 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
4194 static ExitStatus
op_stcrw(DisasContext
*s
, DisasOps
*o
)
4196 check_privileged(s
);
4197 gen_helper_stcrw(cpu_env
, o
->in2
);
4202 static ExitStatus
op_tpi(DisasContext
*s
, DisasOps
*o
)
4204 check_privileged(s
);
4205 gen_helper_tpi(cc_op
, cpu_env
, o
->addr1
);
4210 static ExitStatus
op_tsch(DisasContext
*s
, DisasOps
*o
)
4212 check_privileged(s
);
4213 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
4218 static ExitStatus
op_chsc(DisasContext
*s
, DisasOps
*o
)
4220 check_privileged(s
);
4221 gen_helper_chsc(cpu_env
, o
->in2
);
4226 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
4228 check_privileged(s
);
4229 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
4230 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
4234 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
4236 uint64_t i2
= get_field(s
->fields
, i2
);
4239 check_privileged(s
);
4241 /* It is important to do what the instruction name says: STORE THEN.
4242 If we let the output hook perform the store then if we fault and
4243 restart, we'll have the wrong SYSTEM MASK in place. */
4244 t
= tcg_temp_new_i64();
4245 tcg_gen_shri_i64(t
, psw_mask
, 56);
4246 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
4247 tcg_temp_free_i64(t
);
4249 if (s
->fields
->op
== 0xac) {
4250 tcg_gen_andi_i64(psw_mask
, psw_mask
,
4251 (i2
<< 56) | 0x00ffffffffffffffull
);
4253 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
4256 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4257 return EXIT_PC_STALE_NOCHAIN
;
4260 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
4262 check_privileged(s
);
4263 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
4267 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
4269 check_privileged(s
);
4270 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
4275 static ExitStatus
op_stfle(DisasContext
*s
, DisasOps
*o
)
4277 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
4282 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
4284 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
4288 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
4290 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
4294 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
4296 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
4300 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
4302 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
4306 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
4308 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4309 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4310 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
4311 tcg_temp_free_i32(r1
);
4312 tcg_temp_free_i32(r3
);
4316 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
4318 int m3
= get_field(s
->fields
, m3
);
4319 int pos
, base
= s
->insn
->data
;
4320 TCGv_i64 tmp
= tcg_temp_new_i64();
4322 pos
= base
+ ctz32(m3
) * 8;
4325 /* Effectively a 32-bit store. */
4326 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4327 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
4333 /* Effectively a 16-bit store. */
4334 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4335 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
4342 /* Effectively an 8-bit store. */
4343 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4344 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4348 /* This is going to be a sequence of shifts and stores. */
4349 pos
= base
+ 32 - 8;
4352 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4353 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4354 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
4356 m3
= (m3
<< 1) & 0xf;
4361 tcg_temp_free_i64(tmp
);
4365 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
4367 int r1
= get_field(s
->fields
, r1
);
4368 int r3
= get_field(s
->fields
, r3
);
4369 int size
= s
->insn
->data
;
4370 TCGv_i64 tsize
= tcg_const_i64(size
);
4374 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
4376 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
4381 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
4385 tcg_temp_free_i64(tsize
);
4389 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
4391 int r1
= get_field(s
->fields
, r1
);
4392 int r3
= get_field(s
->fields
, r3
);
4393 TCGv_i64 t
= tcg_temp_new_i64();
4394 TCGv_i64 t4
= tcg_const_i64(4);
4395 TCGv_i64 t32
= tcg_const_i64(32);
4398 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
4399 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
4403 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
4407 tcg_temp_free_i64(t
);
4408 tcg_temp_free_i64(t4
);
4409 tcg_temp_free_i64(t32
);
4413 static ExitStatus
op_stpq(DisasContext
*s
, DisasOps
*o
)
4415 if (tb_cflags(s
->tb
) & CF_PARALLEL
) {
4416 gen_helper_stpq_parallel(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4418 gen_helper_stpq(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4423 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
4425 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4426 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4428 gen_helper_srst(cpu_env
, r1
, r2
);
4430 tcg_temp_free_i32(r1
);
4431 tcg_temp_free_i32(r2
);
4436 static ExitStatus
op_srstu(DisasContext
*s
, DisasOps
*o
)
4438 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4439 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4441 gen_helper_srstu(cpu_env
, r1
, r2
);
4443 tcg_temp_free_i32(r1
);
4444 tcg_temp_free_i32(r2
);
4449 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
4451 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4455 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
4460 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4462 /* The !borrow flag is the msb of CC. Since we want the inverse of
4463 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4464 disas_jcc(s
, &cmp
, 8 | 4);
4465 borrow
= tcg_temp_new_i64();
4467 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
4469 TCGv_i32 t
= tcg_temp_new_i32();
4470 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
4471 tcg_gen_extu_i32_i64(borrow
, t
);
4472 tcg_temp_free_i32(t
);
4476 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
4477 tcg_temp_free_i64(borrow
);
4481 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
4488 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
4489 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4490 tcg_temp_free_i32(t
);
4492 t
= tcg_const_i32(s
->ilen
);
4493 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4494 tcg_temp_free_i32(t
);
4496 gen_exception(EXCP_SVC
);
4497 return EXIT_NORETURN
;
4500 static ExitStatus
op_tam(DisasContext
*s
, DisasOps
*o
)
4504 cc
|= (s
->tb
->flags
& FLAG_MASK_64
) ? 2 : 0;
4505 cc
|= (s
->tb
->flags
& FLAG_MASK_32
) ? 1 : 0;
4506 gen_op_movi_cc(s
, cc
);
4510 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
4512 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4517 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4519 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4524 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4526 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4531 #ifndef CONFIG_USER_ONLY
4533 static ExitStatus
op_testblock(DisasContext
*s
, DisasOps
*o
)
4535 check_privileged(s
);
4536 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4541 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
4543 gen_helper_tprot(cc_op
, cpu_env
, o
->addr1
, o
->in2
);
4550 static ExitStatus
op_tp(DisasContext
*s
, DisasOps
*o
)
4552 TCGv_i32 l1
= tcg_const_i32(get_field(s
->fields
, l1
) + 1);
4553 gen_helper_tp(cc_op
, cpu_env
, o
->addr1
, l1
);
4554 tcg_temp_free_i32(l1
);
4559 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
4561 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4562 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4563 tcg_temp_free_i32(l
);
4568 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
4570 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4571 return_low128(o
->out2
);
4576 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
4578 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4579 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4580 tcg_temp_free_i32(l
);
4585 static ExitStatus
op_trtr(DisasContext
*s
, DisasOps
*o
)
4587 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4588 gen_helper_trtr(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4589 tcg_temp_free_i32(l
);
4594 static ExitStatus
op_trXX(DisasContext
*s
, DisasOps
*o
)
4596 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4597 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4598 TCGv_i32 sizes
= tcg_const_i32(s
->insn
->opc
& 3);
4599 TCGv_i32 tst
= tcg_temp_new_i32();
4600 int m3
= get_field(s
->fields
, m3
);
4602 if (!s390_has_feat(S390_FEAT_ETF2_ENH
)) {
4606 tcg_gen_movi_i32(tst
, -1);
4608 tcg_gen_extrl_i64_i32(tst
, regs
[0]);
4609 if (s
->insn
->opc
& 3) {
4610 tcg_gen_ext8u_i32(tst
, tst
);
4612 tcg_gen_ext16u_i32(tst
, tst
);
4615 gen_helper_trXX(cc_op
, cpu_env
, r1
, r2
, tst
, sizes
);
4617 tcg_temp_free_i32(r1
);
4618 tcg_temp_free_i32(r2
);
4619 tcg_temp_free_i32(sizes
);
4620 tcg_temp_free_i32(tst
);
4625 static ExitStatus
op_ts(DisasContext
*s
, DisasOps
*o
)
4627 TCGv_i32 t1
= tcg_const_i32(0xff);
4628 tcg_gen_atomic_xchg_i32(t1
, o
->in2
, t1
, get_mem_index(s
), MO_UB
);
4629 tcg_gen_extract_i32(cc_op
, t1
, 7, 1);
4630 tcg_temp_free_i32(t1
);
4635 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
4637 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4638 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4639 tcg_temp_free_i32(l
);
4643 static ExitStatus
op_unpka(DisasContext
*s
, DisasOps
*o
)
4645 int l1
= get_field(s
->fields
, l1
) + 1;
4648 /* The length must not exceed 32 bytes. */
4650 gen_program_exception(s
, PGM_SPECIFICATION
);
4651 return EXIT_NORETURN
;
4653 l
= tcg_const_i32(l1
);
4654 gen_helper_unpka(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4655 tcg_temp_free_i32(l
);
4660 static ExitStatus
op_unpku(DisasContext
*s
, DisasOps
*o
)
4662 int l1
= get_field(s
->fields
, l1
) + 1;
4665 /* The length must be even and should not exceed 64 bytes. */
4666 if ((l1
& 1) || (l1
> 64)) {
4667 gen_program_exception(s
, PGM_SPECIFICATION
);
4668 return EXIT_NORETURN
;
4670 l
= tcg_const_i32(l1
);
4671 gen_helper_unpku(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4672 tcg_temp_free_i32(l
);
4678 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
4680 int d1
= get_field(s
->fields
, d1
);
4681 int d2
= get_field(s
->fields
, d2
);
4682 int b1
= get_field(s
->fields
, b1
);
4683 int b2
= get_field(s
->fields
, b2
);
4684 int l
= get_field(s
->fields
, l1
);
4687 o
->addr1
= get_address(s
, 0, b1
, d1
);
4689 /* If the addresses are identical, this is a store/memset of zero. */
4690 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4691 o
->in2
= tcg_const_i64(0);
4695 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4698 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4702 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4705 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4709 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4712 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4716 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4718 gen_op_movi_cc(s
, 0);
4722 /* But in general we'll defer to a helper. */
4723 o
->in2
= get_address(s
, 0, b2
, d2
);
4724 t32
= tcg_const_i32(l
);
4725 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4726 tcg_temp_free_i32(t32
);
4731 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
4733 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4737 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
4739 int shift
= s
->insn
->data
& 0xff;
4740 int size
= s
->insn
->data
>> 8;
4741 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4744 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4745 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4747 /* Produce the CC from only the bits manipulated. */
4748 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4749 set_cc_nz_u64(s
, cc_dst
);
4753 static ExitStatus
op_xi(DisasContext
*s
, DisasOps
*o
)
4755 o
->in1
= tcg_temp_new_i64();
4757 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
4758 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
4760 /* Perform the atomic operation in memory. */
4761 tcg_gen_atomic_fetch_xor_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
4765 /* Recompute also for atomic case: needed for setting CC. */
4766 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4768 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
4769 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
4774 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
4776 o
->out
= tcg_const_i64(0);
4780 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
4782 o
->out
= tcg_const_i64(0);
4788 #ifndef CONFIG_USER_ONLY
4789 static ExitStatus
op_clp(DisasContext
*s
, DisasOps
*o
)
4791 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4793 check_privileged(s
);
4794 gen_helper_clp(cpu_env
, r2
);
4795 tcg_temp_free_i32(r2
);
4800 static ExitStatus
op_pcilg(DisasContext
*s
, DisasOps
*o
)
4802 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4803 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4805 check_privileged(s
);
4806 gen_helper_pcilg(cpu_env
, r1
, r2
);
4807 tcg_temp_free_i32(r1
);
4808 tcg_temp_free_i32(r2
);
4813 static ExitStatus
op_pcistg(DisasContext
*s
, DisasOps
*o
)
4815 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4816 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4818 check_privileged(s
);
4819 gen_helper_pcistg(cpu_env
, r1
, r2
);
4820 tcg_temp_free_i32(r1
);
4821 tcg_temp_free_i32(r2
);
4826 static ExitStatus
op_stpcifc(DisasContext
*s
, DisasOps
*o
)
4828 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4829 TCGv_i32 ar
= tcg_const_i32(get_field(s
->fields
, b2
));
4831 check_privileged(s
);
4832 gen_helper_stpcifc(cpu_env
, r1
, o
->addr1
, ar
);
4833 tcg_temp_free_i32(ar
);
4834 tcg_temp_free_i32(r1
);
4839 static ExitStatus
op_sic(DisasContext
*s
, DisasOps
*o
)
4841 check_privileged(s
);
4842 gen_helper_sic(cpu_env
, o
->in1
, o
->in2
);
4846 static ExitStatus
op_rpcit(DisasContext
*s
, DisasOps
*o
)
4848 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4849 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4851 check_privileged(s
);
4852 gen_helper_rpcit(cpu_env
, r1
, r2
);
4853 tcg_temp_free_i32(r1
);
4854 tcg_temp_free_i32(r2
);
4859 static ExitStatus
op_pcistb(DisasContext
*s
, DisasOps
*o
)
4861 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4862 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4863 TCGv_i32 ar
= tcg_const_i32(get_field(s
->fields
, b2
));
4865 check_privileged(s
);
4866 gen_helper_pcistb(cpu_env
, r1
, r3
, o
->addr1
, ar
);
4867 tcg_temp_free_i32(ar
);
4868 tcg_temp_free_i32(r1
);
4869 tcg_temp_free_i32(r3
);
4874 static ExitStatus
op_mpcifc(DisasContext
*s
, DisasOps
*o
)
4876 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4877 TCGv_i32 ar
= tcg_const_i32(get_field(s
->fields
, b2
));
4879 check_privileged(s
);
4880 gen_helper_mpcifc(cpu_env
, r1
, o
->addr1
, ar
);
4881 tcg_temp_free_i32(ar
);
4882 tcg_temp_free_i32(r1
);
4888 /* ====================================================================== */
4889 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4890 the original inputs), update the various cc data structures in order to
4891 be able to compute the new condition code. */
4893 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4895 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4898 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4900 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
4903 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
4905 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
4908 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4910 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4913 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4915 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4918 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4920 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4923 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4925 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4928 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4930 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
4933 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
4935 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
4938 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4940 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4943 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4945 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4948 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4950 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4953 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4955 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4958 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4960 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4963 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4965 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4968 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4970 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4973 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4975 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4978 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4980 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4983 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4985 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
4988 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
4990 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
4991 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
4994 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
4996 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
4999 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
5001 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
5004 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
5006 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
5009 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
5011 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
5014 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
5016 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
5019 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
5021 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
5024 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
5026 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
5029 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
5031 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
5034 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
5036 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
5039 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
5041 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
5044 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
5046 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
5049 /* ====================================================================== */
5050 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5051 with the TCG register to which we will write. Used in combination with
5052 the "wout" generators, in some cases we need a new temporary, and in
5053 some cases we can write to a TCG global. */
5055 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5057 o
->out
= tcg_temp_new_i64();
5059 #define SPEC_prep_new 0
5061 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5063 o
->out
= tcg_temp_new_i64();
5064 o
->out2
= tcg_temp_new_i64();
5066 #define SPEC_prep_new_P 0
5068 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5070 o
->out
= regs
[get_field(f
, r1
)];
5073 #define SPEC_prep_r1 0
5075 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5077 int r1
= get_field(f
, r1
);
5079 o
->out2
= regs
[r1
+ 1];
5080 o
->g_out
= o
->g_out2
= true;
5082 #define SPEC_prep_r1_P SPEC_r1_even
5084 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5086 o
->out
= fregs
[get_field(f
, r1
)];
5089 #define SPEC_prep_f1 0
5091 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5093 int r1
= get_field(f
, r1
);
5095 o
->out2
= fregs
[r1
+ 2];
5096 o
->g_out
= o
->g_out2
= true;
5098 #define SPEC_prep_x1 SPEC_r1_f128
5100 /* ====================================================================== */
5101 /* The "Write OUTput" generators. These generally perform some non-trivial
5102 copy of data to TCG globals, or to main memory. The trivial cases are
5103 generally handled by having a "prep" generator install the TCG global
5104 as the destination of the operation. */
5106 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5108 store_reg(get_field(f
, r1
), o
->out
);
5110 #define SPEC_wout_r1 0
5112 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5114 int r1
= get_field(f
, r1
);
5115 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
5117 #define SPEC_wout_r1_8 0
5119 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5121 int r1
= get_field(f
, r1
);
5122 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
5124 #define SPEC_wout_r1_16 0
5126 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5128 store_reg32_i64(get_field(f
, r1
), o
->out
);
5130 #define SPEC_wout_r1_32 0
5132 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5134 store_reg32h_i64(get_field(f
, r1
), o
->out
);
5136 #define SPEC_wout_r1_32h 0
5138 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5140 int r1
= get_field(f
, r1
);
5141 store_reg32_i64(r1
, o
->out
);
5142 store_reg32_i64(r1
+ 1, o
->out2
);
5144 #define SPEC_wout_r1_P32 SPEC_r1_even
5146 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5148 int r1
= get_field(f
, r1
);
5149 store_reg32_i64(r1
+ 1, o
->out
);
5150 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
5151 store_reg32_i64(r1
, o
->out
);
5153 #define SPEC_wout_r1_D32 SPEC_r1_even
5155 static void wout_r3_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5157 int r3
= get_field(f
, r3
);
5158 store_reg32_i64(r3
, o
->out
);
5159 store_reg32_i64(r3
+ 1, o
->out2
);
5161 #define SPEC_wout_r3_P32 SPEC_r3_even
5163 static void wout_r3_P64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5165 int r3
= get_field(f
, r3
);
5166 store_reg(r3
, o
->out
);
5167 store_reg(r3
+ 1, o
->out2
);
5169 #define SPEC_wout_r3_P64 SPEC_r3_even
5171 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5173 store_freg32_i64(get_field(f
, r1
), o
->out
);
5175 #define SPEC_wout_e1 0
5177 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5179 store_freg(get_field(f
, r1
), o
->out
);
5181 #define SPEC_wout_f1 0
5183 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5185 int f1
= get_field(s
->fields
, r1
);
5186 store_freg(f1
, o
->out
);
5187 store_freg(f1
+ 2, o
->out2
);
5189 #define SPEC_wout_x1 SPEC_r1_f128
5191 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5193 if (get_field(f
, r1
) != get_field(f
, r2
)) {
5194 store_reg32_i64(get_field(f
, r1
), o
->out
);
5197 #define SPEC_wout_cond_r1r2_32 0
5199 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5201 if (get_field(f
, r1
) != get_field(f
, r2
)) {
5202 store_freg32_i64(get_field(f
, r1
), o
->out
);
5205 #define SPEC_wout_cond_e1e2 0
5207 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5209 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
5211 #define SPEC_wout_m1_8 0
5213 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5215 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
5217 #define SPEC_wout_m1_16 0
5219 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5221 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
5223 #define SPEC_wout_m1_32 0
5225 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5227 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
5229 #define SPEC_wout_m1_64 0
5231 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5233 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
5235 #define SPEC_wout_m2_32 0
5237 static void wout_in2_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5239 store_reg(get_field(f
, r1
), o
->in2
);
5241 #define SPEC_wout_in2_r1 0
5243 static void wout_in2_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5245 store_reg32_i64(get_field(f
, r1
), o
->in2
);
5247 #define SPEC_wout_in2_r1_32 0
5249 /* ====================================================================== */
5250 /* The "INput 1" generators. These load the first operand to an insn. */
5252 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5254 o
->in1
= load_reg(get_field(f
, r1
));
5256 #define SPEC_in1_r1 0
5258 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5260 o
->in1
= regs
[get_field(f
, r1
)];
5263 #define SPEC_in1_r1_o 0
5265 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5267 o
->in1
= tcg_temp_new_i64();
5268 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
5270 #define SPEC_in1_r1_32s 0
5272 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5274 o
->in1
= tcg_temp_new_i64();
5275 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
5277 #define SPEC_in1_r1_32u 0
5279 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5281 o
->in1
= tcg_temp_new_i64();
5282 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
5284 #define SPEC_in1_r1_sr32 0
5286 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5288 o
->in1
= load_reg(get_field(f
, r1
) + 1);
5290 #define SPEC_in1_r1p1 SPEC_r1_even
5292 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5294 o
->in1
= tcg_temp_new_i64();
5295 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
5297 #define SPEC_in1_r1p1_32s SPEC_r1_even
5299 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5301 o
->in1
= tcg_temp_new_i64();
5302 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
5304 #define SPEC_in1_r1p1_32u SPEC_r1_even
5306 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5308 int r1
= get_field(f
, r1
);
5309 o
->in1
= tcg_temp_new_i64();
5310 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
5312 #define SPEC_in1_r1_D32 SPEC_r1_even
5314 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5316 o
->in1
= load_reg(get_field(f
, r2
));
5318 #define SPEC_in1_r2 0
5320 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5322 o
->in1
= tcg_temp_new_i64();
5323 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
5325 #define SPEC_in1_r2_sr32 0
5327 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5329 o
->in1
= load_reg(get_field(f
, r3
));
5331 #define SPEC_in1_r3 0
5333 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5335 o
->in1
= regs
[get_field(f
, r3
)];
5338 #define SPEC_in1_r3_o 0
5340 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5342 o
->in1
= tcg_temp_new_i64();
5343 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5345 #define SPEC_in1_r3_32s 0
5347 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5349 o
->in1
= tcg_temp_new_i64();
5350 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5352 #define SPEC_in1_r3_32u 0
5354 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5356 int r3
= get_field(f
, r3
);
5357 o
->in1
= tcg_temp_new_i64();
5358 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
5360 #define SPEC_in1_r3_D32 SPEC_r3_even
5362 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5364 o
->in1
= load_freg32_i64(get_field(f
, r1
));
5366 #define SPEC_in1_e1 0
5368 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5370 o
->in1
= fregs
[get_field(f
, r1
)];
5373 #define SPEC_in1_f1_o 0
5375 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5377 int r1
= get_field(f
, r1
);
5379 o
->out2
= fregs
[r1
+ 2];
5380 o
->g_out
= o
->g_out2
= true;
5382 #define SPEC_in1_x1_o SPEC_r1_f128
5384 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5386 o
->in1
= fregs
[get_field(f
, r3
)];
5389 #define SPEC_in1_f3_o 0
5391 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5393 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
5395 #define SPEC_in1_la1 0
5397 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5399 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5400 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5402 #define SPEC_in1_la2 0
5404 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5407 o
->in1
= tcg_temp_new_i64();
5408 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
5410 #define SPEC_in1_m1_8u 0
5412 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5415 o
->in1
= tcg_temp_new_i64();
5416 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
5418 #define SPEC_in1_m1_16s 0
5420 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5423 o
->in1
= tcg_temp_new_i64();
5424 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
5426 #define SPEC_in1_m1_16u 0
5428 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5431 o
->in1
= tcg_temp_new_i64();
5432 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
5434 #define SPEC_in1_m1_32s 0
5436 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5439 o
->in1
= tcg_temp_new_i64();
5440 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
5442 #define SPEC_in1_m1_32u 0
5444 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5447 o
->in1
= tcg_temp_new_i64();
5448 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
5450 #define SPEC_in1_m1_64 0
5452 /* ====================================================================== */
5453 /* The "INput 2" generators. These load the second operand to an insn. */
5455 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5457 o
->in2
= regs
[get_field(f
, r1
)];
5460 #define SPEC_in2_r1_o 0
5462 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5464 o
->in2
= tcg_temp_new_i64();
5465 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5467 #define SPEC_in2_r1_16u 0
5469 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5471 o
->in2
= tcg_temp_new_i64();
5472 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5474 #define SPEC_in2_r1_32u 0
5476 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5478 int r1
= get_field(f
, r1
);
5479 o
->in2
= tcg_temp_new_i64();
5480 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
5482 #define SPEC_in2_r1_D32 SPEC_r1_even
5484 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5486 o
->in2
= load_reg(get_field(f
, r2
));
5488 #define SPEC_in2_r2 0
5490 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5492 o
->in2
= regs
[get_field(f
, r2
)];
5495 #define SPEC_in2_r2_o 0
5497 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5499 int r2
= get_field(f
, r2
);
5501 o
->in2
= load_reg(r2
);
5504 #define SPEC_in2_r2_nz 0
5506 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5508 o
->in2
= tcg_temp_new_i64();
5509 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5511 #define SPEC_in2_r2_8s 0
5513 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5515 o
->in2
= tcg_temp_new_i64();
5516 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5518 #define SPEC_in2_r2_8u 0
5520 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5522 o
->in2
= tcg_temp_new_i64();
5523 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5525 #define SPEC_in2_r2_16s 0
5527 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5529 o
->in2
= tcg_temp_new_i64();
5530 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5532 #define SPEC_in2_r2_16u 0
5534 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5536 o
->in2
= load_reg(get_field(f
, r3
));
5538 #define SPEC_in2_r3 0
5540 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5542 o
->in2
= tcg_temp_new_i64();
5543 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
5545 #define SPEC_in2_r3_sr32 0
5547 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5549 o
->in2
= tcg_temp_new_i64();
5550 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5552 #define SPEC_in2_r2_32s 0
5554 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5556 o
->in2
= tcg_temp_new_i64();
5557 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5559 #define SPEC_in2_r2_32u 0
5561 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5563 o
->in2
= tcg_temp_new_i64();
5564 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
5566 #define SPEC_in2_r2_sr32 0
5568 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5570 o
->in2
= load_freg32_i64(get_field(f
, r2
));
5572 #define SPEC_in2_e2 0
5574 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5576 o
->in2
= fregs
[get_field(f
, r2
)];
5579 #define SPEC_in2_f2_o 0
5581 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5583 int r2
= get_field(f
, r2
);
5585 o
->in2
= fregs
[r2
+ 2];
5586 o
->g_in1
= o
->g_in2
= true;
5588 #define SPEC_in2_x2_o SPEC_r2_f128
5590 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5592 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
5594 #define SPEC_in2_ra2 0
5596 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5598 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5599 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5601 #define SPEC_in2_a2 0
5603 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5605 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
5607 #define SPEC_in2_ri2 0
5609 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5611 help_l2_shift(s
, f
, o
, 31);
5613 #define SPEC_in2_sh32 0
5615 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5617 help_l2_shift(s
, f
, o
, 63);
5619 #define SPEC_in2_sh64 0
5621 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5624 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
5626 #define SPEC_in2_m2_8u 0
5628 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5631 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
5633 #define SPEC_in2_m2_16s 0
5635 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5638 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5640 #define SPEC_in2_m2_16u 0
5642 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5645 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5647 #define SPEC_in2_m2_32s 0
5649 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5652 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5654 #define SPEC_in2_m2_32u 0
5656 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5659 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5661 #define SPEC_in2_m2_64 0
5663 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5666 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5668 #define SPEC_in2_mri2_16u 0
5670 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5673 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5675 #define SPEC_in2_mri2_32s 0
5677 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5680 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5682 #define SPEC_in2_mri2_32u 0
5684 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5687 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5689 #define SPEC_in2_mri2_64 0
5691 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5693 o
->in2
= tcg_const_i64(get_field(f
, i2
));
5695 #define SPEC_in2_i2 0
5697 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5699 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
5701 #define SPEC_in2_i2_8u 0
5703 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5705 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
5707 #define SPEC_in2_i2_16u 0
5709 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5711 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
5713 #define SPEC_in2_i2_32u 0
5715 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5717 uint64_t i2
= (uint16_t)get_field(f
, i2
);
5718 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5720 #define SPEC_in2_i2_16u_shl 0
5722 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5724 uint64_t i2
= (uint32_t)get_field(f
, i2
);
5725 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5727 #define SPEC_in2_i2_32u_shl 0
5729 #ifndef CONFIG_USER_ONLY
5730 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5732 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
5734 #define SPEC_in2_insn 0
5737 /* ====================================================================== */
5739 /* Find opc within the table of insns. This is formulated as a switch
5740 statement so that (1) we get compile-time notice of cut-paste errors
5741 for duplicated opcodes, and (2) the compiler generates the binary
5742 search tree, rather than us having to post-process the table. */
5744 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5745 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5747 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5749 enum DisasInsnEnum
{
5750 #include "insn-data.def"
5754 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5758 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5760 .help_in1 = in1_##I1, \
5761 .help_in2 = in2_##I2, \
5762 .help_prep = prep_##P, \
5763 .help_wout = wout_##W, \
5764 .help_cout = cout_##CC, \
5765 .help_op = op_##OP, \
5769 /* Allow 0 to be used for NULL in the table below. */
5777 #define SPEC_in1_0 0
5778 #define SPEC_in2_0 0
5779 #define SPEC_prep_0 0
5780 #define SPEC_wout_0 0
5782 /* Give smaller names to the various facilities. */
5783 #define FAC_Z S390_FEAT_ZARCH
5784 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5785 #define FAC_DFP S390_FEAT_DFP
5786 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5787 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5788 #define FAC_EE S390_FEAT_EXECUTE_EXT
5789 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5790 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5791 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5792 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5793 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5794 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5795 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5796 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5797 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5798 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5799 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5800 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
5801 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5802 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5803 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5804 #define FAC_SFLE S390_FEAT_STFLE
5805 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5806 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5807 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5808 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5809 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5810 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
5811 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
5812 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5813 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
5814 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
5815 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
5816 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
5817 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
5818 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
5819 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
5820 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
5822 static const DisasInsn insn_info
[] = {
5823 #include "insn-data.def"
5827 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5828 case OPC: return &insn_info[insn_ ## NM];
5830 static const DisasInsn
*lookup_opc(uint16_t opc
)
5833 #include "insn-data.def"
5842 /* Extract a field from the insn. The INSN should be left-aligned in
5843 the uint64_t so that we can more easily utilize the big-bit-endian
5844 definitions we extract from the Principles of Operation. */
5846 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5854 /* Zero extract the field from the insn. */
5855 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5857 /* Sign-extend, or un-swap the field as necessary. */
5859 case 0: /* unsigned */
5861 case 1: /* signed */
5862 assert(f
->size
<= 32);
5863 m
= 1u << (f
->size
- 1);
5866 case 2: /* dl+dh split, signed 20 bit. */
5867 r
= ((int8_t)r
<< 12) | (r
>> 8);
5873 /* Validate that the "compressed" encoding we selected above is valid.
5874 I.e. we haven't made two different original fields overlap. */
5875 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5876 o
->presentC
|= 1 << f
->indexC
;
5877 o
->presentO
|= 1 << f
->indexO
;
5879 o
->c
[f
->indexC
] = r
;
5882 /* Lookup the insn at the current PC, extracting the operands into O and
5883 returning the info struct for the insn. Returns NULL for invalid insn. */
5885 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5888 uint64_t insn
, pc
= s
->pc
;
5890 const DisasInsn
*info
;
5892 if (unlikely(s
->ex_value
)) {
5893 /* Drop the EX data now, so that it's clear on exception paths. */
5894 TCGv_i64 zero
= tcg_const_i64(0);
5895 tcg_gen_st_i64(zero
, cpu_env
, offsetof(CPUS390XState
, ex_value
));
5896 tcg_temp_free_i64(zero
);
5898 /* Extract the values saved by EXECUTE. */
5899 insn
= s
->ex_value
& 0xffffffffffff0000ull
;
5900 ilen
= s
->ex_value
& 0xf;
5903 insn
= ld_code2(env
, pc
);
5904 op
= (insn
>> 8) & 0xff;
5905 ilen
= get_ilen(op
);
5911 insn
= ld_code4(env
, pc
) << 32;
5914 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
5917 g_assert_not_reached();
5920 s
->next_pc
= s
->pc
+ ilen
;
5923 /* We can't actually determine the insn format until we've looked up
5924 the full insn opcode. Which we can't do without locating the
5925 secondary opcode. Assume by default that OP2 is at bit 40; for
5926 those smaller insns that don't actually have a secondary opcode
5927 this will correctly result in OP2 = 0. */
5933 case 0xb2: /* S, RRF, RRE, IE */
5934 case 0xb3: /* RRE, RRD, RRF */
5935 case 0xb9: /* RRE, RRF */
5936 case 0xe5: /* SSE, SIL */
5937 op2
= (insn
<< 8) >> 56;
5941 case 0xc0: /* RIL */
5942 case 0xc2: /* RIL */
5943 case 0xc4: /* RIL */
5944 case 0xc6: /* RIL */
5945 case 0xc8: /* SSF */
5946 case 0xcc: /* RIL */
5947 op2
= (insn
<< 12) >> 60;
5949 case 0xc5: /* MII */
5950 case 0xc7: /* SMI */
5951 case 0xd0 ... 0xdf: /* SS */
5957 case 0xee ... 0xf3: /* SS */
5958 case 0xf8 ... 0xfd: /* SS */
5962 op2
= (insn
<< 40) >> 56;
5966 memset(f
, 0, sizeof(*f
));
5971 /* Lookup the instruction. */
5972 info
= lookup_opc(op
<< 8 | op2
);
5974 /* If we found it, extract the operands. */
5976 DisasFormat fmt
= info
->fmt
;
5979 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5980 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
5986 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5988 const DisasInsn
*insn
;
5989 ExitStatus ret
= NO_EXIT
;
5993 /* Search for the insn in the table. */
5994 insn
= extract_insn(env
, s
, &f
);
5996 /* Not found means unimplemented/illegal opcode. */
5998 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
6000 gen_illegal_opcode(s
);
6001 return EXIT_NORETURN
;
6004 #ifndef CONFIG_USER_ONLY
6005 if (s
->tb
->flags
& FLAG_MASK_PER
) {
6006 TCGv_i64 addr
= tcg_const_i64(s
->pc
);
6007 gen_helper_per_ifetch(cpu_env
, addr
);
6008 tcg_temp_free_i64(addr
);
6012 /* Check for insn specification exceptions. */
6014 int spec
= insn
->spec
, excp
= 0, r
;
6016 if (spec
& SPEC_r1_even
) {
6017 r
= get_field(&f
, r1
);
6019 excp
= PGM_SPECIFICATION
;
6022 if (spec
& SPEC_r2_even
) {
6023 r
= get_field(&f
, r2
);
6025 excp
= PGM_SPECIFICATION
;
6028 if (spec
& SPEC_r3_even
) {
6029 r
= get_field(&f
, r3
);
6031 excp
= PGM_SPECIFICATION
;
6034 if (spec
& SPEC_r1_f128
) {
6035 r
= get_field(&f
, r1
);
6037 excp
= PGM_SPECIFICATION
;
6040 if (spec
& SPEC_r2_f128
) {
6041 r
= get_field(&f
, r2
);
6043 excp
= PGM_SPECIFICATION
;
6047 gen_program_exception(s
, excp
);
6048 return EXIT_NORETURN
;
6052 /* Set up the structures we use to communicate with the helpers. */
6055 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
6062 /* Implement the instruction. */
6063 if (insn
->help_in1
) {
6064 insn
->help_in1(s
, &f
, &o
);
6066 if (insn
->help_in2
) {
6067 insn
->help_in2(s
, &f
, &o
);
6069 if (insn
->help_prep
) {
6070 insn
->help_prep(s
, &f
, &o
);
6072 if (insn
->help_op
) {
6073 ret
= insn
->help_op(s
, &o
);
6075 if (insn
->help_wout
) {
6076 insn
->help_wout(s
, &f
, &o
);
6078 if (insn
->help_cout
) {
6079 insn
->help_cout(s
, &o
);
6082 /* Free any temporaries created by the helpers. */
6083 if (o
.out
&& !o
.g_out
) {
6084 tcg_temp_free_i64(o
.out
);
6086 if (o
.out2
&& !o
.g_out2
) {
6087 tcg_temp_free_i64(o
.out2
);
6089 if (o
.in1
&& !o
.g_in1
) {
6090 tcg_temp_free_i64(o
.in1
);
6092 if (o
.in2
&& !o
.g_in2
) {
6093 tcg_temp_free_i64(o
.in2
);
6096 tcg_temp_free_i64(o
.addr1
);
6099 #ifndef CONFIG_USER_ONLY
6100 if (s
->tb
->flags
& FLAG_MASK_PER
) {
6101 /* An exception might be triggered, save PSW if not already done. */
6102 if (ret
== NO_EXIT
|| ret
== EXIT_PC_STALE
) {
6103 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
6106 /* Call the helper to check for a possible PER exception. */
6107 gen_helper_per_check_exception(cpu_env
);
6111 /* Advance to the next instruction. */
6116 void gen_intermediate_code(CPUState
*cs
, struct TranslationBlock
*tb
)
6118 CPUS390XState
*env
= cs
->env_ptr
;
6120 target_ulong pc_start
;
6121 uint64_t next_page_start
;
6122 int num_insns
, max_insns
;
6129 if (!(tb
->flags
& FLAG_MASK_64
)) {
6130 pc_start
&= 0x7fffffff;
6135 dc
.cc_op
= CC_OP_DYNAMIC
;
6136 dc
.ex_value
= tb
->cs_base
;
6137 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
6139 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
6142 max_insns
= tb_cflags(tb
) & CF_COUNT_MASK
;
6143 if (max_insns
== 0) {
6144 max_insns
= CF_COUNT_MASK
;
6146 if (max_insns
> TCG_MAX_INSNS
) {
6147 max_insns
= TCG_MAX_INSNS
;
6153 tcg_gen_insn_start(dc
.pc
, dc
.cc_op
);
6156 if (unlikely(cpu_breakpoint_test(cs
, dc
.pc
, BP_ANY
))) {
6157 status
= EXIT_PC_STALE
;
6159 /* The address covered by the breakpoint must be included in
6160 [tb->pc, tb->pc + tb->size) in order for it to be
6161 properly cleared -- thus we increment the PC here so that
6162 the logic setting tb->size below does the right thing. */
6167 if (num_insns
== max_insns
&& (tb_cflags(tb
) & CF_LAST_IO
)) {
6171 status
= translate_one(env
, &dc
);
6173 /* If we reach a page boundary, are single stepping,
6174 or exhaust instruction count, stop generation. */
6175 if (status
== NO_EXIT
6176 && (dc
.pc
>= next_page_start
6177 || tcg_op_buf_full()
6178 || num_insns
>= max_insns
6180 || cs
->singlestep_enabled
6182 status
= EXIT_PC_STALE
;
6184 } while (status
== NO_EXIT
);
6186 if (tb_cflags(tb
) & CF_LAST_IO
) {
6195 case EXIT_PC_STALE_NOCHAIN
:
6196 update_psw_addr(&dc
);
6198 case EXIT_PC_UPDATED
:
6199 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6200 cc op type is in env */
6203 case EXIT_PC_CC_UPDATED
:
6204 /* Exit the TB, either by raising a debug exception or by return. */
6206 gen_exception(EXCP_DEBUG
);
6207 } else if (use_exit_tb(&dc
) || status
== EXIT_PC_STALE_NOCHAIN
) {
6210 tcg_gen_lookup_and_goto_ptr();
6214 g_assert_not_reached();
6217 gen_tb_end(tb
, num_insns
);
6219 tb
->size
= dc
.pc
- pc_start
;
6220 tb
->icount
= num_insns
;
6222 #if defined(S390X_DEBUG_DISAS)
6223 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
6224 && qemu_log_in_addr_range(pc_start
)) {
6226 if (unlikely(dc
.ex_value
)) {
6227 /* ??? Unfortunately log_target_disas can't use host memory. */
6228 qemu_log("IN: EXECUTE %016" PRIx64
"\n", dc
.ex_value
);
6230 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
6231 log_target_disas(cs
, pc_start
, dc
.pc
- pc_start
);
6239 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
6242 int cc_op
= data
[1];
6243 env
->psw
.addr
= data
[0];
6244 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {