4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
40 #include "exec/gen-icount.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
44 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext
;
50 typedef struct DisasInsn DisasInsn
;
51 typedef struct DisasFields DisasFields
;
54 struct TranslationBlock
*tb
;
55 const DisasInsn
*insn
;
61 bool singlestep_enabled
;
64 /* Information carried about a condition to be evaluated. */
71 struct { TCGv_i64 a
, b
; } s64
;
72 struct { TCGv_i32 a
, b
; } s32
;
76 /* is_jmp field values */
77 #define DISAS_EXCP DISAS_TARGET_0
79 #ifdef DEBUG_INLINE_BRANCHES
80 static uint64_t inline_branch_hit
[CC_OP_MAX
];
81 static uint64_t inline_branch_miss
[CC_OP_MAX
];
84 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
86 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
87 if (s
->tb
->flags
& FLAG_MASK_32
) {
88 return pc
| 0x80000000;
94 static TCGv_i64 psw_addr
;
95 static TCGv_i64 psw_mask
;
98 static TCGv_i32 cc_op
;
99 static TCGv_i64 cc_src
;
100 static TCGv_i64 cc_dst
;
101 static TCGv_i64 cc_vr
;
103 static char cpu_reg_names
[32][4];
104 static TCGv_i64 regs
[16];
105 static TCGv_i64 fregs
[16];
107 void s390x_translate_init(void)
111 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
112 offsetof(CPUS390XState
, psw
.addr
),
114 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
115 offsetof(CPUS390XState
, psw
.mask
),
117 gbea
= tcg_global_mem_new_i64(cpu_env
,
118 offsetof(CPUS390XState
, gbea
),
121 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
123 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
125 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
127 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
130 for (i
= 0; i
< 16; i
++) {
131 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
132 regs
[i
] = tcg_global_mem_new(cpu_env
,
133 offsetof(CPUS390XState
, regs
[i
]),
137 for (i
= 0; i
< 16; i
++) {
138 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
139 fregs
[i
] = tcg_global_mem_new(cpu_env
,
140 offsetof(CPUS390XState
, vregs
[i
][0].d
),
141 cpu_reg_names
[i
+ 16]);
145 static TCGv_i64
load_reg(int reg
)
147 TCGv_i64 r
= tcg_temp_new_i64();
148 tcg_gen_mov_i64(r
, regs
[reg
]);
152 static TCGv_i64
load_freg32_i64(int reg
)
154 TCGv_i64 r
= tcg_temp_new_i64();
155 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
159 static void store_reg(int reg
, TCGv_i64 v
)
161 tcg_gen_mov_i64(regs
[reg
], v
);
164 static void store_freg(int reg
, TCGv_i64 v
)
166 tcg_gen_mov_i64(fregs
[reg
], v
);
169 static void store_reg32_i64(int reg
, TCGv_i64 v
)
171 /* 32 bit register writes keep the upper half */
172 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
175 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
177 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
180 static void store_freg32_i64(int reg
, TCGv_i64 v
)
182 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
185 static void return_low128(TCGv_i64 dest
)
187 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
190 static void update_psw_addr(DisasContext
*s
)
193 tcg_gen_movi_i64(psw_addr
, s
->pc
);
196 static void per_branch(DisasContext
*s
, bool to_next
)
198 #ifndef CONFIG_USER_ONLY
199 tcg_gen_movi_i64(gbea
, s
->pc
);
201 if (s
->tb
->flags
& FLAG_MASK_PER
) {
202 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->next_pc
) : psw_addr
;
203 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
205 tcg_temp_free_i64(next_pc
);
211 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
212 TCGv_i64 arg1
, TCGv_i64 arg2
)
214 #ifndef CONFIG_USER_ONLY
215 if (s
->tb
->flags
& FLAG_MASK_PER
) {
216 TCGLabel
*lab
= gen_new_label();
217 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
219 tcg_gen_movi_i64(gbea
, s
->pc
);
220 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
224 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
225 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
226 tcg_temp_free_i64(pc
);
231 static void per_breaking_event(DisasContext
*s
)
233 tcg_gen_movi_i64(gbea
, s
->pc
);
236 static void update_cc_op(DisasContext
*s
)
238 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
239 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
243 static void potential_page_fault(DisasContext
*s
)
249 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
251 return (uint64_t)cpu_lduw_code(env
, pc
);
254 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
256 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
259 static int get_mem_index(DisasContext
*s
)
261 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
262 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
264 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
266 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
274 static void gen_exception(int excp
)
276 TCGv_i32 tmp
= tcg_const_i32(excp
);
277 gen_helper_exception(cpu_env
, tmp
);
278 tcg_temp_free_i32(tmp
);
281 static void gen_program_exception(DisasContext
*s
, int code
)
285 /* Remember what pgm exeption this was. */
286 tmp
= tcg_const_i32(code
);
287 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
288 tcg_temp_free_i32(tmp
);
290 tmp
= tcg_const_i32(s
->ilen
);
291 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
292 tcg_temp_free_i32(tmp
);
300 /* Trigger exception. */
301 gen_exception(EXCP_PGM
);
304 static inline void gen_illegal_opcode(DisasContext
*s
)
306 gen_program_exception(s
, PGM_OPERATION
);
309 static inline void gen_trap(DisasContext
*s
)
313 /* Set DXC to 0xff. */
314 t
= tcg_temp_new_i32();
315 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
316 tcg_gen_ori_i32(t
, t
, 0xff00);
317 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
318 tcg_temp_free_i32(t
);
320 gen_program_exception(s
, PGM_DATA
);
323 #ifndef CONFIG_USER_ONLY
324 static void check_privileged(DisasContext
*s
)
326 if (s
->tb
->flags
& FLAG_MASK_PSTATE
) {
327 gen_program_exception(s
, PGM_PRIVILEGED
);
332 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
334 TCGv_i64 tmp
= tcg_temp_new_i64();
335 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
337 /* Note that d2 is limited to 20 bits, signed. If we crop negative
338 displacements early we create larger immedate addends. */
340 /* Note that addi optimizes the imm==0 case. */
342 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
343 tcg_gen_addi_i64(tmp
, tmp
, d2
);
345 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
347 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
353 tcg_gen_movi_i64(tmp
, d2
);
356 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
362 static inline bool live_cc_data(DisasContext
*s
)
364 return (s
->cc_op
!= CC_OP_DYNAMIC
365 && s
->cc_op
!= CC_OP_STATIC
369 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
371 if (live_cc_data(s
)) {
372 tcg_gen_discard_i64(cc_src
);
373 tcg_gen_discard_i64(cc_dst
);
374 tcg_gen_discard_i64(cc_vr
);
376 s
->cc_op
= CC_OP_CONST0
+ val
;
379 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
381 if (live_cc_data(s
)) {
382 tcg_gen_discard_i64(cc_src
);
383 tcg_gen_discard_i64(cc_vr
);
385 tcg_gen_mov_i64(cc_dst
, dst
);
389 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
392 if (live_cc_data(s
)) {
393 tcg_gen_discard_i64(cc_vr
);
395 tcg_gen_mov_i64(cc_src
, src
);
396 tcg_gen_mov_i64(cc_dst
, dst
);
400 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
401 TCGv_i64 dst
, TCGv_i64 vr
)
403 tcg_gen_mov_i64(cc_src
, src
);
404 tcg_gen_mov_i64(cc_dst
, dst
);
405 tcg_gen_mov_i64(cc_vr
, vr
);
409 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
411 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
414 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
416 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
419 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
421 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
424 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
426 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
429 /* CC value is in env->cc_op */
430 static void set_cc_static(DisasContext
*s
)
432 if (live_cc_data(s
)) {
433 tcg_gen_discard_i64(cc_src
);
434 tcg_gen_discard_i64(cc_dst
);
435 tcg_gen_discard_i64(cc_vr
);
437 s
->cc_op
= CC_OP_STATIC
;
440 /* calculates cc into cc_op */
441 static void gen_op_calc_cc(DisasContext
*s
)
443 TCGv_i32 local_cc_op
;
446 TCGV_UNUSED_I32(local_cc_op
);
447 TCGV_UNUSED_I64(dummy
);
450 dummy
= tcg_const_i64(0);
464 local_cc_op
= tcg_const_i32(s
->cc_op
);
480 /* s->cc_op is the cc value */
481 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
484 /* env->cc_op already is the cc value */
499 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
504 case CC_OP_LTUGTU_32
:
505 case CC_OP_LTUGTU_64
:
512 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
527 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
530 /* unknown operation - assume 3 arguments and cc_op in env */
531 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
537 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
538 tcg_temp_free_i32(local_cc_op
);
540 if (!TCGV_IS_UNUSED_I64(dummy
)) {
541 tcg_temp_free_i64(dummy
);
544 /* We now have cc in cc_op as constant */
548 static bool use_exit_tb(DisasContext
*s
)
550 return (s
->singlestep_enabled
||
551 (tb_cflags(s
->tb
) & CF_LAST_IO
) ||
552 (s
->tb
->flags
& FLAG_MASK_PER
));
555 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
557 if (unlikely(use_exit_tb(s
))) {
560 #ifndef CONFIG_USER_ONLY
561 return (dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
562 (dest
& TARGET_PAGE_MASK
) == (s
->pc
& TARGET_PAGE_MASK
);
568 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
570 #ifdef DEBUG_INLINE_BRANCHES
571 inline_branch_miss
[cc_op
]++;
575 static void account_inline_branch(DisasContext
*s
, int cc_op
)
577 #ifdef DEBUG_INLINE_BRANCHES
578 inline_branch_hit
[cc_op
]++;
582 /* Table of mask values to comparison codes, given a comparison as input.
583 For such, CC=3 should not be possible. */
584 static const TCGCond ltgt_cond
[16] = {
585 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
586 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
587 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
588 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
589 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
590 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
591 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
592 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
595 /* Table of mask values to comparison codes, given a logic op as input.
596 For such, only CC=0 and CC=1 should be possible. */
597 static const TCGCond nz_cond
[16] = {
598 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
599 TCG_COND_NEVER
, TCG_COND_NEVER
,
600 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
601 TCG_COND_NE
, TCG_COND_NE
,
602 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
603 TCG_COND_EQ
, TCG_COND_EQ
,
604 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
605 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
608 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
609 details required to generate a TCG comparison. */
610 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
613 enum cc_op old_cc_op
= s
->cc_op
;
615 if (mask
== 15 || mask
== 0) {
616 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
619 c
->g1
= c
->g2
= true;
624 /* Find the TCG condition for the mask + cc op. */
630 cond
= ltgt_cond
[mask
];
631 if (cond
== TCG_COND_NEVER
) {
634 account_inline_branch(s
, old_cc_op
);
637 case CC_OP_LTUGTU_32
:
638 case CC_OP_LTUGTU_64
:
639 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
640 if (cond
== TCG_COND_NEVER
) {
643 account_inline_branch(s
, old_cc_op
);
647 cond
= nz_cond
[mask
];
648 if (cond
== TCG_COND_NEVER
) {
651 account_inline_branch(s
, old_cc_op
);
666 account_inline_branch(s
, old_cc_op
);
681 account_inline_branch(s
, old_cc_op
);
685 switch (mask
& 0xa) {
686 case 8: /* src == 0 -> no one bit found */
689 case 2: /* src != 0 -> one bit found */
695 account_inline_branch(s
, old_cc_op
);
701 case 8 | 2: /* vr == 0 */
704 case 4 | 1: /* vr != 0 */
707 case 8 | 4: /* no carry -> vr >= src */
710 case 2 | 1: /* carry -> vr < src */
716 account_inline_branch(s
, old_cc_op
);
721 /* Note that CC=0 is impossible; treat it as dont-care. */
723 case 2: /* zero -> op1 == op2 */
726 case 4 | 1: /* !zero -> op1 != op2 */
729 case 4: /* borrow (!carry) -> op1 < op2 */
732 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
738 account_inline_branch(s
, old_cc_op
);
743 /* Calculate cc value. */
748 /* Jump based on CC. We'll load up the real cond below;
749 the assignment here merely avoids a compiler warning. */
750 account_noninline_branch(s
, old_cc_op
);
751 old_cc_op
= CC_OP_STATIC
;
752 cond
= TCG_COND_NEVER
;
756 /* Load up the arguments of the comparison. */
758 c
->g1
= c
->g2
= false;
762 c
->u
.s32
.a
= tcg_temp_new_i32();
763 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
764 c
->u
.s32
.b
= tcg_const_i32(0);
767 case CC_OP_LTUGTU_32
:
770 c
->u
.s32
.a
= tcg_temp_new_i32();
771 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
772 c
->u
.s32
.b
= tcg_temp_new_i32();
773 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
780 c
->u
.s64
.b
= tcg_const_i64(0);
784 case CC_OP_LTUGTU_64
:
788 c
->g1
= c
->g2
= true;
794 c
->u
.s64
.a
= tcg_temp_new_i64();
795 c
->u
.s64
.b
= tcg_const_i64(0);
796 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
801 c
->u
.s32
.a
= tcg_temp_new_i32();
802 c
->u
.s32
.b
= tcg_temp_new_i32();
803 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
804 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
805 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
807 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
814 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
815 c
->u
.s64
.b
= tcg_const_i64(0);
827 case 0x8 | 0x4 | 0x2: /* cc != 3 */
829 c
->u
.s32
.b
= tcg_const_i32(3);
831 case 0x8 | 0x4 | 0x1: /* cc != 2 */
833 c
->u
.s32
.b
= tcg_const_i32(2);
835 case 0x8 | 0x2 | 0x1: /* cc != 1 */
837 c
->u
.s32
.b
= tcg_const_i32(1);
839 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
842 c
->u
.s32
.a
= tcg_temp_new_i32();
843 c
->u
.s32
.b
= tcg_const_i32(0);
844 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
846 case 0x8 | 0x4: /* cc < 2 */
848 c
->u
.s32
.b
= tcg_const_i32(2);
850 case 0x8: /* cc == 0 */
852 c
->u
.s32
.b
= tcg_const_i32(0);
854 case 0x4 | 0x2 | 0x1: /* cc != 0 */
856 c
->u
.s32
.b
= tcg_const_i32(0);
858 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
861 c
->u
.s32
.a
= tcg_temp_new_i32();
862 c
->u
.s32
.b
= tcg_const_i32(0);
863 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
865 case 0x4: /* cc == 1 */
867 c
->u
.s32
.b
= tcg_const_i32(1);
869 case 0x2 | 0x1: /* cc > 1 */
871 c
->u
.s32
.b
= tcg_const_i32(1);
873 case 0x2: /* cc == 2 */
875 c
->u
.s32
.b
= tcg_const_i32(2);
877 case 0x1: /* cc == 3 */
879 c
->u
.s32
.b
= tcg_const_i32(3);
882 /* CC is masked by something else: (8 >> cc) & mask. */
885 c
->u
.s32
.a
= tcg_const_i32(8);
886 c
->u
.s32
.b
= tcg_const_i32(0);
887 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
888 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
899 static void free_compare(DisasCompare
*c
)
903 tcg_temp_free_i64(c
->u
.s64
.a
);
905 tcg_temp_free_i32(c
->u
.s32
.a
);
910 tcg_temp_free_i64(c
->u
.s64
.b
);
912 tcg_temp_free_i32(c
->u
.s32
.b
);
917 /* ====================================================================== */
918 /* Define the insn format enumeration. */
919 #define F0(N) FMT_##N,
920 #define F1(N, X1) F0(N)
921 #define F2(N, X1, X2) F0(N)
922 #define F3(N, X1, X2, X3) F0(N)
923 #define F4(N, X1, X2, X3, X4) F0(N)
924 #define F5(N, X1, X2, X3, X4, X5) F0(N)
927 #include "insn-format.def"
937 /* Define a structure to hold the decoded fields. We'll store each inside
938 an array indexed by an enum. In order to conserve memory, we'll arrange
939 for fields that do not exist at the same time to overlap, thus the "C"
940 for compact. For checking purposes there is an "O" for original index
941 as well that will be applied to availability bitmaps. */
943 enum DisasFieldIndexO
{
966 enum DisasFieldIndexC
{
1001 unsigned presentC
:16;
1002 unsigned int presentO
;
1006 /* This is the way fields are to be accessed out of DisasFields. */
1007 #define have_field(S, F) have_field1((S), FLD_O_##F)
1008 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1010 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1012 return (f
->presentO
>> c
) & 1;
1015 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1016 enum DisasFieldIndexC c
)
1018 assert(have_field1(f
, o
));
1022 /* Describe the layout of each field in each format. */
1023 typedef struct DisasField
{
1025 unsigned int size
:8;
1026 unsigned int type
:2;
1027 unsigned int indexC
:6;
1028 enum DisasFieldIndexO indexO
:8;
1031 typedef struct DisasFormatInfo
{
1032 DisasField op
[NUM_C_FIELD
];
1035 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1036 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1037 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1038 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1039 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1040 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1041 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1042 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1043 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1044 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1045 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1046 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1047 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1048 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1050 #define F0(N) { { } },
1051 #define F1(N, X1) { { X1 } },
1052 #define F2(N, X1, X2) { { X1, X2 } },
1053 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1054 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1055 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1057 static const DisasFormatInfo format_info
[] = {
1058 #include "insn-format.def"
1076 /* Generally, we'll extract operands into this structures, operate upon
1077 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1078 of routines below for more details. */
1080 bool g_out
, g_out2
, g_in1
, g_in2
;
1081 TCGv_i64 out
, out2
, in1
, in2
;
1085 /* Instructions can place constraints on their operands, raising specification
1086 exceptions if they are violated. To make this easy to automate, each "in1",
1087 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1088 of the following, or 0. To make this easy to document, we'll put the
1089 SPEC_<name> defines next to <name>. */
1091 #define SPEC_r1_even 1
1092 #define SPEC_r2_even 2
1093 #define SPEC_r3_even 4
1094 #define SPEC_r1_f128 8
1095 #define SPEC_r2_f128 16
1097 /* Return values from translate_one, indicating the state of the TB. */
1099 /* Continue the TB. */
1101 /* We have emitted one or more goto_tb. No fixup required. */
1103 /* We are not using a goto_tb (for whatever reason), but have updated
1104 the PC (for whatever reason), so there's no need to do it again on
1107 /* We have updated the PC and CC values. */
1109 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1110 updated the PC for the next instruction to be executed. */
1112 /* We are exiting the TB to the main loop. */
1113 EXIT_PC_STALE_NOCHAIN
,
1114 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1115 No following code will be executed. */
1127 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1128 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1129 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1130 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1131 void (*help_cout
)(DisasContext
*, DisasOps
*);
1132 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1137 /* ====================================================================== */
1138 /* Miscellaneous helpers, used by several operations. */
1140 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1141 DisasOps
*o
, int mask
)
1143 int b2
= get_field(f
, b2
);
1144 int d2
= get_field(f
, d2
);
1147 o
->in2
= tcg_const_i64(d2
& mask
);
1149 o
->in2
= get_address(s
, 0, b2
, d2
);
1150 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1154 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1156 if (dest
== s
->next_pc
) {
1157 per_branch(s
, true);
1160 if (use_goto_tb(s
, dest
)) {
1162 per_breaking_event(s
);
1164 tcg_gen_movi_i64(psw_addr
, dest
);
1165 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1166 return EXIT_GOTO_TB
;
1168 tcg_gen_movi_i64(psw_addr
, dest
);
1169 per_branch(s
, false);
1170 return EXIT_PC_UPDATED
;
1174 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1175 bool is_imm
, int imm
, TCGv_i64 cdest
)
1178 uint64_t dest
= s
->pc
+ 2 * imm
;
1181 /* Take care of the special cases first. */
1182 if (c
->cond
== TCG_COND_NEVER
) {
1187 if (dest
== s
->next_pc
) {
1188 /* Branch to next. */
1189 per_branch(s
, true);
1193 if (c
->cond
== TCG_COND_ALWAYS
) {
1194 ret
= help_goto_direct(s
, dest
);
1198 if (TCGV_IS_UNUSED_I64(cdest
)) {
1199 /* E.g. bcr %r0 -> no branch. */
1203 if (c
->cond
== TCG_COND_ALWAYS
) {
1204 tcg_gen_mov_i64(psw_addr
, cdest
);
1205 per_branch(s
, false);
1206 ret
= EXIT_PC_UPDATED
;
1211 if (use_goto_tb(s
, s
->next_pc
)) {
1212 if (is_imm
&& use_goto_tb(s
, dest
)) {
1213 /* Both exits can use goto_tb. */
1216 lab
= gen_new_label();
1218 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1220 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1223 /* Branch not taken. */
1225 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1226 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1230 per_breaking_event(s
);
1232 tcg_gen_movi_i64(psw_addr
, dest
);
1233 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1237 /* Fallthru can use goto_tb, but taken branch cannot. */
1238 /* Store taken branch destination before the brcond. This
1239 avoids having to allocate a new local temp to hold it.
1240 We'll overwrite this in the not taken case anyway. */
1242 tcg_gen_mov_i64(psw_addr
, cdest
);
1245 lab
= gen_new_label();
1247 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1249 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1252 /* Branch not taken. */
1255 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1256 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1260 tcg_gen_movi_i64(psw_addr
, dest
);
1262 per_breaking_event(s
);
1263 ret
= EXIT_PC_UPDATED
;
1266 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1267 Most commonly we're single-stepping or some other condition that
1268 disables all use of goto_tb. Just update the PC and exit. */
1270 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1272 cdest
= tcg_const_i64(dest
);
1276 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1278 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1280 TCGv_i32 t0
= tcg_temp_new_i32();
1281 TCGv_i64 t1
= tcg_temp_new_i64();
1282 TCGv_i64 z
= tcg_const_i64(0);
1283 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1284 tcg_gen_extu_i32_i64(t1
, t0
);
1285 tcg_temp_free_i32(t0
);
1286 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1287 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1288 tcg_temp_free_i64(t1
);
1289 tcg_temp_free_i64(z
);
1293 tcg_temp_free_i64(cdest
);
1295 tcg_temp_free_i64(next
);
1297 ret
= EXIT_PC_UPDATED
;
1305 /* ====================================================================== */
1306 /* The operations. These perform the bulk of the work for any insn,
1307 usually after the operands have been loaded and output initialized. */
1309 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1312 z
= tcg_const_i64(0);
1313 n
= tcg_temp_new_i64();
1314 tcg_gen_neg_i64(n
, o
->in2
);
1315 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1316 tcg_temp_free_i64(n
);
1317 tcg_temp_free_i64(z
);
1321 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1323 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1327 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1329 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1333 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1335 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1336 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1340 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1342 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1346 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1351 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1353 /* The carry flag is the msb of CC, therefore the branch mask that would
1354 create that comparison is 3. Feeding the generated comparison to
1355 setcond produces the carry flag that we desire. */
1356 disas_jcc(s
, &cmp
, 3);
1357 carry
= tcg_temp_new_i64();
1359 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1361 TCGv_i32 t
= tcg_temp_new_i32();
1362 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1363 tcg_gen_extu_i32_i64(carry
, t
);
1364 tcg_temp_free_i32(t
);
1368 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1369 tcg_temp_free_i64(carry
);
1373 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1375 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1379 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1381 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1385 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1387 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1388 return_low128(o
->out2
);
1392 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1394 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1398 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1400 int shift
= s
->insn
->data
& 0xff;
1401 int size
= s
->insn
->data
>> 8;
1402 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1405 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1406 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1407 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1409 /* Produce the CC from only the bits manipulated. */
1410 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1411 set_cc_nz_u64(s
, cc_dst
);
1415 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1417 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1418 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1419 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1420 per_branch(s
, false);
1421 return EXIT_PC_UPDATED
;
1427 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1429 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1430 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1433 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1435 int m1
= get_field(s
->fields
, m1
);
1436 bool is_imm
= have_field(s
->fields
, i2
);
1437 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1440 /* BCR with R2 = 0 causes no branching */
1441 if (have_field(s
->fields
, r2
) && get_field(s
->fields
, r2
) == 0) {
1443 /* Perform serialization */
1444 /* FIXME: check for fast-BCR-serialization facility */
1445 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1448 /* Perform serialization */
1449 /* FIXME: perform checkpoint-synchronisation */
1450 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1455 disas_jcc(s
, &c
, m1
);
1456 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1459 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1461 int r1
= get_field(s
->fields
, r1
);
1462 bool is_imm
= have_field(s
->fields
, i2
);
1463 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1467 c
.cond
= TCG_COND_NE
;
1472 t
= tcg_temp_new_i64();
1473 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1474 store_reg32_i64(r1
, t
);
1475 c
.u
.s32
.a
= tcg_temp_new_i32();
1476 c
.u
.s32
.b
= tcg_const_i32(0);
1477 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1478 tcg_temp_free_i64(t
);
1480 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1483 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1485 int r1
= get_field(s
->fields
, r1
);
1486 int imm
= get_field(s
->fields
, i2
);
1490 c
.cond
= TCG_COND_NE
;
1495 t
= tcg_temp_new_i64();
1496 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1497 tcg_gen_subi_i64(t
, t
, 1);
1498 store_reg32h_i64(r1
, t
);
1499 c
.u
.s32
.a
= tcg_temp_new_i32();
1500 c
.u
.s32
.b
= tcg_const_i32(0);
1501 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1502 tcg_temp_free_i64(t
);
1504 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1507 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1509 int r1
= get_field(s
->fields
, r1
);
1510 bool is_imm
= have_field(s
->fields
, i2
);
1511 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1514 c
.cond
= TCG_COND_NE
;
1519 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1520 c
.u
.s64
.a
= regs
[r1
];
1521 c
.u
.s64
.b
= tcg_const_i64(0);
1523 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1526 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1528 int r1
= get_field(s
->fields
, r1
);
1529 int r3
= get_field(s
->fields
, r3
);
1530 bool is_imm
= have_field(s
->fields
, i2
);
1531 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1535 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1540 t
= tcg_temp_new_i64();
1541 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1542 c
.u
.s32
.a
= tcg_temp_new_i32();
1543 c
.u
.s32
.b
= tcg_temp_new_i32();
1544 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1545 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1546 store_reg32_i64(r1
, t
);
1547 tcg_temp_free_i64(t
);
1549 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1552 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1554 int r1
= get_field(s
->fields
, r1
);
1555 int r3
= get_field(s
->fields
, r3
);
1556 bool is_imm
= have_field(s
->fields
, i2
);
1557 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1560 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1563 if (r1
== (r3
| 1)) {
1564 c
.u
.s64
.b
= load_reg(r3
| 1);
1567 c
.u
.s64
.b
= regs
[r3
| 1];
1571 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1572 c
.u
.s64
.a
= regs
[r1
];
1575 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1578 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1580 int imm
, m3
= get_field(s
->fields
, m3
);
1584 c
.cond
= ltgt_cond
[m3
];
1585 if (s
->insn
->data
) {
1586 c
.cond
= tcg_unsigned_cond(c
.cond
);
1588 c
.is_64
= c
.g1
= c
.g2
= true;
1592 is_imm
= have_field(s
->fields
, i4
);
1594 imm
= get_field(s
->fields
, i4
);
1597 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1598 get_field(s
->fields
, d4
));
1601 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1604 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1606 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1611 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1613 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1618 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1620 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1625 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1627 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1628 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1629 tcg_temp_free_i32(m3
);
1630 gen_set_cc_nz_f32(s
, o
->in2
);
1634 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1636 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1637 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1638 tcg_temp_free_i32(m3
);
1639 gen_set_cc_nz_f64(s
, o
->in2
);
1643 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1645 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1646 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1647 tcg_temp_free_i32(m3
);
1648 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1652 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1654 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1655 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1656 tcg_temp_free_i32(m3
);
1657 gen_set_cc_nz_f32(s
, o
->in2
);
1661 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1663 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1664 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1665 tcg_temp_free_i32(m3
);
1666 gen_set_cc_nz_f64(s
, o
->in2
);
1670 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1672 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1673 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1674 tcg_temp_free_i32(m3
);
1675 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1679 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1681 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1682 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1683 tcg_temp_free_i32(m3
);
1684 gen_set_cc_nz_f32(s
, o
->in2
);
1688 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1690 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1691 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1692 tcg_temp_free_i32(m3
);
1693 gen_set_cc_nz_f64(s
, o
->in2
);
1697 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1699 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1700 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1701 tcg_temp_free_i32(m3
);
1702 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1706 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1708 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1709 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1710 tcg_temp_free_i32(m3
);
1711 gen_set_cc_nz_f32(s
, o
->in2
);
1715 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1717 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1718 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1719 tcg_temp_free_i32(m3
);
1720 gen_set_cc_nz_f64(s
, o
->in2
);
1724 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1726 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1727 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1728 tcg_temp_free_i32(m3
);
1729 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1733 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1735 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1736 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1737 tcg_temp_free_i32(m3
);
1741 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1743 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1744 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1745 tcg_temp_free_i32(m3
);
1749 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1751 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1752 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1753 tcg_temp_free_i32(m3
);
1754 return_low128(o
->out2
);
1758 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1760 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1761 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1762 tcg_temp_free_i32(m3
);
1766 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1768 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1769 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1770 tcg_temp_free_i32(m3
);
1774 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1776 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1777 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1778 tcg_temp_free_i32(m3
);
1779 return_low128(o
->out2
);
1783 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1785 int r2
= get_field(s
->fields
, r2
);
1786 TCGv_i64 len
= tcg_temp_new_i64();
1788 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1790 return_low128(o
->out
);
1792 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1793 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1794 tcg_temp_free_i64(len
);
1799 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1801 int l
= get_field(s
->fields
, l1
);
1806 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1807 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1810 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1811 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1814 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1815 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1818 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1819 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1822 vl
= tcg_const_i32(l
);
1823 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1824 tcg_temp_free_i32(vl
);
1828 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1832 static ExitStatus
op_clcl(DisasContext
*s
, DisasOps
*o
)
1834 int r1
= get_field(s
->fields
, r1
);
1835 int r2
= get_field(s
->fields
, r2
);
1838 /* r1 and r2 must be even. */
1839 if (r1
& 1 || r2
& 1) {
1840 gen_program_exception(s
, PGM_SPECIFICATION
);
1841 return EXIT_NORETURN
;
1844 t1
= tcg_const_i32(r1
);
1845 t2
= tcg_const_i32(r2
);
1846 gen_helper_clcl(cc_op
, cpu_env
, t1
, t2
);
1847 tcg_temp_free_i32(t1
);
1848 tcg_temp_free_i32(t2
);
1853 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1855 int r1
= get_field(s
->fields
, r1
);
1856 int r3
= get_field(s
->fields
, r3
);
1859 /* r1 and r3 must be even. */
1860 if (r1
& 1 || r3
& 1) {
1861 gen_program_exception(s
, PGM_SPECIFICATION
);
1862 return EXIT_NORETURN
;
1865 t1
= tcg_const_i32(r1
);
1866 t3
= tcg_const_i32(r3
);
1867 gen_helper_clcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1868 tcg_temp_free_i32(t1
);
1869 tcg_temp_free_i32(t3
);
1874 static ExitStatus
op_clclu(DisasContext
*s
, DisasOps
*o
)
1876 int r1
= get_field(s
->fields
, r1
);
1877 int r3
= get_field(s
->fields
, r3
);
1880 /* r1 and r3 must be even. */
1881 if (r1
& 1 || r3
& 1) {
1882 gen_program_exception(s
, PGM_SPECIFICATION
);
1883 return EXIT_NORETURN
;
1886 t1
= tcg_const_i32(r1
);
1887 t3
= tcg_const_i32(r3
);
1888 gen_helper_clclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1889 tcg_temp_free_i32(t1
);
1890 tcg_temp_free_i32(t3
);
1895 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1897 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1898 TCGv_i32 t1
= tcg_temp_new_i32();
1899 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1900 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1902 tcg_temp_free_i32(t1
);
1903 tcg_temp_free_i32(m3
);
1907 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1909 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1911 return_low128(o
->in2
);
1915 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1917 TCGv_i64 t
= tcg_temp_new_i64();
1918 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1919 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1920 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1921 tcg_temp_free_i64(t
);
1925 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1927 int d2
= get_field(s
->fields
, d2
);
1928 int b2
= get_field(s
->fields
, b2
);
1931 /* Note that in1 = R3 (new value) and
1932 in2 = (zero-extended) R1 (expected value). */
1934 addr
= get_address(s
, 0, b2
, d2
);
1935 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
1936 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
1937 tcg_temp_free_i64(addr
);
1939 /* Are the memory and expected values (un)equal? Note that this setcond
1940 produces the output CC value, thus the NE sense of the test. */
1941 cc
= tcg_temp_new_i64();
1942 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1943 tcg_gen_extrl_i64_i32(cc_op
, cc
);
1944 tcg_temp_free_i64(cc
);
1950 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1952 int r1
= get_field(s
->fields
, r1
);
1953 int r3
= get_field(s
->fields
, r3
);
1954 int d2
= get_field(s
->fields
, d2
);
1955 int b2
= get_field(s
->fields
, b2
);
1957 TCGv_i32 t_r1
, t_r3
;
1959 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1960 addr
= get_address(s
, 0, b2
, d2
);
1961 t_r1
= tcg_const_i32(r1
);
1962 t_r3
= tcg_const_i32(r3
);
1963 if (tb_cflags(s
->tb
) & CF_PARALLEL
) {
1964 gen_helper_cdsg_parallel(cpu_env
, addr
, t_r1
, t_r3
);
1966 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
1968 tcg_temp_free_i64(addr
);
1969 tcg_temp_free_i32(t_r1
);
1970 tcg_temp_free_i32(t_r3
);
1976 static ExitStatus
op_csst(DisasContext
*s
, DisasOps
*o
)
1978 int r3
= get_field(s
->fields
, r3
);
1979 TCGv_i32 t_r3
= tcg_const_i32(r3
);
1981 if (tb_cflags(s
->tb
) & CF_PARALLEL
) {
1982 gen_helper_csst_parallel(cc_op
, cpu_env
, t_r3
, o
->in1
, o
->in2
);
1984 gen_helper_csst(cc_op
, cpu_env
, t_r3
, o
->in1
, o
->in2
);
1986 tcg_temp_free_i32(t_r3
);
1992 #ifndef CONFIG_USER_ONLY
1993 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
1995 TCGMemOp mop
= s
->insn
->data
;
1996 TCGv_i64 addr
, old
, cc
;
1997 TCGLabel
*lab
= gen_new_label();
1999 /* Note that in1 = R1 (zero-extended expected value),
2000 out = R1 (original reg), out2 = R1+1 (new value). */
2002 check_privileged(s
);
2003 addr
= tcg_temp_new_i64();
2004 old
= tcg_temp_new_i64();
2005 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2006 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2007 get_mem_index(s
), mop
| MO_ALIGN
);
2008 tcg_temp_free_i64(addr
);
2010 /* Are the memory and expected values (un)equal? */
2011 cc
= tcg_temp_new_i64();
2012 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2013 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2015 /* Write back the output now, so that it happens before the
2016 following branch, so that we don't need local temps. */
2017 if ((mop
& MO_SIZE
) == MO_32
) {
2018 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2020 tcg_gen_mov_i64(o
->out
, old
);
2022 tcg_temp_free_i64(old
);
2024 /* If the comparison was equal, and the LSB of R2 was set,
2025 then we need to flush the TLB (for all cpus). */
2026 tcg_gen_xori_i64(cc
, cc
, 1);
2027 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2028 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2029 tcg_temp_free_i64(cc
);
2031 gen_helper_purge(cpu_env
);
2038 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2040 TCGv_i64 t1
= tcg_temp_new_i64();
2041 TCGv_i32 t2
= tcg_temp_new_i32();
2042 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2043 gen_helper_cvd(t1
, t2
);
2044 tcg_temp_free_i32(t2
);
2045 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2046 tcg_temp_free_i64(t1
);
2050 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2052 int m3
= get_field(s
->fields
, m3
);
2053 TCGLabel
*lab
= gen_new_label();
2056 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2057 if (s
->insn
->data
) {
2058 c
= tcg_unsigned_cond(c
);
2060 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2069 static ExitStatus
op_cuXX(DisasContext
*s
, DisasOps
*o
)
2071 int m3
= get_field(s
->fields
, m3
);
2072 int r1
= get_field(s
->fields
, r1
);
2073 int r2
= get_field(s
->fields
, r2
);
2074 TCGv_i32 tr1
, tr2
, chk
;
2076 /* R1 and R2 must both be even. */
2077 if ((r1
| r2
) & 1) {
2078 gen_program_exception(s
, PGM_SPECIFICATION
);
2079 return EXIT_NORETURN
;
2081 if (!s390_has_feat(S390_FEAT_ETF3_ENH
)) {
2085 tr1
= tcg_const_i32(r1
);
2086 tr2
= tcg_const_i32(r2
);
2087 chk
= tcg_const_i32(m3
);
2089 switch (s
->insn
->data
) {
2091 gen_helper_cu12(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2094 gen_helper_cu14(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2097 gen_helper_cu21(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2100 gen_helper_cu24(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2103 gen_helper_cu41(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2106 gen_helper_cu42(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2109 g_assert_not_reached();
2112 tcg_temp_free_i32(tr1
);
2113 tcg_temp_free_i32(tr2
);
2114 tcg_temp_free_i32(chk
);
2119 #ifndef CONFIG_USER_ONLY
2120 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2122 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2123 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2124 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2126 check_privileged(s
);
2130 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2132 tcg_temp_free_i32(func_code
);
2133 tcg_temp_free_i32(r3
);
2134 tcg_temp_free_i32(r1
);
2139 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2141 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2142 return_low128(o
->out
);
2146 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2148 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2149 return_low128(o
->out
);
2153 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2155 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2156 return_low128(o
->out
);
2160 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2162 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2163 return_low128(o
->out
);
2167 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2169 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2173 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2175 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2179 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2181 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2182 return_low128(o
->out2
);
2186 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2188 int r2
= get_field(s
->fields
, r2
);
2189 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2193 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2195 /* No cache information provided. */
2196 tcg_gen_movi_i64(o
->out
, -1);
2200 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2202 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2206 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2208 int r1
= get_field(s
->fields
, r1
);
2209 int r2
= get_field(s
->fields
, r2
);
2210 TCGv_i64 t
= tcg_temp_new_i64();
2212 /* Note the "subsequently" in the PoO, which implies a defined result
2213 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2214 tcg_gen_shri_i64(t
, psw_mask
, 32);
2215 store_reg32_i64(r1
, t
);
2217 store_reg32_i64(r2
, psw_mask
);
2220 tcg_temp_free_i64(t
);
2224 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2226 int r1
= get_field(s
->fields
, r1
);
2230 /* Nested EXECUTE is not allowed. */
2231 if (unlikely(s
->ex_value
)) {
2232 gen_program_exception(s
, PGM_EXECUTE
);
2233 return EXIT_NORETURN
;
2240 v1
= tcg_const_i64(0);
2245 ilen
= tcg_const_i32(s
->ilen
);
2246 gen_helper_ex(cpu_env
, ilen
, v1
, o
->in2
);
2247 tcg_temp_free_i32(ilen
);
2250 tcg_temp_free_i64(v1
);
2253 return EXIT_PC_CC_UPDATED
;
2256 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2258 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2259 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2260 tcg_temp_free_i32(m3
);
2264 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2266 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2267 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2268 tcg_temp_free_i32(m3
);
2272 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2274 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2275 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2276 return_low128(o
->out2
);
2277 tcg_temp_free_i32(m3
);
2281 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2283 /* We'll use the original input for cc computation, since we get to
2284 compare that against 0, which ought to be better than comparing
2285 the real output against 64. It also lets cc_dst be a convenient
2286 temporary during our computation. */
2287 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2289 /* R1 = IN ? CLZ(IN) : 64. */
2290 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2292 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2293 value by 64, which is undefined. But since the shift is 64 iff the
2294 input is zero, we still get the correct result after and'ing. */
2295 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2296 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2297 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2301 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2303 int m3
= get_field(s
->fields
, m3
);
2304 int pos
, len
, base
= s
->insn
->data
;
2305 TCGv_i64 tmp
= tcg_temp_new_i64();
2310 /* Effectively a 32-bit load. */
2311 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2318 /* Effectively a 16-bit load. */
2319 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2327 /* Effectively an 8-bit load. */
2328 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2333 pos
= base
+ ctz32(m3
) * 8;
2334 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2335 ccm
= ((1ull << len
) - 1) << pos
;
2339 /* This is going to be a sequence of loads and inserts. */
2340 pos
= base
+ 32 - 8;
2344 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2345 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2346 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2349 m3
= (m3
<< 1) & 0xf;
2355 tcg_gen_movi_i64(tmp
, ccm
);
2356 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2357 tcg_temp_free_i64(tmp
);
2361 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2363 int shift
= s
->insn
->data
& 0xff;
2364 int size
= s
->insn
->data
>> 8;
2365 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2369 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2374 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2376 t1
= tcg_temp_new_i64();
2377 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2378 tcg_gen_shri_i64(t1
, t1
, 36);
2379 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2381 tcg_gen_extu_i32_i64(t1
, cc_op
);
2382 tcg_gen_shli_i64(t1
, t1
, 28);
2383 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2384 tcg_temp_free_i64(t1
);
2388 #ifndef CONFIG_USER_ONLY
2389 static ExitStatus
op_idte(DisasContext
*s
, DisasOps
*o
)
2393 check_privileged(s
);
2394 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2395 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2397 m4
= tcg_const_i32(0);
2399 gen_helper_idte(cpu_env
, o
->in1
, o
->in2
, m4
);
2400 tcg_temp_free_i32(m4
);
2404 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2408 check_privileged(s
);
2409 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2410 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2412 m4
= tcg_const_i32(0);
2414 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
, m4
);
2415 tcg_temp_free_i32(m4
);
2419 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2421 check_privileged(s
);
2422 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2427 static ExitStatus
op_msa(DisasContext
*s
, DisasOps
*o
)
2429 int r1
= have_field(s
->fields
, r1
) ? get_field(s
->fields
, r1
) : 0;
2430 int r2
= have_field(s
->fields
, r2
) ? get_field(s
->fields
, r2
) : 0;
2431 int r3
= have_field(s
->fields
, r3
) ? get_field(s
->fields
, r3
) : 0;
2432 TCGv_i32 t_r1
, t_r2
, t_r3
, type
;
2434 switch (s
->insn
->data
) {
2435 case S390_FEAT_TYPE_KMCTR
:
2436 if (r3
& 1 || !r3
) {
2437 gen_program_exception(s
, PGM_SPECIFICATION
);
2438 return EXIT_NORETURN
;
2441 case S390_FEAT_TYPE_PPNO
:
2442 case S390_FEAT_TYPE_KMF
:
2443 case S390_FEAT_TYPE_KMC
:
2444 case S390_FEAT_TYPE_KMO
:
2445 case S390_FEAT_TYPE_KM
:
2446 if (r1
& 1 || !r1
) {
2447 gen_program_exception(s
, PGM_SPECIFICATION
);
2448 return EXIT_NORETURN
;
2451 case S390_FEAT_TYPE_KMAC
:
2452 case S390_FEAT_TYPE_KIMD
:
2453 case S390_FEAT_TYPE_KLMD
:
2454 if (r2
& 1 || !r2
) {
2455 gen_program_exception(s
, PGM_SPECIFICATION
);
2456 return EXIT_NORETURN
;
2459 case S390_FEAT_TYPE_PCKMO
:
2460 case S390_FEAT_TYPE_PCC
:
2463 g_assert_not_reached();
2466 t_r1
= tcg_const_i32(r1
);
2467 t_r2
= tcg_const_i32(r2
);
2468 t_r3
= tcg_const_i32(r3
);
2469 type
= tcg_const_i32(s
->insn
->data
);
2470 gen_helper_msa(cc_op
, cpu_env
, t_r1
, t_r2
, t_r3
, type
);
2472 tcg_temp_free_i32(t_r1
);
2473 tcg_temp_free_i32(t_r2
);
2474 tcg_temp_free_i32(t_r3
);
2475 tcg_temp_free_i32(type
);
2479 static ExitStatus
op_keb(DisasContext
*s
, DisasOps
*o
)
2481 gen_helper_keb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2486 static ExitStatus
op_kdb(DisasContext
*s
, DisasOps
*o
)
2488 gen_helper_kdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2493 static ExitStatus
op_kxb(DisasContext
*s
, DisasOps
*o
)
2495 gen_helper_kxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2500 static ExitStatus
op_laa(DisasContext
*s
, DisasOps
*o
)
2502 /* The real output is indeed the original value in memory;
2503 recompute the addition for the computation of CC. */
2504 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2505 s
->insn
->data
| MO_ALIGN
);
2506 /* However, we need to recompute the addition for setting CC. */
2507 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2511 static ExitStatus
op_lan(DisasContext
*s
, DisasOps
*o
)
2513 /* The real output is indeed the original value in memory;
2514 recompute the addition for the computation of CC. */
2515 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2516 s
->insn
->data
| MO_ALIGN
);
2517 /* However, we need to recompute the operation for setting CC. */
2518 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2522 static ExitStatus
op_lao(DisasContext
*s
, DisasOps
*o
)
2524 /* The real output is indeed the original value in memory;
2525 recompute the addition for the computation of CC. */
2526 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2527 s
->insn
->data
| MO_ALIGN
);
2528 /* However, we need to recompute the operation for setting CC. */
2529 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2533 static ExitStatus
op_lax(DisasContext
*s
, DisasOps
*o
)
2535 /* The real output is indeed the original value in memory;
2536 recompute the addition for the computation of CC. */
2537 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2538 s
->insn
->data
| MO_ALIGN
);
2539 /* However, we need to recompute the operation for setting CC. */
2540 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2544 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2546 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2550 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2552 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2556 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2558 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2562 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2564 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2568 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2570 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2571 return_low128(o
->out2
);
2575 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2577 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2578 return_low128(o
->out2
);
2582 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2584 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2588 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2590 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2594 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2596 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2600 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2602 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2606 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2608 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2612 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2614 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2618 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2620 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2624 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2626 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2630 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2632 TCGLabel
*lab
= gen_new_label();
2633 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2634 /* The value is stored even in case of trap. */
2635 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2641 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2643 TCGLabel
*lab
= gen_new_label();
2644 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2645 /* The value is stored even in case of trap. */
2646 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2652 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2654 TCGLabel
*lab
= gen_new_label();
2655 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2656 /* The value is stored even in case of trap. */
2657 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2663 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2665 TCGLabel
*lab
= gen_new_label();
2666 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2667 /* The value is stored even in case of trap. */
2668 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2674 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2676 TCGLabel
*lab
= gen_new_label();
2677 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2678 /* The value is stored even in case of trap. */
2679 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2685 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2689 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2692 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2696 TCGv_i32 t32
= tcg_temp_new_i32();
2699 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2702 t
= tcg_temp_new_i64();
2703 tcg_gen_extu_i32_i64(t
, t32
);
2704 tcg_temp_free_i32(t32
);
2706 z
= tcg_const_i64(0);
2707 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2708 tcg_temp_free_i64(t
);
2709 tcg_temp_free_i64(z
);
2715 #ifndef CONFIG_USER_ONLY
2716 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2718 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2719 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2720 check_privileged(s
);
2721 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2722 tcg_temp_free_i32(r1
);
2723 tcg_temp_free_i32(r3
);
2724 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2725 return EXIT_PC_STALE_NOCHAIN
;
2728 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2730 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2731 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2732 check_privileged(s
);
2733 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2734 tcg_temp_free_i32(r1
);
2735 tcg_temp_free_i32(r3
);
2736 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2737 return EXIT_PC_STALE_NOCHAIN
;
2740 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2742 check_privileged(s
);
2743 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2748 static ExitStatus
op_lpp(DisasContext
*s
, DisasOps
*o
)
2750 check_privileged(s
);
2752 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
2756 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2760 check_privileged(s
);
2761 per_breaking_event(s
);
2763 t1
= tcg_temp_new_i64();
2764 t2
= tcg_temp_new_i64();
2765 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2766 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2767 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2768 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2769 tcg_gen_shli_i64(t1
, t1
, 32);
2770 gen_helper_load_psw(cpu_env
, t1
, t2
);
2771 tcg_temp_free_i64(t1
);
2772 tcg_temp_free_i64(t2
);
2773 return EXIT_NORETURN
;
2776 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2780 check_privileged(s
);
2781 per_breaking_event(s
);
2783 t1
= tcg_temp_new_i64();
2784 t2
= tcg_temp_new_i64();
2785 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2786 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2787 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2788 gen_helper_load_psw(cpu_env
, t1
, t2
);
2789 tcg_temp_free_i64(t1
);
2790 tcg_temp_free_i64(t2
);
2791 return EXIT_NORETURN
;
2795 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2797 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2798 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2799 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2800 tcg_temp_free_i32(r1
);
2801 tcg_temp_free_i32(r3
);
2805 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2807 int r1
= get_field(s
->fields
, r1
);
2808 int r3
= get_field(s
->fields
, r3
);
2811 /* Only one register to read. */
2812 t1
= tcg_temp_new_i64();
2813 if (unlikely(r1
== r3
)) {
2814 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2815 store_reg32_i64(r1
, t1
);
2820 /* First load the values of the first and last registers to trigger
2821 possible page faults. */
2822 t2
= tcg_temp_new_i64();
2823 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2824 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2825 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2826 store_reg32_i64(r1
, t1
);
2827 store_reg32_i64(r3
, t2
);
2829 /* Only two registers to read. */
2830 if (((r1
+ 1) & 15) == r3
) {
2836 /* Then load the remaining registers. Page fault can't occur. */
2838 tcg_gen_movi_i64(t2
, 4);
2841 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2842 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2843 store_reg32_i64(r1
, t1
);
2851 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2853 int r1
= get_field(s
->fields
, r1
);
2854 int r3
= get_field(s
->fields
, r3
);
2857 /* Only one register to read. */
2858 t1
= tcg_temp_new_i64();
2859 if (unlikely(r1
== r3
)) {
2860 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2861 store_reg32h_i64(r1
, t1
);
2866 /* First load the values of the first and last registers to trigger
2867 possible page faults. */
2868 t2
= tcg_temp_new_i64();
2869 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2870 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2871 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2872 store_reg32h_i64(r1
, t1
);
2873 store_reg32h_i64(r3
, t2
);
2875 /* Only two registers to read. */
2876 if (((r1
+ 1) & 15) == r3
) {
2882 /* Then load the remaining registers. Page fault can't occur. */
2884 tcg_gen_movi_i64(t2
, 4);
2887 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2888 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2889 store_reg32h_i64(r1
, t1
);
2897 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2899 int r1
= get_field(s
->fields
, r1
);
2900 int r3
= get_field(s
->fields
, r3
);
2903 /* Only one register to read. */
2904 if (unlikely(r1
== r3
)) {
2905 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2909 /* First load the values of the first and last registers to trigger
2910 possible page faults. */
2911 t1
= tcg_temp_new_i64();
2912 t2
= tcg_temp_new_i64();
2913 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2914 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2915 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2916 tcg_gen_mov_i64(regs
[r1
], t1
);
2919 /* Only two registers to read. */
2920 if (((r1
+ 1) & 15) == r3
) {
2925 /* Then load the remaining registers. Page fault can't occur. */
2927 tcg_gen_movi_i64(t1
, 8);
2930 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2931 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2938 static ExitStatus
op_lpd(DisasContext
*s
, DisasOps
*o
)
2941 TCGMemOp mop
= s
->insn
->data
;
2943 /* In a parallel context, stop the world and single step. */
2944 if (tb_cflags(s
->tb
) & CF_PARALLEL
) {
2945 potential_page_fault(s
);
2946 gen_exception(EXCP_ATOMIC
);
2947 return EXIT_NORETURN
;
2950 /* In a serial context, perform the two loads ... */
2951 a1
= get_address(s
, 0, get_field(s
->fields
, b1
), get_field(s
->fields
, d1
));
2952 a2
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
2953 tcg_gen_qemu_ld_i64(o
->out
, a1
, get_mem_index(s
), mop
| MO_ALIGN
);
2954 tcg_gen_qemu_ld_i64(o
->out2
, a2
, get_mem_index(s
), mop
| MO_ALIGN
);
2955 tcg_temp_free_i64(a1
);
2956 tcg_temp_free_i64(a2
);
2958 /* ... and indicate that we performed them while interlocked. */
2959 gen_op_movi_cc(s
, 0);
2963 static ExitStatus
op_lpq(DisasContext
*s
, DisasOps
*o
)
2965 if (tb_cflags(s
->tb
) & CF_PARALLEL
) {
2966 gen_helper_lpq_parallel(o
->out
, cpu_env
, o
->in2
);
2968 gen_helper_lpq(o
->out
, cpu_env
, o
->in2
);
2970 return_low128(o
->out2
);
2974 #ifndef CONFIG_USER_ONLY
2975 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2977 check_privileged(s
);
2978 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2982 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2984 check_privileged(s
);
2985 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2990 static ExitStatus
op_lzrb(DisasContext
*s
, DisasOps
*o
)
2992 tcg_gen_andi_i64(o
->out
, o
->in2
, -256);
2996 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2999 o
->g_out
= o
->g_in2
;
3000 TCGV_UNUSED_I64(o
->in2
);
3005 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
3007 int b2
= get_field(s
->fields
, b2
);
3008 TCGv ar1
= tcg_temp_new_i64();
3011 o
->g_out
= o
->g_in2
;
3012 TCGV_UNUSED_I64(o
->in2
);
3015 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
3016 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
3017 tcg_gen_movi_i64(ar1
, 0);
3019 case PSW_ASC_ACCREG
>> FLAG_MASK_PSW_SHIFT
:
3020 tcg_gen_movi_i64(ar1
, 1);
3022 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
3024 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
3026 tcg_gen_movi_i64(ar1
, 0);
3029 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
3030 tcg_gen_movi_i64(ar1
, 2);
3034 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
3035 tcg_temp_free_i64(ar1
);
3040 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
3044 o
->g_out
= o
->g_in1
;
3045 o
->g_out2
= o
->g_in2
;
3046 TCGV_UNUSED_I64(o
->in1
);
3047 TCGV_UNUSED_I64(o
->in2
);
3048 o
->g_in1
= o
->g_in2
= false;
3052 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
3054 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3055 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
3056 tcg_temp_free_i32(l
);
3060 static ExitStatus
op_mvcin(DisasContext
*s
, DisasOps
*o
)
3062 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3063 gen_helper_mvcin(cpu_env
, l
, o
->addr1
, o
->in2
);
3064 tcg_temp_free_i32(l
);
3068 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
3070 int r1
= get_field(s
->fields
, r1
);
3071 int r2
= get_field(s
->fields
, r2
);
3074 /* r1 and r2 must be even. */
3075 if (r1
& 1 || r2
& 1) {
3076 gen_program_exception(s
, PGM_SPECIFICATION
);
3077 return EXIT_NORETURN
;
3080 t1
= tcg_const_i32(r1
);
3081 t2
= tcg_const_i32(r2
);
3082 gen_helper_mvcl(cc_op
, cpu_env
, t1
, t2
);
3083 tcg_temp_free_i32(t1
);
3084 tcg_temp_free_i32(t2
);
3089 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
3091 int r1
= get_field(s
->fields
, r1
);
3092 int r3
= get_field(s
->fields
, r3
);
3095 /* r1 and r3 must be even. */
3096 if (r1
& 1 || r3
& 1) {
3097 gen_program_exception(s
, PGM_SPECIFICATION
);
3098 return EXIT_NORETURN
;
3101 t1
= tcg_const_i32(r1
);
3102 t3
= tcg_const_i32(r3
);
3103 gen_helper_mvcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3104 tcg_temp_free_i32(t1
);
3105 tcg_temp_free_i32(t3
);
3110 static ExitStatus
op_mvclu(DisasContext
*s
, DisasOps
*o
)
3112 int r1
= get_field(s
->fields
, r1
);
3113 int r3
= get_field(s
->fields
, r3
);
3116 /* r1 and r3 must be even. */
3117 if (r1
& 1 || r3
& 1) {
3118 gen_program_exception(s
, PGM_SPECIFICATION
);
3119 return EXIT_NORETURN
;
3122 t1
= tcg_const_i32(r1
);
3123 t3
= tcg_const_i32(r3
);
3124 gen_helper_mvclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3125 tcg_temp_free_i32(t1
);
3126 tcg_temp_free_i32(t3
);
3131 static ExitStatus
op_mvcos(DisasContext
*s
, DisasOps
*o
)
3133 int r3
= get_field(s
->fields
, r3
);
3134 gen_helper_mvcos(cc_op
, cpu_env
, o
->addr1
, o
->in2
, regs
[r3
]);
3139 #ifndef CONFIG_USER_ONLY
3140 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
3142 int r1
= get_field(s
->fields
, l1
);
3143 check_privileged(s
);
3144 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3149 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
3151 int r1
= get_field(s
->fields
, l1
);
3152 check_privileged(s
);
3153 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3159 static ExitStatus
op_mvn(DisasContext
*s
, DisasOps
*o
)
3161 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3162 gen_helper_mvn(cpu_env
, l
, o
->addr1
, o
->in2
);
3163 tcg_temp_free_i32(l
);
3167 static ExitStatus
op_mvo(DisasContext
*s
, DisasOps
*o
)
3169 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3170 gen_helper_mvo(cpu_env
, l
, o
->addr1
, o
->in2
);
3171 tcg_temp_free_i32(l
);
3175 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
3177 gen_helper_mvpg(cc_op
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3182 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
3184 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3186 return_low128(o
->in2
);
3190 static ExitStatus
op_mvz(DisasContext
*s
, DisasOps
*o
)
3192 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3193 gen_helper_mvz(cpu_env
, l
, o
->addr1
, o
->in2
);
3194 tcg_temp_free_i32(l
);
3198 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
3200 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3204 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
3206 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
3210 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
3212 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3216 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
3218 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3222 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
3224 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3228 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
3230 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3231 return_low128(o
->out2
);
3235 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
3237 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3238 return_low128(o
->out2
);
3242 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
3244 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3245 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3246 tcg_temp_free_i64(r3
);
3250 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
3252 int r3
= get_field(s
->fields
, r3
);
3253 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3257 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
3259 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3260 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3261 tcg_temp_free_i64(r3
);
3265 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
3267 int r3
= get_field(s
->fields
, r3
);
3268 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3272 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
3275 z
= tcg_const_i64(0);
3276 n
= tcg_temp_new_i64();
3277 tcg_gen_neg_i64(n
, o
->in2
);
3278 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
3279 tcg_temp_free_i64(n
);
3280 tcg_temp_free_i64(z
);
3284 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
3286 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3290 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
3292 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3296 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
3298 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3299 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3303 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
3305 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3306 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3307 tcg_temp_free_i32(l
);
3312 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3314 tcg_gen_neg_i64(o
->out
, o
->in2
);
3318 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
3320 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3324 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
3326 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3330 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
3332 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3333 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3337 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3339 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3340 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3341 tcg_temp_free_i32(l
);
3346 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3348 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3352 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3354 int shift
= s
->insn
->data
& 0xff;
3355 int size
= s
->insn
->data
>> 8;
3356 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3359 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3360 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3362 /* Produce the CC from only the bits manipulated. */
3363 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3364 set_cc_nz_u64(s
, cc_dst
);
3368 static ExitStatus
op_pack(DisasContext
*s
, DisasOps
*o
)
3370 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3371 gen_helper_pack(cpu_env
, l
, o
->addr1
, o
->in2
);
3372 tcg_temp_free_i32(l
);
3376 static ExitStatus
op_pka(DisasContext
*s
, DisasOps
*o
)
3378 int l2
= get_field(s
->fields
, l2
) + 1;
3381 /* The length must not exceed 32 bytes. */
3383 gen_program_exception(s
, PGM_SPECIFICATION
);
3384 return EXIT_NORETURN
;
3386 l
= tcg_const_i32(l2
);
3387 gen_helper_pka(cpu_env
, o
->addr1
, o
->in2
, l
);
3388 tcg_temp_free_i32(l
);
3392 static ExitStatus
op_pku(DisasContext
*s
, DisasOps
*o
)
3394 int l2
= get_field(s
->fields
, l2
) + 1;
3397 /* The length must be even and should not exceed 64 bytes. */
3398 if ((l2
& 1) || (l2
> 64)) {
3399 gen_program_exception(s
, PGM_SPECIFICATION
);
3400 return EXIT_NORETURN
;
3402 l
= tcg_const_i32(l2
);
3403 gen_helper_pku(cpu_env
, o
->addr1
, o
->in2
, l
);
3404 tcg_temp_free_i32(l
);
3408 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3410 gen_helper_popcnt(o
->out
, o
->in2
);
3414 #ifndef CONFIG_USER_ONLY
3415 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3417 check_privileged(s
);
3418 gen_helper_ptlb(cpu_env
);
3423 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3425 int i3
= get_field(s
->fields
, i3
);
3426 int i4
= get_field(s
->fields
, i4
);
3427 int i5
= get_field(s
->fields
, i5
);
3428 int do_zero
= i4
& 0x80;
3429 uint64_t mask
, imask
, pmask
;
3432 /* Adjust the arguments for the specific insn. */
3433 switch (s
->fields
->op2
) {
3434 case 0x55: /* risbg */
3435 case 0x59: /* risbgn */
3440 case 0x5d: /* risbhg */
3443 pmask
= 0xffffffff00000000ull
;
3445 case 0x51: /* risblg */
3448 pmask
= 0x00000000ffffffffull
;
3451 g_assert_not_reached();
3454 /* MASK is the set of bits to be inserted from R2.
3455 Take care for I3/I4 wraparound. */
3458 mask
^= pmask
>> i4
>> 1;
3460 mask
|= ~(pmask
>> i4
>> 1);
3464 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3465 insns, we need to keep the other half of the register. */
3466 imask
= ~mask
| ~pmask
;
3474 if (s
->fields
->op2
== 0x5d) {
3478 /* In some cases we can implement this with extract. */
3479 if (imask
== 0 && pos
== 0 && len
> 0 && len
<= rot
) {
3480 tcg_gen_extract_i64(o
->out
, o
->in2
, 64 - rot
, len
);
3484 /* In some cases we can implement this with deposit. */
3485 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3486 /* Note that we rotate the bits to be inserted to the lsb, not to
3487 the position as described in the PoO. */
3488 rot
= (rot
- pos
) & 63;
3493 /* Rotate the input as necessary. */
3494 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3496 /* Insert the selected bits into the output. */
3499 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3501 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3503 } else if (imask
== 0) {
3504 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3506 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3507 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3508 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3513 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3515 int i3
= get_field(s
->fields
, i3
);
3516 int i4
= get_field(s
->fields
, i4
);
3517 int i5
= get_field(s
->fields
, i5
);
3520 /* If this is a test-only form, arrange to discard the result. */
3522 o
->out
= tcg_temp_new_i64();
3530 /* MASK is the set of bits to be operated on from R2.
3531 Take care for I3/I4 wraparound. */
3534 mask
^= ~0ull >> i4
>> 1;
3536 mask
|= ~(~0ull >> i4
>> 1);
3539 /* Rotate the input as necessary. */
3540 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3543 switch (s
->fields
->op2
) {
3544 case 0x55: /* AND */
3545 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3546 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3549 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3550 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3552 case 0x57: /* XOR */
3553 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3554 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3561 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3562 set_cc_nz_u64(s
, cc_dst
);
3566 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
3568 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3572 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
3574 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3578 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
3580 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3584 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3586 TCGv_i32 t1
= tcg_temp_new_i32();
3587 TCGv_i32 t2
= tcg_temp_new_i32();
3588 TCGv_i32 to
= tcg_temp_new_i32();
3589 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3590 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3591 tcg_gen_rotl_i32(to
, t1
, t2
);
3592 tcg_gen_extu_i32_i64(o
->out
, to
);
3593 tcg_temp_free_i32(t1
);
3594 tcg_temp_free_i32(t2
);
3595 tcg_temp_free_i32(to
);
3599 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3601 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3605 #ifndef CONFIG_USER_ONLY
3606 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3608 check_privileged(s
);
3609 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3614 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
3616 check_privileged(s
);
3617 gen_helper_sacf(cpu_env
, o
->in2
);
3618 /* Addressing mode has changed, so end the block. */
3619 return EXIT_PC_STALE
;
3623 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3625 int sam
= s
->insn
->data
;
3641 /* Bizarre but true, we check the address of the current insn for the
3642 specification exception, not the next to be executed. Thus the PoO
3643 documents that Bad Things Happen two bytes before the end. */
3644 if (s
->pc
& ~mask
) {
3645 gen_program_exception(s
, PGM_SPECIFICATION
);
3646 return EXIT_NORETURN
;
3650 tsam
= tcg_const_i64(sam
);
3651 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3652 tcg_temp_free_i64(tsam
);
3654 /* Always exit the TB, since we (may have) changed execution mode. */
3655 return EXIT_PC_STALE
;
3658 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3660 int r1
= get_field(s
->fields
, r1
);
3661 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3665 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3667 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3671 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3673 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3677 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3679 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3680 return_low128(o
->out2
);
3684 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3686 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3690 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3692 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3696 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3698 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3699 return_low128(o
->out2
);
3703 #ifndef CONFIG_USER_ONLY
3704 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3706 check_privileged(s
);
3707 potential_page_fault(s
);
3708 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3713 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3715 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3716 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3717 check_privileged(s
);
3718 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, r3
);
3720 tcg_temp_free_i32(r1
);
3721 tcg_temp_free_i32(r3
);
3726 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3733 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3735 /* We want to store when the condition is fulfilled, so branch
3736 out when it's not */
3737 c
.cond
= tcg_invert_cond(c
.cond
);
3739 lab
= gen_new_label();
3741 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3743 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3747 r1
= get_field(s
->fields
, r1
);
3748 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3749 switch (s
->insn
->data
) {
3751 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3754 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3756 case 2: /* STOCFH */
3757 h
= tcg_temp_new_i64();
3758 tcg_gen_shri_i64(h
, regs
[r1
], 32);
3759 tcg_gen_qemu_st32(h
, a
, get_mem_index(s
));
3760 tcg_temp_free_i64(h
);
3763 g_assert_not_reached();
3765 tcg_temp_free_i64(a
);
3771 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3773 uint64_t sign
= 1ull << s
->insn
->data
;
3774 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3775 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3776 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3777 /* The arithmetic left shift is curious in that it does not affect
3778 the sign bit. Copy that over from the source unchanged. */
3779 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3780 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3781 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3785 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3787 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3791 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3793 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3797 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3799 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3803 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3805 gen_helper_sfpc(cpu_env
, o
->in2
);
3809 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3811 gen_helper_sfas(cpu_env
, o
->in2
);
3815 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3817 int b2
= get_field(s
->fields
, b2
);
3818 int d2
= get_field(s
->fields
, d2
);
3819 TCGv_i64 t1
= tcg_temp_new_i64();
3820 TCGv_i64 t2
= tcg_temp_new_i64();
3823 switch (s
->fields
->op2
) {
3824 case 0x99: /* SRNM */
3827 case 0xb8: /* SRNMB */
3830 case 0xb9: /* SRNMT */
3836 mask
= (1 << len
) - 1;
3838 /* Insert the value into the appropriate field of the FPC. */
3840 tcg_gen_movi_i64(t1
, d2
& mask
);
3842 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3843 tcg_gen_andi_i64(t1
, t1
, mask
);
3845 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3846 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3847 tcg_temp_free_i64(t1
);
3849 /* Then install the new FPC to set the rounding mode in fpu_status. */
3850 gen_helper_sfpc(cpu_env
, t2
);
3851 tcg_temp_free_i64(t2
);
3855 static ExitStatus
op_spm(DisasContext
*s
, DisasOps
*o
)
3857 tcg_gen_extrl_i64_i32(cc_op
, o
->in1
);
3858 tcg_gen_extract_i32(cc_op
, cc_op
, 28, 2);
3861 tcg_gen_shri_i64(o
->in1
, o
->in1
, 24);
3862 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in1
, PSW_SHIFT_MASK_PM
, 4);
3866 #ifndef CONFIG_USER_ONLY
3867 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3869 check_privileged(s
);
3870 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3871 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
, 4);
3875 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3877 check_privileged(s
);
3878 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3882 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3884 check_privileged(s
);
3885 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3886 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3887 return EXIT_PC_STALE_NOCHAIN
;
3890 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3892 check_privileged(s
);
3893 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, core_id
));
3897 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3899 gen_helper_stck(o
->out
, cpu_env
);
3900 /* ??? We don't implement clock states. */
3901 gen_op_movi_cc(s
, 0);
3905 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3907 TCGv_i64 c1
= tcg_temp_new_i64();
3908 TCGv_i64 c2
= tcg_temp_new_i64();
3909 gen_helper_stck(c1
, cpu_env
);
3910 /* Shift the 64-bit value into its place as a zero-extended
3911 104-bit value. Note that "bit positions 64-103 are always
3912 non-zero so that they compare differently to STCK"; we set
3913 the least significant bit to 1. */
3914 tcg_gen_shli_i64(c2
, c1
, 56);
3915 tcg_gen_shri_i64(c1
, c1
, 8);
3916 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3917 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3918 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3919 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3920 tcg_temp_free_i64(c1
);
3921 tcg_temp_free_i64(c2
);
3922 /* ??? We don't implement clock states. */
3923 gen_op_movi_cc(s
, 0);
3927 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3929 check_privileged(s
);
3930 gen_helper_sckc(cpu_env
, o
->in2
);
3934 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3936 check_privileged(s
);
3937 gen_helper_stckc(o
->out
, cpu_env
);
3941 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3943 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3944 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3945 check_privileged(s
);
3946 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3947 tcg_temp_free_i32(r1
);
3948 tcg_temp_free_i32(r3
);
3952 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3954 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3955 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3956 check_privileged(s
);
3957 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3958 tcg_temp_free_i32(r1
);
3959 tcg_temp_free_i32(r3
);
3963 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3965 check_privileged(s
);
3966 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpuid
));
3967 tcg_gen_qemu_st_i64(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
3971 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3973 check_privileged(s
);
3974 gen_helper_spt(cpu_env
, o
->in2
);
3978 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3980 check_privileged(s
);
3981 gen_helper_stfl(cpu_env
);
3985 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3987 check_privileged(s
);
3988 gen_helper_stpt(o
->out
, cpu_env
);
3992 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3994 check_privileged(s
);
3995 potential_page_fault(s
);
3996 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
4001 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
4003 check_privileged(s
);
4004 gen_helper_spx(cpu_env
, o
->in2
);
4008 static ExitStatus
op_xsch(DisasContext
*s
, DisasOps
*o
)
4010 check_privileged(s
);
4011 potential_page_fault(s
);
4012 gen_helper_xsch(cpu_env
, regs
[1]);
4017 static ExitStatus
op_csch(DisasContext
*s
, DisasOps
*o
)
4019 check_privileged(s
);
4020 potential_page_fault(s
);
4021 gen_helper_csch(cpu_env
, regs
[1]);
4026 static ExitStatus
op_hsch(DisasContext
*s
, DisasOps
*o
)
4028 check_privileged(s
);
4029 potential_page_fault(s
);
4030 gen_helper_hsch(cpu_env
, regs
[1]);
4035 static ExitStatus
op_msch(DisasContext
*s
, DisasOps
*o
)
4037 check_privileged(s
);
4038 potential_page_fault(s
);
4039 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
4044 static ExitStatus
op_rchp(DisasContext
*s
, DisasOps
*o
)
4046 check_privileged(s
);
4047 potential_page_fault(s
);
4048 gen_helper_rchp(cpu_env
, regs
[1]);
4053 static ExitStatus
op_rsch(DisasContext
*s
, DisasOps
*o
)
4055 check_privileged(s
);
4056 potential_page_fault(s
);
4057 gen_helper_rsch(cpu_env
, regs
[1]);
4062 static ExitStatus
op_ssch(DisasContext
*s
, DisasOps
*o
)
4064 check_privileged(s
);
4065 potential_page_fault(s
);
4066 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
4071 static ExitStatus
op_stsch(DisasContext
*s
, DisasOps
*o
)
4073 check_privileged(s
);
4074 potential_page_fault(s
);
4075 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
4080 static ExitStatus
op_tsch(DisasContext
*s
, DisasOps
*o
)
4082 check_privileged(s
);
4083 potential_page_fault(s
);
4084 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
4089 static ExitStatus
op_chsc(DisasContext
*s
, DisasOps
*o
)
4091 check_privileged(s
);
4092 potential_page_fault(s
);
4093 gen_helper_chsc(cpu_env
, o
->in2
);
4098 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
4100 check_privileged(s
);
4101 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
4102 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
4106 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
4108 uint64_t i2
= get_field(s
->fields
, i2
);
4111 check_privileged(s
);
4113 /* It is important to do what the instruction name says: STORE THEN.
4114 If we let the output hook perform the store then if we fault and
4115 restart, we'll have the wrong SYSTEM MASK in place. */
4116 t
= tcg_temp_new_i64();
4117 tcg_gen_shri_i64(t
, psw_mask
, 56);
4118 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
4119 tcg_temp_free_i64(t
);
4121 if (s
->fields
->op
== 0xac) {
4122 tcg_gen_andi_i64(psw_mask
, psw_mask
,
4123 (i2
<< 56) | 0x00ffffffffffffffull
);
4125 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
4128 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4129 return EXIT_PC_STALE_NOCHAIN
;
4132 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
4134 check_privileged(s
);
4135 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
4139 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
4141 check_privileged(s
);
4142 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
4147 static ExitStatus
op_stfle(DisasContext
*s
, DisasOps
*o
)
4149 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
4154 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
4156 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
4160 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
4162 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
4166 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
4168 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
4172 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
4174 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
4178 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
4180 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4181 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4182 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
4183 tcg_temp_free_i32(r1
);
4184 tcg_temp_free_i32(r3
);
4188 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
4190 int m3
= get_field(s
->fields
, m3
);
4191 int pos
, base
= s
->insn
->data
;
4192 TCGv_i64 tmp
= tcg_temp_new_i64();
4194 pos
= base
+ ctz32(m3
) * 8;
4197 /* Effectively a 32-bit store. */
4198 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4199 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
4205 /* Effectively a 16-bit store. */
4206 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4207 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
4214 /* Effectively an 8-bit store. */
4215 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4216 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4220 /* This is going to be a sequence of shifts and stores. */
4221 pos
= base
+ 32 - 8;
4224 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4225 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4226 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
4228 m3
= (m3
<< 1) & 0xf;
4233 tcg_temp_free_i64(tmp
);
4237 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
4239 int r1
= get_field(s
->fields
, r1
);
4240 int r3
= get_field(s
->fields
, r3
);
4241 int size
= s
->insn
->data
;
4242 TCGv_i64 tsize
= tcg_const_i64(size
);
4246 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
4248 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
4253 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
4257 tcg_temp_free_i64(tsize
);
4261 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
4263 int r1
= get_field(s
->fields
, r1
);
4264 int r3
= get_field(s
->fields
, r3
);
4265 TCGv_i64 t
= tcg_temp_new_i64();
4266 TCGv_i64 t4
= tcg_const_i64(4);
4267 TCGv_i64 t32
= tcg_const_i64(32);
4270 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
4271 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
4275 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
4279 tcg_temp_free_i64(t
);
4280 tcg_temp_free_i64(t4
);
4281 tcg_temp_free_i64(t32
);
4285 static ExitStatus
op_stpq(DisasContext
*s
, DisasOps
*o
)
4287 if (tb_cflags(s
->tb
) & CF_PARALLEL
) {
4288 gen_helper_stpq_parallel(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4290 gen_helper_stpq(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4295 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
4297 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4298 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4300 gen_helper_srst(cpu_env
, r1
, r2
);
4302 tcg_temp_free_i32(r1
);
4303 tcg_temp_free_i32(r2
);
4308 static ExitStatus
op_srstu(DisasContext
*s
, DisasOps
*o
)
4310 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4311 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4313 gen_helper_srstu(cpu_env
, r1
, r2
);
4315 tcg_temp_free_i32(r1
);
4316 tcg_temp_free_i32(r2
);
4321 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
4323 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4327 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
4332 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4334 /* The !borrow flag is the msb of CC. Since we want the inverse of
4335 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4336 disas_jcc(s
, &cmp
, 8 | 4);
4337 borrow
= tcg_temp_new_i64();
4339 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
4341 TCGv_i32 t
= tcg_temp_new_i32();
4342 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
4343 tcg_gen_extu_i32_i64(borrow
, t
);
4344 tcg_temp_free_i32(t
);
4348 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
4349 tcg_temp_free_i64(borrow
);
4353 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
4360 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
4361 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4362 tcg_temp_free_i32(t
);
4364 t
= tcg_const_i32(s
->ilen
);
4365 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4366 tcg_temp_free_i32(t
);
4368 gen_exception(EXCP_SVC
);
4369 return EXIT_NORETURN
;
4372 static ExitStatus
op_tam(DisasContext
*s
, DisasOps
*o
)
4376 cc
|= (s
->tb
->flags
& FLAG_MASK_64
) ? 2 : 0;
4377 cc
|= (s
->tb
->flags
& FLAG_MASK_32
) ? 1 : 0;
4378 gen_op_movi_cc(s
, cc
);
4382 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
4384 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4389 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4391 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4396 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4398 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4403 #ifndef CONFIG_USER_ONLY
4405 static ExitStatus
op_testblock(DisasContext
*s
, DisasOps
*o
)
4407 check_privileged(s
);
4408 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4413 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
4415 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
4422 static ExitStatus
op_tp(DisasContext
*s
, DisasOps
*o
)
4424 TCGv_i32 l1
= tcg_const_i32(get_field(s
->fields
, l1
) + 1);
4425 gen_helper_tp(cc_op
, cpu_env
, o
->addr1
, l1
);
4426 tcg_temp_free_i32(l1
);
4431 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
4433 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4434 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4435 tcg_temp_free_i32(l
);
4440 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
4442 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4443 return_low128(o
->out2
);
4448 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
4450 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4451 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4452 tcg_temp_free_i32(l
);
4457 static ExitStatus
op_trtr(DisasContext
*s
, DisasOps
*o
)
4459 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4460 gen_helper_trtr(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4461 tcg_temp_free_i32(l
);
4466 static ExitStatus
op_trXX(DisasContext
*s
, DisasOps
*o
)
4468 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4469 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4470 TCGv_i32 sizes
= tcg_const_i32(s
->insn
->opc
& 3);
4471 TCGv_i32 tst
= tcg_temp_new_i32();
4472 int m3
= get_field(s
->fields
, m3
);
4474 if (!s390_has_feat(S390_FEAT_ETF2_ENH
)) {
4478 tcg_gen_movi_i32(tst
, -1);
4480 tcg_gen_extrl_i64_i32(tst
, regs
[0]);
4481 if (s
->insn
->opc
& 3) {
4482 tcg_gen_ext8u_i32(tst
, tst
);
4484 tcg_gen_ext16u_i32(tst
, tst
);
4487 gen_helper_trXX(cc_op
, cpu_env
, r1
, r2
, tst
, sizes
);
4489 tcg_temp_free_i32(r1
);
4490 tcg_temp_free_i32(r2
);
4491 tcg_temp_free_i32(sizes
);
4492 tcg_temp_free_i32(tst
);
4497 static ExitStatus
op_ts(DisasContext
*s
, DisasOps
*o
)
4499 TCGv_i32 t1
= tcg_const_i32(0xff);
4500 tcg_gen_atomic_xchg_i32(t1
, o
->in2
, t1
, get_mem_index(s
), MO_UB
);
4501 tcg_gen_extract_i32(cc_op
, t1
, 7, 1);
4502 tcg_temp_free_i32(t1
);
4507 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
4509 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4510 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4511 tcg_temp_free_i32(l
);
4515 static ExitStatus
op_unpka(DisasContext
*s
, DisasOps
*o
)
4517 int l1
= get_field(s
->fields
, l1
) + 1;
4520 /* The length must not exceed 32 bytes. */
4522 gen_program_exception(s
, PGM_SPECIFICATION
);
4523 return EXIT_NORETURN
;
4525 l
= tcg_const_i32(l1
);
4526 gen_helper_unpka(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4527 tcg_temp_free_i32(l
);
4532 static ExitStatus
op_unpku(DisasContext
*s
, DisasOps
*o
)
4534 int l1
= get_field(s
->fields
, l1
) + 1;
4537 /* The length must be even and should not exceed 64 bytes. */
4538 if ((l1
& 1) || (l1
> 64)) {
4539 gen_program_exception(s
, PGM_SPECIFICATION
);
4540 return EXIT_NORETURN
;
4542 l
= tcg_const_i32(l1
);
4543 gen_helper_unpku(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4544 tcg_temp_free_i32(l
);
4550 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
4552 int d1
= get_field(s
->fields
, d1
);
4553 int d2
= get_field(s
->fields
, d2
);
4554 int b1
= get_field(s
->fields
, b1
);
4555 int b2
= get_field(s
->fields
, b2
);
4556 int l
= get_field(s
->fields
, l1
);
4559 o
->addr1
= get_address(s
, 0, b1
, d1
);
4561 /* If the addresses are identical, this is a store/memset of zero. */
4562 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4563 o
->in2
= tcg_const_i64(0);
4567 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4570 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4574 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4577 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4581 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4584 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4588 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4590 gen_op_movi_cc(s
, 0);
4594 /* But in general we'll defer to a helper. */
4595 o
->in2
= get_address(s
, 0, b2
, d2
);
4596 t32
= tcg_const_i32(l
);
4597 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4598 tcg_temp_free_i32(t32
);
4603 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
4605 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4609 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
4611 int shift
= s
->insn
->data
& 0xff;
4612 int size
= s
->insn
->data
>> 8;
4613 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4616 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4617 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4619 /* Produce the CC from only the bits manipulated. */
4620 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4621 set_cc_nz_u64(s
, cc_dst
);
4625 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
4627 o
->out
= tcg_const_i64(0);
4631 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
4633 o
->out
= tcg_const_i64(0);
4639 /* ====================================================================== */
4640 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4641 the original inputs), update the various cc data structures in order to
4642 be able to compute the new condition code. */
4644 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4646 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4649 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4651 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
4654 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
4656 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
4659 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4661 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4664 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4666 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4669 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4671 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4674 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4676 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4679 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4681 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
4684 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
4686 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
4689 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4691 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4694 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4696 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4699 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4701 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4704 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4706 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4709 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4711 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4714 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4716 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4719 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4721 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4724 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4726 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4729 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4731 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4734 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4736 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
4739 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
4741 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
4742 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
4745 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
4747 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
4750 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
4752 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
4755 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
4757 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
4760 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
4762 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
4765 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
4767 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
4770 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
4772 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
4775 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
4777 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
4780 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
4782 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
4785 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
4787 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
4790 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
4792 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
4795 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
4797 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
4800 /* ====================================================================== */
4801 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4802 with the TCG register to which we will write. Used in combination with
4803 the "wout" generators, in some cases we need a new temporary, and in
4804 some cases we can write to a TCG global. */
4806 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4808 o
->out
= tcg_temp_new_i64();
4810 #define SPEC_prep_new 0
4812 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4814 o
->out
= tcg_temp_new_i64();
4815 o
->out2
= tcg_temp_new_i64();
4817 #define SPEC_prep_new_P 0
4819 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4821 o
->out
= regs
[get_field(f
, r1
)];
4824 #define SPEC_prep_r1 0
4826 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4828 int r1
= get_field(f
, r1
);
4830 o
->out2
= regs
[r1
+ 1];
4831 o
->g_out
= o
->g_out2
= true;
4833 #define SPEC_prep_r1_P SPEC_r1_even
4835 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4837 o
->out
= fregs
[get_field(f
, r1
)];
4840 #define SPEC_prep_f1 0
4842 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4844 int r1
= get_field(f
, r1
);
4846 o
->out2
= fregs
[r1
+ 2];
4847 o
->g_out
= o
->g_out2
= true;
4849 #define SPEC_prep_x1 SPEC_r1_f128
4851 /* ====================================================================== */
4852 /* The "Write OUTput" generators. These generally perform some non-trivial
4853 copy of data to TCG globals, or to main memory. The trivial cases are
4854 generally handled by having a "prep" generator install the TCG global
4855 as the destination of the operation. */
4857 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4859 store_reg(get_field(f
, r1
), o
->out
);
4861 #define SPEC_wout_r1 0
4863 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4865 int r1
= get_field(f
, r1
);
4866 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
4868 #define SPEC_wout_r1_8 0
4870 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4872 int r1
= get_field(f
, r1
);
4873 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
4875 #define SPEC_wout_r1_16 0
4877 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4879 store_reg32_i64(get_field(f
, r1
), o
->out
);
4881 #define SPEC_wout_r1_32 0
4883 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4885 store_reg32h_i64(get_field(f
, r1
), o
->out
);
4887 #define SPEC_wout_r1_32h 0
4889 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4891 int r1
= get_field(f
, r1
);
4892 store_reg32_i64(r1
, o
->out
);
4893 store_reg32_i64(r1
+ 1, o
->out2
);
4895 #define SPEC_wout_r1_P32 SPEC_r1_even
4897 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4899 int r1
= get_field(f
, r1
);
4900 store_reg32_i64(r1
+ 1, o
->out
);
4901 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
4902 store_reg32_i64(r1
, o
->out
);
4904 #define SPEC_wout_r1_D32 SPEC_r1_even
4906 static void wout_r3_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4908 int r3
= get_field(f
, r3
);
4909 store_reg32_i64(r3
, o
->out
);
4910 store_reg32_i64(r3
+ 1, o
->out2
);
4912 #define SPEC_wout_r3_P32 SPEC_r3_even
4914 static void wout_r3_P64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4916 int r3
= get_field(f
, r3
);
4917 store_reg(r3
, o
->out
);
4918 store_reg(r3
+ 1, o
->out2
);
4920 #define SPEC_wout_r3_P64 SPEC_r3_even
4922 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4924 store_freg32_i64(get_field(f
, r1
), o
->out
);
4926 #define SPEC_wout_e1 0
4928 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4930 store_freg(get_field(f
, r1
), o
->out
);
4932 #define SPEC_wout_f1 0
4934 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4936 int f1
= get_field(s
->fields
, r1
);
4937 store_freg(f1
, o
->out
);
4938 store_freg(f1
+ 2, o
->out2
);
4940 #define SPEC_wout_x1 SPEC_r1_f128
4942 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4944 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4945 store_reg32_i64(get_field(f
, r1
), o
->out
);
4948 #define SPEC_wout_cond_r1r2_32 0
4950 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4952 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4953 store_freg32_i64(get_field(f
, r1
), o
->out
);
4956 #define SPEC_wout_cond_e1e2 0
4958 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4960 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4962 #define SPEC_wout_m1_8 0
4964 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4966 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4968 #define SPEC_wout_m1_16 0
4970 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4972 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4974 #define SPEC_wout_m1_32 0
4976 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4978 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4980 #define SPEC_wout_m1_64 0
4982 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4984 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4986 #define SPEC_wout_m2_32 0
4988 static void wout_in2_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4990 store_reg(get_field(f
, r1
), o
->in2
);
4992 #define SPEC_wout_in2_r1 0
4994 static void wout_in2_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4996 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4998 #define SPEC_wout_in2_r1_32 0
5000 /* ====================================================================== */
5001 /* The "INput 1" generators. These load the first operand to an insn. */
5003 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5005 o
->in1
= load_reg(get_field(f
, r1
));
5007 #define SPEC_in1_r1 0
5009 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5011 o
->in1
= regs
[get_field(f
, r1
)];
5014 #define SPEC_in1_r1_o 0
5016 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5018 o
->in1
= tcg_temp_new_i64();
5019 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
5021 #define SPEC_in1_r1_32s 0
5023 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5025 o
->in1
= tcg_temp_new_i64();
5026 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
5028 #define SPEC_in1_r1_32u 0
5030 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5032 o
->in1
= tcg_temp_new_i64();
5033 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
5035 #define SPEC_in1_r1_sr32 0
5037 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5039 o
->in1
= load_reg(get_field(f
, r1
) + 1);
5041 #define SPEC_in1_r1p1 SPEC_r1_even
5043 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5045 o
->in1
= tcg_temp_new_i64();
5046 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
5048 #define SPEC_in1_r1p1_32s SPEC_r1_even
5050 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5052 o
->in1
= tcg_temp_new_i64();
5053 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
5055 #define SPEC_in1_r1p1_32u SPEC_r1_even
5057 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5059 int r1
= get_field(f
, r1
);
5060 o
->in1
= tcg_temp_new_i64();
5061 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
5063 #define SPEC_in1_r1_D32 SPEC_r1_even
5065 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5067 o
->in1
= load_reg(get_field(f
, r2
));
5069 #define SPEC_in1_r2 0
5071 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5073 o
->in1
= tcg_temp_new_i64();
5074 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
5076 #define SPEC_in1_r2_sr32 0
5078 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5080 o
->in1
= load_reg(get_field(f
, r3
));
5082 #define SPEC_in1_r3 0
5084 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5086 o
->in1
= regs
[get_field(f
, r3
)];
5089 #define SPEC_in1_r3_o 0
5091 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5093 o
->in1
= tcg_temp_new_i64();
5094 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5096 #define SPEC_in1_r3_32s 0
5098 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5100 o
->in1
= tcg_temp_new_i64();
5101 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5103 #define SPEC_in1_r3_32u 0
5105 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5107 int r3
= get_field(f
, r3
);
5108 o
->in1
= tcg_temp_new_i64();
5109 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
5111 #define SPEC_in1_r3_D32 SPEC_r3_even
5113 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5115 o
->in1
= load_freg32_i64(get_field(f
, r1
));
5117 #define SPEC_in1_e1 0
5119 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5121 o
->in1
= fregs
[get_field(f
, r1
)];
5124 #define SPEC_in1_f1_o 0
5126 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5128 int r1
= get_field(f
, r1
);
5130 o
->out2
= fregs
[r1
+ 2];
5131 o
->g_out
= o
->g_out2
= true;
5133 #define SPEC_in1_x1_o SPEC_r1_f128
5135 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5137 o
->in1
= fregs
[get_field(f
, r3
)];
5140 #define SPEC_in1_f3_o 0
5142 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5144 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
5146 #define SPEC_in1_la1 0
5148 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5150 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5151 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5153 #define SPEC_in1_la2 0
5155 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5158 o
->in1
= tcg_temp_new_i64();
5159 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
5161 #define SPEC_in1_m1_8u 0
5163 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5166 o
->in1
= tcg_temp_new_i64();
5167 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
5169 #define SPEC_in1_m1_16s 0
5171 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5174 o
->in1
= tcg_temp_new_i64();
5175 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
5177 #define SPEC_in1_m1_16u 0
5179 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5182 o
->in1
= tcg_temp_new_i64();
5183 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
5185 #define SPEC_in1_m1_32s 0
5187 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5190 o
->in1
= tcg_temp_new_i64();
5191 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
5193 #define SPEC_in1_m1_32u 0
5195 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5198 o
->in1
= tcg_temp_new_i64();
5199 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
5201 #define SPEC_in1_m1_64 0
5203 /* ====================================================================== */
5204 /* The "INput 2" generators. These load the second operand to an insn. */
5206 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5208 o
->in2
= regs
[get_field(f
, r1
)];
5211 #define SPEC_in2_r1_o 0
5213 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5215 o
->in2
= tcg_temp_new_i64();
5216 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5218 #define SPEC_in2_r1_16u 0
5220 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5222 o
->in2
= tcg_temp_new_i64();
5223 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5225 #define SPEC_in2_r1_32u 0
5227 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5229 int r1
= get_field(f
, r1
);
5230 o
->in2
= tcg_temp_new_i64();
5231 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
5233 #define SPEC_in2_r1_D32 SPEC_r1_even
5235 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5237 o
->in2
= load_reg(get_field(f
, r2
));
5239 #define SPEC_in2_r2 0
5241 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5243 o
->in2
= regs
[get_field(f
, r2
)];
5246 #define SPEC_in2_r2_o 0
5248 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5250 int r2
= get_field(f
, r2
);
5252 o
->in2
= load_reg(r2
);
5255 #define SPEC_in2_r2_nz 0
5257 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5259 o
->in2
= tcg_temp_new_i64();
5260 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5262 #define SPEC_in2_r2_8s 0
5264 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5266 o
->in2
= tcg_temp_new_i64();
5267 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5269 #define SPEC_in2_r2_8u 0
5271 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5273 o
->in2
= tcg_temp_new_i64();
5274 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5276 #define SPEC_in2_r2_16s 0
5278 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5280 o
->in2
= tcg_temp_new_i64();
5281 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5283 #define SPEC_in2_r2_16u 0
5285 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5287 o
->in2
= load_reg(get_field(f
, r3
));
5289 #define SPEC_in2_r3 0
5291 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5293 o
->in2
= tcg_temp_new_i64();
5294 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
5296 #define SPEC_in2_r3_sr32 0
5298 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5300 o
->in2
= tcg_temp_new_i64();
5301 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5303 #define SPEC_in2_r2_32s 0
5305 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5307 o
->in2
= tcg_temp_new_i64();
5308 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5310 #define SPEC_in2_r2_32u 0
5312 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5314 o
->in2
= tcg_temp_new_i64();
5315 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
5317 #define SPEC_in2_r2_sr32 0
5319 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5321 o
->in2
= load_freg32_i64(get_field(f
, r2
));
5323 #define SPEC_in2_e2 0
5325 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5327 o
->in2
= fregs
[get_field(f
, r2
)];
5330 #define SPEC_in2_f2_o 0
5332 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5334 int r2
= get_field(f
, r2
);
5336 o
->in2
= fregs
[r2
+ 2];
5337 o
->g_in1
= o
->g_in2
= true;
5339 #define SPEC_in2_x2_o SPEC_r2_f128
5341 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5343 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
5345 #define SPEC_in2_ra2 0
5347 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5349 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5350 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5352 #define SPEC_in2_a2 0
5354 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5356 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
5358 #define SPEC_in2_ri2 0
5360 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5362 help_l2_shift(s
, f
, o
, 31);
5364 #define SPEC_in2_sh32 0
5366 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5368 help_l2_shift(s
, f
, o
, 63);
5370 #define SPEC_in2_sh64 0
5372 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5375 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
5377 #define SPEC_in2_m2_8u 0
5379 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5382 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
5384 #define SPEC_in2_m2_16s 0
5386 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5389 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5391 #define SPEC_in2_m2_16u 0
5393 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5396 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5398 #define SPEC_in2_m2_32s 0
5400 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5403 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5405 #define SPEC_in2_m2_32u 0
5407 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5410 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5412 #define SPEC_in2_m2_64 0
5414 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5417 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5419 #define SPEC_in2_mri2_16u 0
5421 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5424 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5426 #define SPEC_in2_mri2_32s 0
5428 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5431 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5433 #define SPEC_in2_mri2_32u 0
5435 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5438 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5440 #define SPEC_in2_mri2_64 0
5442 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5444 o
->in2
= tcg_const_i64(get_field(f
, i2
));
5446 #define SPEC_in2_i2 0
5448 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5450 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
5452 #define SPEC_in2_i2_8u 0
5454 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5456 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
5458 #define SPEC_in2_i2_16u 0
5460 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5462 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
5464 #define SPEC_in2_i2_32u 0
5466 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5468 uint64_t i2
= (uint16_t)get_field(f
, i2
);
5469 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5471 #define SPEC_in2_i2_16u_shl 0
5473 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5475 uint64_t i2
= (uint32_t)get_field(f
, i2
);
5476 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5478 #define SPEC_in2_i2_32u_shl 0
5480 #ifndef CONFIG_USER_ONLY
5481 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5483 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
5485 #define SPEC_in2_insn 0
5488 /* ====================================================================== */
5490 /* Find opc within the table of insns. This is formulated as a switch
5491 statement so that (1) we get compile-time notice of cut-paste errors
5492 for duplicated opcodes, and (2) the compiler generates the binary
5493 search tree, rather than us having to post-process the table. */
5495 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5496 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5498 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5500 enum DisasInsnEnum
{
5501 #include "insn-data.def"
5505 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5509 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5511 .help_in1 = in1_##I1, \
5512 .help_in2 = in2_##I2, \
5513 .help_prep = prep_##P, \
5514 .help_wout = wout_##W, \
5515 .help_cout = cout_##CC, \
5516 .help_op = op_##OP, \
5520 /* Allow 0 to be used for NULL in the table below. */
5528 #define SPEC_in1_0 0
5529 #define SPEC_in2_0 0
5530 #define SPEC_prep_0 0
5531 #define SPEC_wout_0 0
/* Give smaller names to the various facilities, so the insn table below
   stays readable.  Several distinct architectural facilities map onto
   the same STFLE feature bit; the trailing comment records which one
   each alias stands for.  */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
5570 static const DisasInsn insn_info
[] = {
5571 #include "insn-data.def"
5575 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5576 case OPC: return &insn_info[insn_ ## NM];
5578 static const DisasInsn
*lookup_opc(uint16_t opc
)
5581 #include "insn-data.def"
5590 /* Extract a field from the insn. The INSN should be left-aligned in
5591 the uint64_t so that we can more easily utilize the big-bit-endian
5592 definitions we extract from the Principals of Operation. */
5594 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5602 /* Zero extract the field from the insn. */
5603 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5605 /* Sign-extend, or un-swap the field as necessary. */
5607 case 0: /* unsigned */
5609 case 1: /* signed */
5610 assert(f
->size
<= 32);
5611 m
= 1u << (f
->size
- 1);
5614 case 2: /* dl+dh split, signed 20 bit. */
5615 r
= ((int8_t)r
<< 12) | (r
>> 8);
5621 /* Validate that the "compressed" encoding we selected above is valid.
5622 I.e. we havn't make two different original fields overlap. */
5623 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5624 o
->presentC
|= 1 << f
->indexC
;
5625 o
->presentO
|= 1 << f
->indexO
;
5627 o
->c
[f
->indexC
] = r
;
5630 /* Lookup the insn at the current PC, extracting the operands into O and
5631 returning the info struct for the insn. Returns NULL for invalid insn. */
5633 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5636 uint64_t insn
, pc
= s
->pc
;
5638 const DisasInsn
*info
;
5640 if (unlikely(s
->ex_value
)) {
5641 /* Drop the EX data now, so that it's clear on exception paths. */
5642 TCGv_i64 zero
= tcg_const_i64(0);
5643 tcg_gen_st_i64(zero
, cpu_env
, offsetof(CPUS390XState
, ex_value
));
5644 tcg_temp_free_i64(zero
);
5646 /* Extract the values saved by EXECUTE. */
5647 insn
= s
->ex_value
& 0xffffffffffff0000ull
;
5648 ilen
= s
->ex_value
& 0xf;
5651 insn
= ld_code2(env
, pc
);
5652 op
= (insn
>> 8) & 0xff;
5653 ilen
= get_ilen(op
);
5659 insn
= ld_code4(env
, pc
) << 32;
5662 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
5665 g_assert_not_reached();
5668 s
->next_pc
= s
->pc
+ ilen
;
5671 /* We can't actually determine the insn format until we've looked up
5672 the full insn opcode. Which we can't do without locating the
5673 secondary opcode. Assume by default that OP2 is at bit 40; for
5674 those smaller insns that don't actually have a secondary opcode
5675 this will correctly result in OP2 = 0. */
5681 case 0xb2: /* S, RRF, RRE, IE */
5682 case 0xb3: /* RRE, RRD, RRF */
5683 case 0xb9: /* RRE, RRF */
5684 case 0xe5: /* SSE, SIL */
5685 op2
= (insn
<< 8) >> 56;
5689 case 0xc0: /* RIL */
5690 case 0xc2: /* RIL */
5691 case 0xc4: /* RIL */
5692 case 0xc6: /* RIL */
5693 case 0xc8: /* SSF */
5694 case 0xcc: /* RIL */
5695 op2
= (insn
<< 12) >> 60;
5697 case 0xc5: /* MII */
5698 case 0xc7: /* SMI */
5699 case 0xd0 ... 0xdf: /* SS */
5705 case 0xee ... 0xf3: /* SS */
5706 case 0xf8 ... 0xfd: /* SS */
5710 op2
= (insn
<< 40) >> 56;
5714 memset(f
, 0, sizeof(*f
));
5719 /* Lookup the instruction. */
5720 info
= lookup_opc(op
<< 8 | op2
);
5722 /* If we found it, extract the operands. */
5724 DisasFormat fmt
= info
->fmt
;
5727 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5728 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
5734 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5736 const DisasInsn
*insn
;
5737 ExitStatus ret
= NO_EXIT
;
5741 /* Search for the insn in the table. */
5742 insn
= extract_insn(env
, s
, &f
);
5744 /* Not found means unimplemented/illegal opcode. */
5746 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
5748 gen_illegal_opcode(s
);
5749 return EXIT_NORETURN
;
5752 #ifndef CONFIG_USER_ONLY
5753 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5754 TCGv_i64 addr
= tcg_const_i64(s
->pc
);
5755 gen_helper_per_ifetch(cpu_env
, addr
);
5756 tcg_temp_free_i64(addr
);
5760 /* Check for insn specification exceptions. */
5762 int spec
= insn
->spec
, excp
= 0, r
;
5764 if (spec
& SPEC_r1_even
) {
5765 r
= get_field(&f
, r1
);
5767 excp
= PGM_SPECIFICATION
;
5770 if (spec
& SPEC_r2_even
) {
5771 r
= get_field(&f
, r2
);
5773 excp
= PGM_SPECIFICATION
;
5776 if (spec
& SPEC_r3_even
) {
5777 r
= get_field(&f
, r3
);
5779 excp
= PGM_SPECIFICATION
;
5782 if (spec
& SPEC_r1_f128
) {
5783 r
= get_field(&f
, r1
);
5785 excp
= PGM_SPECIFICATION
;
5788 if (spec
& SPEC_r2_f128
) {
5789 r
= get_field(&f
, r2
);
5791 excp
= PGM_SPECIFICATION
;
5795 gen_program_exception(s
, excp
);
5796 return EXIT_NORETURN
;
5800 /* Set up the strutures we use to communicate with the helpers. */
5803 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5804 TCGV_UNUSED_I64(o
.out
);
5805 TCGV_UNUSED_I64(o
.out2
);
5806 TCGV_UNUSED_I64(o
.in1
);
5807 TCGV_UNUSED_I64(o
.in2
);
5808 TCGV_UNUSED_I64(o
.addr1
);
5810 /* Implement the instruction. */
5811 if (insn
->help_in1
) {
5812 insn
->help_in1(s
, &f
, &o
);
5814 if (insn
->help_in2
) {
5815 insn
->help_in2(s
, &f
, &o
);
5817 if (insn
->help_prep
) {
5818 insn
->help_prep(s
, &f
, &o
);
5820 if (insn
->help_op
) {
5821 ret
= insn
->help_op(s
, &o
);
5823 if (insn
->help_wout
) {
5824 insn
->help_wout(s
, &f
, &o
);
5826 if (insn
->help_cout
) {
5827 insn
->help_cout(s
, &o
);
5830 /* Free any temporaries created by the helpers. */
5831 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5832 tcg_temp_free_i64(o
.out
);
5834 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5835 tcg_temp_free_i64(o
.out2
);
5837 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5838 tcg_temp_free_i64(o
.in1
);
5840 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5841 tcg_temp_free_i64(o
.in2
);
5843 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5844 tcg_temp_free_i64(o
.addr1
);
5847 #ifndef CONFIG_USER_ONLY
5848 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5849 /* An exception might be triggered, save PSW if not already done. */
5850 if (ret
== NO_EXIT
|| ret
== EXIT_PC_STALE
) {
5851 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
5857 /* Call the helper to check for a possible PER exception. */
5858 gen_helper_per_check_exception(cpu_env
);
5862 /* Advance to the next instruction. */
5867 void gen_intermediate_code(CPUState
*cs
, struct TranslationBlock
*tb
)
5869 CPUS390XState
*env
= cs
->env_ptr
;
5871 target_ulong pc_start
;
5872 uint64_t next_page_start
;
5873 int num_insns
, max_insns
;
5880 if (!(tb
->flags
& FLAG_MASK_64
)) {
5881 pc_start
&= 0x7fffffff;
5886 dc
.cc_op
= CC_OP_DYNAMIC
;
5887 dc
.ex_value
= tb
->cs_base
;
5888 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
5890 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
5893 max_insns
= tb_cflags(tb
) & CF_COUNT_MASK
;
5894 if (max_insns
== 0) {
5895 max_insns
= CF_COUNT_MASK
;
5897 if (max_insns
> TCG_MAX_INSNS
) {
5898 max_insns
= TCG_MAX_INSNS
;
5904 tcg_gen_insn_start(dc
.pc
, dc
.cc_op
);
5907 if (unlikely(cpu_breakpoint_test(cs
, dc
.pc
, BP_ANY
))) {
5908 status
= EXIT_PC_STALE
;
5910 /* The address covered by the breakpoint must be included in
5911 [tb->pc, tb->pc + tb->size) in order to for it to be
5912 properly cleared -- thus we increment the PC here so that
5913 the logic setting tb->size below does the right thing. */
5918 if (num_insns
== max_insns
&& (tb_cflags(tb
) & CF_LAST_IO
)) {
5922 status
= translate_one(env
, &dc
);
5924 /* If we reach a page boundary, are single stepping,
5925 or exhaust instruction count, stop generation. */
5926 if (status
== NO_EXIT
5927 && (dc
.pc
>= next_page_start
5928 || tcg_op_buf_full()
5929 || num_insns
>= max_insns
5931 || cs
->singlestep_enabled
5933 status
= EXIT_PC_STALE
;
5935 } while (status
== NO_EXIT
);
5937 if (tb_cflags(tb
) & CF_LAST_IO
) {
5946 case EXIT_PC_STALE_NOCHAIN
:
5947 update_psw_addr(&dc
);
5949 case EXIT_PC_UPDATED
:
5950 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5951 cc op type is in env */
5954 case EXIT_PC_CC_UPDATED
:
5955 /* Exit the TB, either by raising a debug exception or by return. */
5957 gen_exception(EXCP_DEBUG
);
5958 } else if (use_exit_tb(&dc
) || status
== EXIT_PC_STALE_NOCHAIN
) {
5961 tcg_gen_lookup_and_goto_ptr();
5965 g_assert_not_reached();
5968 gen_tb_end(tb
, num_insns
);
5970 tb
->size
= dc
.pc
- pc_start
;
5971 tb
->icount
= num_insns
;
5973 #if defined(S390X_DEBUG_DISAS)
5974 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
5975 && qemu_log_in_addr_range(pc_start
)) {
5977 if (unlikely(dc
.ex_value
)) {
5978 /* ??? Unfortunately log_target_disas can't use host memory. */
5979 qemu_log("IN: EXECUTE %016" PRIx64
"\n", dc
.ex_value
);
5981 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5982 log_target_disas(cs
, pc_start
, dc
.pc
- pc_start
);
5990 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
5993 int cc_op
= data
[1];
5994 env
->psw
.addr
= data
[0];
5995 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {