4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
41 /* global register indexes */
42 static TCGv_env cpu_env
;
44 #include "exec/gen-icount.h"
45 #include "exec/helper-proto.h"
46 #include "exec/helper-gen.h"
48 #include "trace-tcg.h"
52 /* Information that (most) every instruction needs to manipulate. */
53 typedef struct DisasContext DisasContext
;
54 typedef struct DisasInsn DisasInsn
;
55 typedef struct DisasFields DisasFields
;
58 struct TranslationBlock
*tb
;
59 const DisasInsn
*insn
;
65 bool singlestep_enabled
;
68 /* Information carried about a condition to be evaluated. */
75 struct { TCGv_i64 a
, b
; } s64
;
76 struct { TCGv_i32 a
, b
; } s32
;
80 /* is_jmp field values */
81 #define DISAS_EXCP DISAS_TARGET_0
83 #ifdef DEBUG_INLINE_BRANCHES
84 static uint64_t inline_branch_hit
[CC_OP_MAX
];
85 static uint64_t inline_branch_miss
[CC_OP_MAX
];
/* Compute the value a branch-and-link instruction stores as the link
   address.  In 31-bit mode (FLAG_MASK_32 set while FLAG_MASK_64 is
   clear) the PC is tagged with the addressing-mode bit 0x80000000.
   NOTE(review): the 64-bit and 24-bit result paths of this function
   are not visible in this chunk — confirm against the full file. */
88 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
90 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
91 if (s
->tb
->flags
& FLAG_MASK_32
) {
92 return pc
| 0x80000000;
98 static TCGv_i64 psw_addr
;
99 static TCGv_i64 psw_mask
;
100 static TCGv_i64 gbea
;
102 static TCGv_i32 cc_op
;
103 static TCGv_i64 cc_src
;
104 static TCGv_i64 cc_dst
;
105 static TCGv_i64 cc_vr
;
107 static char cpu_reg_names
[32][4];
108 static TCGv_i64 regs
[16];
109 static TCGv_i64 fregs
[16];
/* One-time translator initialization: create the TCG globals that
   mirror CPUS390XState fields (psw.addr/psw.mask, gbea, the cc_*
   condition-code scratch values) and the 16 GPRs and 16 FPRs.
   NOTE(review): several lines of this function (declarations, name
   arguments, closing braces) are missing from this chunk. */
111 void s390x_translate_init(void)
115 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
116 tcg_ctx
.tcg_env
= cpu_env
;
117 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
118 offsetof(CPUS390XState
, psw
.addr
),
120 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
121 offsetof(CPUS390XState
, psw
.mask
),
123 gbea
= tcg_global_mem_new_i64(cpu_env
,
124 offsetof(CPUS390XState
, gbea
),
127 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
129 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
131 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
133 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
/* General registers r0..r15, named "r%d" in cpu_reg_names[0..15]. */
136 for (i
= 0; i
< 16; i
++) {
137 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
138 regs
[i
] = tcg_global_mem_new(cpu_env
,
139 offsetof(CPUS390XState
, regs
[i
]),
/* Floating-point registers f0..f15, named "f%d"; each is backed by
   doubleword 0 of the corresponding vector register (vregs[i][0].d). */
143 for (i
= 0; i
< 16; i
++) {
144 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
145 fregs
[i
] = tcg_global_mem_new(cpu_env
,
146 offsetof(CPUS390XState
, vregs
[i
][0].d
),
147 cpu_reg_names
[i
+ 16]);
/* Return a fresh temporary holding a copy of general register REG.
   NOTE(review): the return statement is outside this chunk. */
151 static TCGv_i64
load_reg(int reg
)
153 TCGv_i64 r
= tcg_temp_new_i64();
154 tcg_gen_mov_i64(r
, regs
[reg
]);
/* Return a fresh temporary with the 32-bit float value of FPR REG,
   i.e. the high word of fregs[reg] shifted down to bits 0..31. */
158 static TCGv_i64
load_freg32_i64(int reg
)
160 TCGv_i64 r
= tcg_temp_new_i64();
161 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
/* Store the 64-bit value V into general register REG. */
165 static void store_reg(int reg
, TCGv_i64 v
)
167 tcg_gen_mov_i64(regs
[reg
], v
);
/* Store the 64-bit value V into floating-point register REG. */
170 static void store_freg(int reg
, TCGv_i64 v
)
172 tcg_gen_mov_i64(fregs
[reg
], v
);
/* Deposit the low 32 bits of V into bits 0..31 of GPR REG, leaving
   the upper half of the register untouched. */
175 static void store_reg32_i64(int reg
, TCGv_i64 v
)
177 /* 32 bit register writes keep the upper half */
178 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
/* Deposit the low 32 bits of V into the HIGH half (bits 32..63) of
   GPR REG, leaving the low half untouched. */
181 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
183 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
/* Store a 32-bit float value: deposit the low 32 bits of V into the
   high half of FPR REG (short floats live in the top word). */
186 static void store_freg32_i64(int reg
, TCGv_i64 v
)
188 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
/* Fetch the low doubleword of a 128-bit helper result, which helpers
   leave in env->retxl, into DEST. */
191 static void return_low128(TCGv_i64 dest
)
193 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
/* Synchronize the psw.addr global with the translator's current PC. */
196 static void update_psw_addr(DisasContext
*s
)
199 tcg_gen_movi_i64(psw_addr
, s
->pc
);
/* PER (Program Event Recording) bookkeeping for an unconditional
   branch: record the breaking-event address in gbea and, when PER is
   enabled in the TB flags, call the per_branch helper.  TO_NEXT
   selects s->next_pc as the destination; otherwise psw_addr is used.
   System-emulation only (#ifndef CONFIG_USER_ONLY). */
202 static void per_branch(DisasContext
*s
, bool to_next
)
204 #ifndef CONFIG_USER_ONLY
205 tcg_gen_movi_i64(gbea
, s
->pc
);
207 if (s
->tb
->flags
& FLAG_MASK_PER
) {
208 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->next_pc
) : psw_addr
;
209 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
/* NOTE(review): the freeing of next_pc is presumably guarded by
   to_next (psw_addr is a global and must not be freed) — the guard
   line is not visible in this chunk. */
211 tcg_temp_free_i64(next_pc
);
/* PER bookkeeping for a conditional branch on (ARG1 COND ARG2).
   With PER enabled: branch around the helper call when the condition
   is false, otherwise record gbea = s->pc and invoke per_branch.
   Without PER (the movcond path): conditionally update gbea only.
   System-emulation only. */
217 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
218 TCGv_i64 arg1
, TCGv_i64 arg2
)
220 #ifndef CONFIG_USER_ONLY
221 if (s
->tb
->flags
& FLAG_MASK_PER
) {
222 TCGLabel
*lab
= gen_new_label();
/* Skip the PER helper when the branch is NOT taken. */
223 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
225 tcg_gen_movi_i64(gbea
, s
->pc
);
226 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
/* No PER: gbea = (cond ? s->pc : gbea), without a helper call. */
230 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
231 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
232 tcg_temp_free_i64(pc
);
/* Record the current PC as the breaking-event address (gbea). */
237 static void per_breaking_event(DisasContext
*s
)
239 tcg_gen_movi_i64(gbea
, s
->pc
);
/* Flush the translator's lazily-tracked cc_op into the cc_op global,
   unless it is already dynamic (in env) or static (no-op). */
242 static void update_cc_op(DisasContext
*s
)
244 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
245 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
/* Prepare state for an operation that may fault on memory access.
   NOTE(review): the body of this function is not visible in this
   chunk — presumably it updates cc_op/psw state; confirm in the
   full file. */
249 static void potential_page_fault(DisasContext
*s
)
/* Fetch a 2-byte (halfword) chunk of instruction text at PC. */
255 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
257 return (uint64_t)cpu_lduw_code(env
, pc
);
/* Fetch a 4-byte (word) chunk of instruction text at PC, zero-extended
   (the uint32_t cast strips any sign extension from cpu_ldl_code). */
260 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
262 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
/* Map the PSW address-space-control bits from the TB flags to a TCG
   memory (MMU) index.  NOTE(review): the return values of the cases
   and the default branch are not visible in this chunk. */
265 static int get_mem_index(DisasContext
*s
)
267 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
268 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
270 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
272 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
/* Raise exception number EXCP via the exception helper (which does
   not return to generated code). */
280 static void gen_exception(int excp
)
282 TCGv_i32 tmp
= tcg_const_i32(excp
);
283 gen_helper_exception(cpu_env
, tmp
);
284 tcg_temp_free_i32(tmp
);
/* Raise a program exception with code CODE: store the code and the
   current instruction length into env (int_pgm_code / int_pgm_ilen),
   then trigger EXCP_PGM. */
287 static void gen_program_exception(DisasContext
*s
, int code
)
291 /* Remember what pgm exception this was. */
292 tmp
= tcg_const_i32(code
);
293 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
294 tcg_temp_free_i32(tmp
);
296 tmp
= tcg_const_i32(s
->ilen
);
297 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
298 tcg_temp_free_i32(tmp
);
306 /* Trigger exception. */
307 gen_exception(EXCP_PGM
);
/* Raise an operation (illegal opcode) program exception. */
310 static inline void gen_illegal_opcode(DisasContext
*s
)
312 gen_program_exception(s
, PGM_OPERATION
);
/* Raise a data program exception with DXC 0xff: OR 0xff into the DXC
   field of env->fpc, then deliver PGM_DATA. */
315 static inline void gen_trap(DisasContext
*s
)
319 /* Set DXC to 0xff. */
320 t
= tcg_temp_new_i32();
321 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
322 tcg_gen_ori_i32(t
, t
, 0xff00);
323 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
324 tcg_temp_free_i32(t
);
326 gen_program_exception(s
, PGM_DATA
);
/* Raise a privileged-operation exception when executing in problem
   state (FLAG_MASK_PSTATE set).  System-emulation only. */
329 #ifndef CONFIG_USER_ONLY
330 static void check_privileged(DisasContext
*s
)
332 if (s
->tb
->flags
& FLAG_MASK_PSTATE
) {
333 gen_program_exception(s
, PGM_PRIVILEGED
);
/* Compute an effective address base(B2) + index(X2) + displacement(D2)
   into a fresh temporary.  Register number 0 means "no register" for
   both base and index; in 31-bit mode (need_31) the result is masked
   to 31 bits.  NOTE(review): the if/else selection between the
   addition variants and the return are not visible in this chunk. */
338 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
340 TCGv_i64 tmp
= tcg_temp_new_i64();
341 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
343 /* Note that d2 is limited to 20 bits, signed. If we crop negative
344 displacements early we create larger immediate addends. */
346 /* Note that addi optimizes the imm==0 case. */
348 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
349 tcg_gen_addi_i64(tmp
, tmp
, d2
);
351 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
353 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
359 tcg_gen_movi_i64(tmp
, d2
);
/* 31-bit mode: clear the high bit(s) of the computed address. */
362 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
/* True when the cc_src/cc_dst/cc_vr globals hold live data for the
   lazily-tracked condition code (i.e. cc_op is neither dynamic nor
   static).  NOTE(review): the rest of the condition is not visible
   in this chunk. */
368 static inline bool live_cc_data(DisasContext
*s
)
370 return (s
->cc_op
!= CC_OP_DYNAMIC
371 && s
->cc_op
!= CC_OP_STATIC
/* Set the condition code to the constant VAL (0..3): discard any live
   cc inputs and record CC_OP_CONST0 + val as the pending cc_op. */
375 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
377 if (live_cc_data(s
)) {
378 tcg_gen_discard_i64(cc_src
);
379 tcg_gen_discard_i64(cc_dst
);
380 tcg_gen_discard_i64(cc_vr
);
382 s
->cc_op
= CC_OP_CONST0
+ val
;
/* Record a one-operand lazy condition code: cc_dst = DST, pending
   op = OP; the unused cc_src/cc_vr inputs are discarded. */
385 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
387 if (live_cc_data(s
)) {
388 tcg_gen_discard_i64(cc_src
);
389 tcg_gen_discard_i64(cc_vr
);
391 tcg_gen_mov_i64(cc_dst
, dst
);
/* Record a two-operand lazy condition code: cc_src = SRC and
   cc_dst = DST; the unused cc_vr input is discarded. */
395 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
398 if (live_cc_data(s
)) {
399 tcg_gen_discard_i64(cc_vr
);
401 tcg_gen_mov_i64(cc_src
, src
);
402 tcg_gen_mov_i64(cc_dst
, dst
);
/* Record a three-operand lazy condition code: cc_src = SRC,
   cc_dst = DST, cc_vr = VR. */
406 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
407 TCGv_i64 dst
, TCGv_i64 vr
)
409 tcg_gen_mov_i64(cc_src
, src
);
410 tcg_gen_mov_i64(cc_dst
, dst
);
411 tcg_gen_mov_i64(cc_vr
, vr
);
/* Set the lazy cc to zero/non-zero of the 64-bit value VAL. */
415 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
417 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
/* Set the lazy cc from a 32-bit float result (CC_OP_NZ_F32). */
420 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
422 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
/* Set the lazy cc from a 64-bit float result (CC_OP_NZ_F64). */
425 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
427 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
/* Set the lazy cc from a 128-bit float result given as a high/low
   doubleword pair (CC_OP_NZ_F128). */
430 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
432 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
435 /* CC value is in env->cc_op */
/* Mark the cc as already computed into the cc_op global: discard any
   live lazy inputs and set the pending op to CC_OP_STATIC. */
436 static void set_cc_static(DisasContext
*s
)
438 if (live_cc_data(s
)) {
439 tcg_gen_discard_i64(cc_src
);
440 tcg_gen_discard_i64(cc_dst
);
441 tcg_gen_discard_i64(cc_vr
);
443 s
->cc_op
= CC_OP_STATIC
;
446 /* calculates cc into cc_op */
/* Materialize the lazily-tracked condition code into the cc_op
   global.  Depending on s->cc_op this is: a constant move (CONST
   ops), a no-op (already in env), or a call to the calc_cc helper
   with 0, 1, 2 or 3 of the cc_src/cc_dst/cc_vr operands (dummy
   stands in for absent operands).  NOTE(review): the switch
   statement, most case labels, and the flow between the helper-call
   variants are not visible in this chunk — the surviving lines below
   are fragments of the different switch arms. */
447 static void gen_op_calc_cc(DisasContext
*s
)
449 TCGv_i32 local_cc_op
;
452 TCGV_UNUSED_I32(local_cc_op
);
453 TCGV_UNUSED_I64(dummy
);
456 dummy
= tcg_const_i64(0);
470 local_cc_op
= tcg_const_i32(s
->cc_op
);
486 /* s->cc_op is the cc value */
487 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
490 /* env->cc_op already is the cc value */
505 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
510 case CC_OP_LTUGTU_32
:
511 case CC_OP_LTUGTU_64
:
518 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
533 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
536 /* unknown operation - assume 3 arguments and cc_op in env */
537 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
543 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
544 tcg_temp_free_i32(local_cc_op
);
546 if (!TCGV_IS_UNUSED_I64(dummy
)) {
547 tcg_temp_free_i64(dummy
);
550 /* We now have cc in cc_op as constant */
/* True when the TB must end with exit_tb rather than chaining:
   single-stepping, a final-I/O TB (CF_LAST_IO), or PER enabled. */
554 static bool use_exit_tb(DisasContext
*s
)
556 return (s
->singlestep_enabled
||
557 (s
->tb
->cflags
& CF_LAST_IO
) ||
558 (s
->tb
->flags
& FLAG_MASK_PER
));
/* True when a direct goto_tb to DEST is permitted: never when
   use_exit_tb() demands a full exit, and (in system emulation) only
   when DEST lies on the same guest page as the TB start or the
   current instruction.  NOTE(review): the user-only #else branch is
   not visible in this chunk. */
561 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
563 if (unlikely(use_exit_tb(s
))) {
566 #ifndef CONFIG_USER_ONLY
567 return (dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
568 (dest
& TARGET_PAGE_MASK
) == (s
->pc
& TARGET_PAGE_MASK
);
/* Debug statistics: count a branch whose cc comparison could NOT be
   inlined (compiled out unless DEBUG_INLINE_BRANCHES is defined). */
574 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
576 #ifdef DEBUG_INLINE_BRANCHES
577 inline_branch_miss
[cc_op
]++;
/* Debug statistics: count a branch whose cc comparison WAS inlined
   (compiled out unless DEBUG_INLINE_BRANCHES is defined). */
581 static void account_inline_branch(DisasContext
*s
, int cc_op
)
583 #ifdef DEBUG_INLINE_BRANCHES
584 inline_branch_hit
[cc_op
]++;
588 /* Table of mask values to comparison codes, given a comparison as input.
589 For such, CC=3 should not be possible. */
/* Indexed by the 4-bit branch mask (bit 8=EQ/CC0, 4=LT/CC1, 2=GT/CC2,
   1=CC3); entries come in pairs because the CC3 bit is a don't-care. */
590 static const TCGCond ltgt_cond
[16] = {
591 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
592 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
593 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
594 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
595 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
596 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
597 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
598 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
601 /* Table of mask values to comparison codes, given a logic op as input.
602 For such, only CC=0 and CC=1 should be possible. */
/* Indexed by the 4-bit branch mask like ltgt_cond; here bit 8 means
   result==0 (CC0) and bit 4 means result!=0 (CC1), so entries repeat
   in groups of four because the CC2/CC3 bits are don't-cares. */
603 static const TCGCond nz_cond
[16] = {
604 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
605 TCG_COND_NEVER
, TCG_COND_NEVER
,
606 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
607 TCG_COND_NE
, TCG_COND_NE
,
608 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
609 TCG_COND_EQ
, TCG_COND_EQ
,
610 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
611 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
614 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
615 details required to generate a TCG comparison. */
616 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
619 enum cc_op old_cc_op
= s
->cc_op
;
621 if (mask
== 15 || mask
== 0) {
622 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
625 c
->g1
= c
->g2
= true;
630 /* Find the TCG condition for the mask + cc op. */
636 cond
= ltgt_cond
[mask
];
637 if (cond
== TCG_COND_NEVER
) {
640 account_inline_branch(s
, old_cc_op
);
643 case CC_OP_LTUGTU_32
:
644 case CC_OP_LTUGTU_64
:
645 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
646 if (cond
== TCG_COND_NEVER
) {
649 account_inline_branch(s
, old_cc_op
);
653 cond
= nz_cond
[mask
];
654 if (cond
== TCG_COND_NEVER
) {
657 account_inline_branch(s
, old_cc_op
);
672 account_inline_branch(s
, old_cc_op
);
687 account_inline_branch(s
, old_cc_op
);
691 switch (mask
& 0xa) {
692 case 8: /* src == 0 -> no one bit found */
695 case 2: /* src != 0 -> one bit found */
701 account_inline_branch(s
, old_cc_op
);
707 case 8 | 2: /* vr == 0 */
710 case 4 | 1: /* vr != 0 */
713 case 8 | 4: /* no carry -> vr >= src */
716 case 2 | 1: /* carry -> vr < src */
722 account_inline_branch(s
, old_cc_op
);
727 /* Note that CC=0 is impossible; treat it as don't-care. */
729 case 2: /* zero -> op1 == op2 */
732 case 4 | 1: /* !zero -> op1 != op2 */
735 case 4: /* borrow (!carry) -> op1 < op2 */
738 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
744 account_inline_branch(s
, old_cc_op
);
749 /* Calculate cc value. */
754 /* Jump based on CC. We'll load up the real cond below;
755 the assignment here merely avoids a compiler warning. */
756 account_noninline_branch(s
, old_cc_op
);
757 old_cc_op
= CC_OP_STATIC
;
758 cond
= TCG_COND_NEVER
;
762 /* Load up the arguments of the comparison. */
764 c
->g1
= c
->g2
= false;
768 c
->u
.s32
.a
= tcg_temp_new_i32();
769 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
770 c
->u
.s32
.b
= tcg_const_i32(0);
773 case CC_OP_LTUGTU_32
:
776 c
->u
.s32
.a
= tcg_temp_new_i32();
777 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
778 c
->u
.s32
.b
= tcg_temp_new_i32();
779 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
786 c
->u
.s64
.b
= tcg_const_i64(0);
790 case CC_OP_LTUGTU_64
:
794 c
->g1
= c
->g2
= true;
800 c
->u
.s64
.a
= tcg_temp_new_i64();
801 c
->u
.s64
.b
= tcg_const_i64(0);
802 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
807 c
->u
.s32
.a
= tcg_temp_new_i32();
808 c
->u
.s32
.b
= tcg_temp_new_i32();
809 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
810 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
811 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
813 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
820 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
821 c
->u
.s64
.b
= tcg_const_i64(0);
833 case 0x8 | 0x4 | 0x2: /* cc != 3 */
835 c
->u
.s32
.b
= tcg_const_i32(3);
837 case 0x8 | 0x4 | 0x1: /* cc != 2 */
839 c
->u
.s32
.b
= tcg_const_i32(2);
841 case 0x8 | 0x2 | 0x1: /* cc != 1 */
843 c
->u
.s32
.b
= tcg_const_i32(1);
845 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
848 c
->u
.s32
.a
= tcg_temp_new_i32();
849 c
->u
.s32
.b
= tcg_const_i32(0);
850 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
852 case 0x8 | 0x4: /* cc < 2 */
854 c
->u
.s32
.b
= tcg_const_i32(2);
856 case 0x8: /* cc == 0 */
858 c
->u
.s32
.b
= tcg_const_i32(0);
860 case 0x4 | 0x2 | 0x1: /* cc != 0 */
862 c
->u
.s32
.b
= tcg_const_i32(0);
864 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
867 c
->u
.s32
.a
= tcg_temp_new_i32();
868 c
->u
.s32
.b
= tcg_const_i32(0);
869 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
871 case 0x4: /* cc == 1 */
873 c
->u
.s32
.b
= tcg_const_i32(1);
875 case 0x2 | 0x1: /* cc > 1 */
877 c
->u
.s32
.b
= tcg_const_i32(1);
879 case 0x2: /* cc == 2 */
881 c
->u
.s32
.b
= tcg_const_i32(2);
883 case 0x1: /* cc == 3 */
885 c
->u
.s32
.b
= tcg_const_i32(3);
888 /* CC is masked by something else: (8 >> cc) & mask. */
891 c
->u
.s32
.a
= tcg_const_i32(8);
892 c
->u
.s32
.b
= tcg_const_i32(0);
893 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
894 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
905 static void free_compare(DisasCompare
*c
)
909 tcg_temp_free_i64(c
->u
.s64
.a
);
911 tcg_temp_free_i32(c
->u
.s32
.a
);
916 tcg_temp_free_i64(c
->u
.s64
.b
);
918 tcg_temp_free_i32(c
->u
.s32
.b
);
923 /* ====================================================================== */
924 /* Define the insn format enumeration. */
925 #define F0(N) FMT_##N,
926 #define F1(N, X1) F0(N)
927 #define F2(N, X1, X2) F0(N)
928 #define F3(N, X1, X2, X3) F0(N)
929 #define F4(N, X1, X2, X3, X4) F0(N)
930 #define F5(N, X1, X2, X3, X4, X5) F0(N)
933 #include "insn-format.def"
943 /* Define a structure to hold the decoded fields. We'll store each inside
944 an array indexed by an enum. In order to conserve memory, we'll arrange
945 for fields that do not exist at the same time to overlap, thus the "C"
946 for compact. For checking purposes there is an "O" for original index
947 as well that will be applied to availability bitmaps. */
949 enum DisasFieldIndexO
{
972 enum DisasFieldIndexC
{
1003 struct DisasFields
{
1007 unsigned presentC
:16;
1008 unsigned int presentO
;
1012 /* This is the way fields are to be accessed out of DisasFields. */
1013 #define have_field(S, F) have_field1((S), FLD_O_##F)
1014 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1016 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1018 return (f
->presentO
>> c
) & 1;
1021 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1022 enum DisasFieldIndexC c
)
1024 assert(have_field1(f
, o
));
1028 /* Describe the layout of each field in each format. */
1029 typedef struct DisasField
{
1031 unsigned int size
:8;
1032 unsigned int type
:2;
1033 unsigned int indexC
:6;
1034 enum DisasFieldIndexO indexO
:8;
1037 typedef struct DisasFormatInfo
{
1038 DisasField op
[NUM_C_FIELD
];
1041 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1042 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1043 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1044 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1045 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1046 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1047 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1048 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1049 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1050 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1051 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1052 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1053 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1054 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1056 #define F0(N) { { } },
1057 #define F1(N, X1) { { X1 } },
1058 #define F2(N, X1, X2) { { X1, X2 } },
1059 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1060 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1061 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1063 static const DisasFormatInfo format_info
[] = {
1064 #include "insn-format.def"
1082 /* Generally, we'll extract operands into this structures, operate upon
1083 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1084 of routines below for more details. */
1086 bool g_out
, g_out2
, g_in1
, g_in2
;
1087 TCGv_i64 out
, out2
, in1
, in2
;
1091 /* Instructions can place constraints on their operands, raising specification
1092 exceptions if they are violated. To make this easy to automate, each "in1",
1093 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1094 of the following, or 0. To make this easy to document, we'll put the
1095 SPEC_<name> defines next to <name>. */
1097 #define SPEC_r1_even 1
1098 #define SPEC_r2_even 2
1099 #define SPEC_r3_even 4
1100 #define SPEC_r1_f128 8
1101 #define SPEC_r2_f128 16
1103 /* Return values from translate_one, indicating the state of the TB. */
1105 /* Continue the TB. */
1107 /* We have emitted one or more goto_tb. No fixup required. */
1109 /* We are not using a goto_tb (for whatever reason), but have updated
1110 the PC (for whatever reason), so there's no need to do it again on
1113 /* We have updated the PC and CC values. */
1115 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1116 updated the PC for the next instruction to be executed. */
1118 /* We are exiting the TB to the main loop. */
1119 EXIT_PC_STALE_NOCHAIN
,
1120 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1121 No following code will be executed. */
1133 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1134 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1135 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1136 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1137 void (*help_cout
)(DisasContext
*, DisasOps
*);
1138 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1143 /* ====================================================================== */
1144 /* Miscellaneous helpers, used by several operations. */
1146 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1147 DisasOps
*o
, int mask
)
1149 int b2
= get_field(f
, b2
);
1150 int d2
= get_field(f
, d2
);
1153 o
->in2
= tcg_const_i64(d2
& mask
);
1155 o
->in2
= get_address(s
, 0, b2
, d2
);
1156 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1160 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1162 if (dest
== s
->next_pc
) {
1163 per_branch(s
, true);
1166 if (use_goto_tb(s
, dest
)) {
1168 per_breaking_event(s
);
1170 tcg_gen_movi_i64(psw_addr
, dest
);
1171 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1172 return EXIT_GOTO_TB
;
1174 tcg_gen_movi_i64(psw_addr
, dest
);
1175 per_branch(s
, false);
1176 return EXIT_PC_UPDATED
;
1180 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1181 bool is_imm
, int imm
, TCGv_i64 cdest
)
1184 uint64_t dest
= s
->pc
+ 2 * imm
;
1187 /* Take care of the special cases first. */
1188 if (c
->cond
== TCG_COND_NEVER
) {
1193 if (dest
== s
->next_pc
) {
1194 /* Branch to next. */
1195 per_branch(s
, true);
1199 if (c
->cond
== TCG_COND_ALWAYS
) {
1200 ret
= help_goto_direct(s
, dest
);
1204 if (TCGV_IS_UNUSED_I64(cdest
)) {
1205 /* E.g. bcr %r0 -> no branch. */
1209 if (c
->cond
== TCG_COND_ALWAYS
) {
1210 tcg_gen_mov_i64(psw_addr
, cdest
);
1211 per_branch(s
, false);
1212 ret
= EXIT_PC_UPDATED
;
1217 if (use_goto_tb(s
, s
->next_pc
)) {
1218 if (is_imm
&& use_goto_tb(s
, dest
)) {
1219 /* Both exits can use goto_tb. */
1222 lab
= gen_new_label();
1224 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1226 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1229 /* Branch not taken. */
1231 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1232 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1236 per_breaking_event(s
);
1238 tcg_gen_movi_i64(psw_addr
, dest
);
1239 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1243 /* Fallthru can use goto_tb, but taken branch cannot. */
1244 /* Store taken branch destination before the brcond. This
1245 avoids having to allocate a new local temp to hold it.
1246 We'll overwrite this in the not taken case anyway. */
1248 tcg_gen_mov_i64(psw_addr
, cdest
);
1251 lab
= gen_new_label();
1253 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1255 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1258 /* Branch not taken. */
1261 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1262 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1266 tcg_gen_movi_i64(psw_addr
, dest
);
1268 per_breaking_event(s
);
1269 ret
= EXIT_PC_UPDATED
;
1272 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1273 Most commonly we're single-stepping or some other condition that
1274 disables all use of goto_tb. Just update the PC and exit. */
1276 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1278 cdest
= tcg_const_i64(dest
);
1282 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1284 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1286 TCGv_i32 t0
= tcg_temp_new_i32();
1287 TCGv_i64 t1
= tcg_temp_new_i64();
1288 TCGv_i64 z
= tcg_const_i64(0);
1289 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1290 tcg_gen_extu_i32_i64(t1
, t0
);
1291 tcg_temp_free_i32(t0
);
1292 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1293 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1294 tcg_temp_free_i64(t1
);
1295 tcg_temp_free_i64(z
);
1299 tcg_temp_free_i64(cdest
);
1301 tcg_temp_free_i64(next
);
1303 ret
= EXIT_PC_UPDATED
;
1311 /* ====================================================================== */
1312 /* The operations. These perform the bulk of the work for any insn,
1313 usually after the operands have been loaded and output initialized. */
1315 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1318 z
= tcg_const_i64(0);
1319 n
= tcg_temp_new_i64();
1320 tcg_gen_neg_i64(n
, o
->in2
);
1321 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1322 tcg_temp_free_i64(n
);
1323 tcg_temp_free_i64(z
);
1327 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1329 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1333 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1335 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1339 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1341 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1342 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1346 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1348 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1352 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1357 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1359 /* The carry flag is the msb of CC, therefore the branch mask that would
1360 create that comparison is 3. Feeding the generated comparison to
1361 setcond produces the carry flag that we desire. */
1362 disas_jcc(s
, &cmp
, 3);
1363 carry
= tcg_temp_new_i64();
1365 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1367 TCGv_i32 t
= tcg_temp_new_i32();
1368 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1369 tcg_gen_extu_i32_i64(carry
, t
);
1370 tcg_temp_free_i32(t
);
1374 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1375 tcg_temp_free_i64(carry
);
1379 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1381 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1385 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1387 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1391 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1393 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1394 return_low128(o
->out2
);
1398 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1400 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1404 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1406 int shift
= s
->insn
->data
& 0xff;
1407 int size
= s
->insn
->data
>> 8;
1408 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1411 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1412 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1413 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1415 /* Produce the CC from only the bits manipulated. */
1416 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1417 set_cc_nz_u64(s
, cc_dst
);
1421 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1423 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1424 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1425 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1426 per_branch(s
, false);
1427 return EXIT_PC_UPDATED
;
1433 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1435 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1436 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1439 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1441 int m1
= get_field(s
->fields
, m1
);
1442 bool is_imm
= have_field(s
->fields
, i2
);
1443 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1446 /* BCR with R2 = 0 causes no branching */
1447 if (have_field(s
->fields
, r2
) && get_field(s
->fields
, r2
) == 0) {
1449 /* Perform serialization */
1450 /* FIXME: check for fast-BCR-serialization facility */
1451 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1454 /* Perform serialization */
1455 /* FIXME: perform checkpoint-synchronisation */
1456 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1461 disas_jcc(s
, &c
, m1
);
1462 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1465 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1467 int r1
= get_field(s
->fields
, r1
);
1468 bool is_imm
= have_field(s
->fields
, i2
);
1469 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1473 c
.cond
= TCG_COND_NE
;
1478 t
= tcg_temp_new_i64();
1479 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1480 store_reg32_i64(r1
, t
);
1481 c
.u
.s32
.a
= tcg_temp_new_i32();
1482 c
.u
.s32
.b
= tcg_const_i32(0);
1483 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1484 tcg_temp_free_i64(t
);
1486 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1489 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1491 int r1
= get_field(s
->fields
, r1
);
1492 int imm
= get_field(s
->fields
, i2
);
1496 c
.cond
= TCG_COND_NE
;
1501 t
= tcg_temp_new_i64();
1502 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1503 tcg_gen_subi_i64(t
, t
, 1);
1504 store_reg32h_i64(r1
, t
);
1505 c
.u
.s32
.a
= tcg_temp_new_i32();
1506 c
.u
.s32
.b
= tcg_const_i32(0);
1507 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1508 tcg_temp_free_i64(t
);
1510 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1513 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1515 int r1
= get_field(s
->fields
, r1
);
1516 bool is_imm
= have_field(s
->fields
, i2
);
1517 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1520 c
.cond
= TCG_COND_NE
;
1525 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1526 c
.u
.s64
.a
= regs
[r1
];
1527 c
.u
.s64
.b
= tcg_const_i64(0);
1529 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1532 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1534 int r1
= get_field(s
->fields
, r1
);
1535 int r3
= get_field(s
->fields
, r3
);
1536 bool is_imm
= have_field(s
->fields
, i2
);
1537 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1541 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1546 t
= tcg_temp_new_i64();
1547 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1548 c
.u
.s32
.a
= tcg_temp_new_i32();
1549 c
.u
.s32
.b
= tcg_temp_new_i32();
1550 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1551 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1552 store_reg32_i64(r1
, t
);
1553 tcg_temp_free_i64(t
);
1555 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1558 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1560 int r1
= get_field(s
->fields
, r1
);
1561 int r3
= get_field(s
->fields
, r3
);
1562 bool is_imm
= have_field(s
->fields
, i2
);
1563 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1566 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1569 if (r1
== (r3
| 1)) {
1570 c
.u
.s64
.b
= load_reg(r3
| 1);
1573 c
.u
.s64
.b
= regs
[r3
| 1];
1577 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1578 c
.u
.s64
.a
= regs
[r1
];
1581 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1584 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1586 int imm
, m3
= get_field(s
->fields
, m3
);
1590 c
.cond
= ltgt_cond
[m3
];
1591 if (s
->insn
->data
) {
1592 c
.cond
= tcg_unsigned_cond(c
.cond
);
1594 c
.is_64
= c
.g1
= c
.g2
= true;
1598 is_imm
= have_field(s
->fields
, i4
);
1600 imm
= get_field(s
->fields
, i4
);
1603 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1604 get_field(s
->fields
, d4
));
1607 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1610 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1612 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1617 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1619 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1624 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1626 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1631 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1633 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1634 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1635 tcg_temp_free_i32(m3
);
1636 gen_set_cc_nz_f32(s
, o
->in2
);
1640 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1642 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1643 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1644 tcg_temp_free_i32(m3
);
1645 gen_set_cc_nz_f64(s
, o
->in2
);
1649 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1651 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1652 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1653 tcg_temp_free_i32(m3
);
1654 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1658 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1660 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1661 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1662 tcg_temp_free_i32(m3
);
1663 gen_set_cc_nz_f32(s
, o
->in2
);
1667 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1669 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1670 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1671 tcg_temp_free_i32(m3
);
1672 gen_set_cc_nz_f64(s
, o
->in2
);
1676 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1678 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1679 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1680 tcg_temp_free_i32(m3
);
1681 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1685 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1687 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1688 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1689 tcg_temp_free_i32(m3
);
1690 gen_set_cc_nz_f32(s
, o
->in2
);
1694 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1696 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1697 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1698 tcg_temp_free_i32(m3
);
1699 gen_set_cc_nz_f64(s
, o
->in2
);
1703 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1705 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1706 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1707 tcg_temp_free_i32(m3
);
1708 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1712 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1714 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1715 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1716 tcg_temp_free_i32(m3
);
1717 gen_set_cc_nz_f32(s
, o
->in2
);
1721 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1723 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1724 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1725 tcg_temp_free_i32(m3
);
1726 gen_set_cc_nz_f64(s
, o
->in2
);
1730 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1732 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1733 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1734 tcg_temp_free_i32(m3
);
1735 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1739 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1741 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1742 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1743 tcg_temp_free_i32(m3
);
1747 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1749 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1750 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1751 tcg_temp_free_i32(m3
);
1755 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1757 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1758 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1759 tcg_temp_free_i32(m3
);
1760 return_low128(o
->out2
);
1764 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1766 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1767 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1768 tcg_temp_free_i32(m3
);
1772 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1774 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1775 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1776 tcg_temp_free_i32(m3
);
1780 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1782 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1783 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1784 tcg_temp_free_i32(m3
);
1785 return_low128(o
->out2
);
1789 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1791 int r2
= get_field(s
->fields
, r2
);
1792 TCGv_i64 len
= tcg_temp_new_i64();
1794 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1796 return_low128(o
->out
);
1798 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1799 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1800 tcg_temp_free_i64(len
);
1805 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1807 int l
= get_field(s
->fields
, l1
);
1812 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1813 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1816 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1817 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1820 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1821 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1824 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1825 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1828 vl
= tcg_const_i32(l
);
1829 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1830 tcg_temp_free_i32(vl
);
1834 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1838 static ExitStatus
op_clcl(DisasContext
*s
, DisasOps
*o
)
1840 int r1
= get_field(s
->fields
, r1
);
1841 int r2
= get_field(s
->fields
, r2
);
1844 /* r1 and r2 must be even. */
1845 if (r1
& 1 || r2
& 1) {
1846 gen_program_exception(s
, PGM_SPECIFICATION
);
1847 return EXIT_NORETURN
;
1850 t1
= tcg_const_i32(r1
);
1851 t2
= tcg_const_i32(r2
);
1852 gen_helper_clcl(cc_op
, cpu_env
, t1
, t2
);
1853 tcg_temp_free_i32(t1
);
1854 tcg_temp_free_i32(t2
);
1859 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1861 int r1
= get_field(s
->fields
, r1
);
1862 int r3
= get_field(s
->fields
, r3
);
1865 /* r1 and r3 must be even. */
1866 if (r1
& 1 || r3
& 1) {
1867 gen_program_exception(s
, PGM_SPECIFICATION
);
1868 return EXIT_NORETURN
;
1871 t1
= tcg_const_i32(r1
);
1872 t3
= tcg_const_i32(r3
);
1873 gen_helper_clcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1874 tcg_temp_free_i32(t1
);
1875 tcg_temp_free_i32(t3
);
1880 static ExitStatus
op_clclu(DisasContext
*s
, DisasOps
*o
)
1882 int r1
= get_field(s
->fields
, r1
);
1883 int r3
= get_field(s
->fields
, r3
);
1886 /* r1 and r3 must be even. */
1887 if (r1
& 1 || r3
& 1) {
1888 gen_program_exception(s
, PGM_SPECIFICATION
);
1889 return EXIT_NORETURN
;
1892 t1
= tcg_const_i32(r1
);
1893 t3
= tcg_const_i32(r3
);
1894 gen_helper_clclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1895 tcg_temp_free_i32(t1
);
1896 tcg_temp_free_i32(t3
);
1901 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1903 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1904 TCGv_i32 t1
= tcg_temp_new_i32();
1905 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1906 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1908 tcg_temp_free_i32(t1
);
1909 tcg_temp_free_i32(m3
);
1913 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1915 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1917 return_low128(o
->in2
);
1921 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1923 TCGv_i64 t
= tcg_temp_new_i64();
1924 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1925 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1926 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1927 tcg_temp_free_i64(t
);
1931 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1933 int d2
= get_field(s
->fields
, d2
);
1934 int b2
= get_field(s
->fields
, b2
);
1937 /* Note that in1 = R3 (new value) and
1938 in2 = (zero-extended) R1 (expected value). */
1940 addr
= get_address(s
, 0, b2
, d2
);
1941 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
1942 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
1943 tcg_temp_free_i64(addr
);
1945 /* Are the memory and expected values (un)equal? Note that this setcond
1946 produces the output CC value, thus the NE sense of the test. */
1947 cc
= tcg_temp_new_i64();
1948 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1949 tcg_gen_extrl_i64_i32(cc_op
, cc
);
1950 tcg_temp_free_i64(cc
);
1956 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1958 int r1
= get_field(s
->fields
, r1
);
1959 int r3
= get_field(s
->fields
, r3
);
1960 int d2
= get_field(s
->fields
, d2
);
1961 int b2
= get_field(s
->fields
, b2
);
1963 TCGv_i32 t_r1
, t_r3
;
1965 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1966 addr
= get_address(s
, 0, b2
, d2
);
1967 t_r1
= tcg_const_i32(r1
);
1968 t_r3
= tcg_const_i32(r3
);
1969 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
1970 tcg_temp_free_i64(addr
);
1971 tcg_temp_free_i32(t_r1
);
1972 tcg_temp_free_i32(t_r3
);
1978 static ExitStatus
op_csst(DisasContext
*s
, DisasOps
*o
)
1980 int r3
= get_field(s
->fields
, r3
);
1981 TCGv_i32 t_r3
= tcg_const_i32(r3
);
1983 gen_helper_csst(cc_op
, cpu_env
, t_r3
, o
->in1
, o
->in2
);
1984 tcg_temp_free_i32(t_r3
);
1990 #ifndef CONFIG_USER_ONLY
1991 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
1993 TCGMemOp mop
= s
->insn
->data
;
1994 TCGv_i64 addr
, old
, cc
;
1995 TCGLabel
*lab
= gen_new_label();
1997 /* Note that in1 = R1 (zero-extended expected value),
1998 out = R1 (original reg), out2 = R1+1 (new value). */
2000 check_privileged(s
);
2001 addr
= tcg_temp_new_i64();
2002 old
= tcg_temp_new_i64();
2003 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2004 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2005 get_mem_index(s
), mop
| MO_ALIGN
);
2006 tcg_temp_free_i64(addr
);
2008 /* Are the memory and expected values (un)equal? */
2009 cc
= tcg_temp_new_i64();
2010 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2011 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2013 /* Write back the output now, so that it happens before the
2014 following branch, so that we don't need local temps. */
2015 if ((mop
& MO_SIZE
) == MO_32
) {
2016 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2018 tcg_gen_mov_i64(o
->out
, old
);
2020 tcg_temp_free_i64(old
);
2022 /* If the comparison was equal, and the LSB of R2 was set,
2023 then we need to flush the TLB (for all cpus). */
2024 tcg_gen_xori_i64(cc
, cc
, 1);
2025 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2026 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2027 tcg_temp_free_i64(cc
);
2029 gen_helper_purge(cpu_env
);
2036 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2038 TCGv_i64 t1
= tcg_temp_new_i64();
2039 TCGv_i32 t2
= tcg_temp_new_i32();
2040 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2041 gen_helper_cvd(t1
, t2
);
2042 tcg_temp_free_i32(t2
);
2043 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2044 tcg_temp_free_i64(t1
);
2048 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2050 int m3
= get_field(s
->fields
, m3
);
2051 TCGLabel
*lab
= gen_new_label();
2054 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2055 if (s
->insn
->data
) {
2056 c
= tcg_unsigned_cond(c
);
2058 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2067 static ExitStatus
op_cuXX(DisasContext
*s
, DisasOps
*o
)
2069 int m3
= get_field(s
->fields
, m3
);
2070 int r1
= get_field(s
->fields
, r1
);
2071 int r2
= get_field(s
->fields
, r2
);
2072 TCGv_i32 tr1
, tr2
, chk
;
2074 /* R1 and R2 must both be even. */
2075 if ((r1
| r2
) & 1) {
2076 gen_program_exception(s
, PGM_SPECIFICATION
);
2077 return EXIT_NORETURN
;
2079 if (!s390_has_feat(S390_FEAT_ETF3_ENH
)) {
2083 tr1
= tcg_const_i32(r1
);
2084 tr2
= tcg_const_i32(r2
);
2085 chk
= tcg_const_i32(m3
);
2087 switch (s
->insn
->data
) {
2089 gen_helper_cu12(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2092 gen_helper_cu14(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2095 gen_helper_cu21(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2098 gen_helper_cu24(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2101 gen_helper_cu41(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2104 gen_helper_cu42(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2107 g_assert_not_reached();
2110 tcg_temp_free_i32(tr1
);
2111 tcg_temp_free_i32(tr2
);
2112 tcg_temp_free_i32(chk
);
2117 #ifndef CONFIG_USER_ONLY
2118 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2120 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2121 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2122 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2124 check_privileged(s
);
2128 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2130 tcg_temp_free_i32(func_code
);
2131 tcg_temp_free_i32(r3
);
2132 tcg_temp_free_i32(r1
);
2137 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2139 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2140 return_low128(o
->out
);
2144 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2146 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2147 return_low128(o
->out
);
2151 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2153 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2154 return_low128(o
->out
);
2158 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2160 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2161 return_low128(o
->out
);
2165 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2167 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2171 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2173 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2177 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2179 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2180 return_low128(o
->out2
);
2184 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2186 int r2
= get_field(s
->fields
, r2
);
2187 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2191 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2193 /* No cache information provided. */
2194 tcg_gen_movi_i64(o
->out
, -1);
2198 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2200 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2204 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2206 int r1
= get_field(s
->fields
, r1
);
2207 int r2
= get_field(s
->fields
, r2
);
2208 TCGv_i64 t
= tcg_temp_new_i64();
2210 /* Note the "subsequently" in the PoO, which implies a defined result
2211 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2212 tcg_gen_shri_i64(t
, psw_mask
, 32);
2213 store_reg32_i64(r1
, t
);
2215 store_reg32_i64(r2
, psw_mask
);
2218 tcg_temp_free_i64(t
);
2222 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2224 int r1
= get_field(s
->fields
, r1
);
2228 /* Nested EXECUTE is not allowed. */
2229 if (unlikely(s
->ex_value
)) {
2230 gen_program_exception(s
, PGM_EXECUTE
);
2231 return EXIT_NORETURN
;
2238 v1
= tcg_const_i64(0);
2243 ilen
= tcg_const_i32(s
->ilen
);
2244 gen_helper_ex(cpu_env
, ilen
, v1
, o
->in2
);
2245 tcg_temp_free_i32(ilen
);
2248 tcg_temp_free_i64(v1
);
2251 return EXIT_PC_CC_UPDATED
;
2254 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2256 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2257 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2258 tcg_temp_free_i32(m3
);
2262 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2264 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2265 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2266 tcg_temp_free_i32(m3
);
2270 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2272 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2273 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2274 return_low128(o
->out2
);
2275 tcg_temp_free_i32(m3
);
2279 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2281 /* We'll use the original input for cc computation, since we get to
2282 compare that against 0, which ought to be better than comparing
2283 the real output against 64. It also lets cc_dst be a convenient
2284 temporary during our computation. */
2285 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2287 /* R1 = IN ? CLZ(IN) : 64. */
2288 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2290 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2291 value by 64, which is undefined. But since the shift is 64 iff the
2292 input is zero, we still get the correct result after and'ing. */
2293 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2294 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2295 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2299 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2301 int m3
= get_field(s
->fields
, m3
);
2302 int pos
, len
, base
= s
->insn
->data
;
2303 TCGv_i64 tmp
= tcg_temp_new_i64();
2308 /* Effectively a 32-bit load. */
2309 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2316 /* Effectively a 16-bit load. */
2317 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2325 /* Effectively an 8-bit load. */
2326 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2331 pos
= base
+ ctz32(m3
) * 8;
2332 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2333 ccm
= ((1ull << len
) - 1) << pos
;
2337 /* This is going to be a sequence of loads and inserts. */
2338 pos
= base
+ 32 - 8;
2342 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2343 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2344 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2347 m3
= (m3
<< 1) & 0xf;
2353 tcg_gen_movi_i64(tmp
, ccm
);
2354 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2355 tcg_temp_free_i64(tmp
);
2359 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2361 int shift
= s
->insn
->data
& 0xff;
2362 int size
= s
->insn
->data
>> 8;
2363 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2367 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2372 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2374 t1
= tcg_temp_new_i64();
2375 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2376 tcg_gen_shri_i64(t1
, t1
, 36);
2377 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2379 tcg_gen_extu_i32_i64(t1
, cc_op
);
2380 tcg_gen_shli_i64(t1
, t1
, 28);
2381 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2382 tcg_temp_free_i64(t1
);
2386 #ifndef CONFIG_USER_ONLY
2387 static ExitStatus
op_idte(DisasContext
*s
, DisasOps
*o
)
2391 check_privileged(s
);
2392 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2393 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2395 m4
= tcg_const_i32(0);
2397 gen_helper_idte(cpu_env
, o
->in1
, o
->in2
, m4
);
2398 tcg_temp_free_i32(m4
);
2402 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2406 check_privileged(s
);
2407 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2408 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2410 m4
= tcg_const_i32(0);
2412 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
, m4
);
2413 tcg_temp_free_i32(m4
);
2417 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2419 check_privileged(s
);
2420 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2425 static ExitStatus
op_msa(DisasContext
*s
, DisasOps
*o
)
2427 int r1
= have_field(s
->fields
, r1
) ? get_field(s
->fields
, r1
) : 0;
2428 int r2
= have_field(s
->fields
, r2
) ? get_field(s
->fields
, r2
) : 0;
2429 int r3
= have_field(s
->fields
, r3
) ? get_field(s
->fields
, r3
) : 0;
2430 TCGv_i32 t_r1
, t_r2
, t_r3
, type
;
2432 switch (s
->insn
->data
) {
2433 case S390_FEAT_TYPE_KMCTR
:
2434 if (r3
& 1 || !r3
) {
2435 gen_program_exception(s
, PGM_SPECIFICATION
);
2436 return EXIT_NORETURN
;
2439 case S390_FEAT_TYPE_PPNO
:
2440 case S390_FEAT_TYPE_KMF
:
2441 case S390_FEAT_TYPE_KMC
:
2442 case S390_FEAT_TYPE_KMO
:
2443 case S390_FEAT_TYPE_KM
:
2444 if (r1
& 1 || !r1
) {
2445 gen_program_exception(s
, PGM_SPECIFICATION
);
2446 return EXIT_NORETURN
;
2449 case S390_FEAT_TYPE_KMAC
:
2450 case S390_FEAT_TYPE_KIMD
:
2451 case S390_FEAT_TYPE_KLMD
:
2452 if (r2
& 1 || !r2
) {
2453 gen_program_exception(s
, PGM_SPECIFICATION
);
2454 return EXIT_NORETURN
;
2457 case S390_FEAT_TYPE_PCKMO
:
2458 case S390_FEAT_TYPE_PCC
:
2461 g_assert_not_reached();
2464 t_r1
= tcg_const_i32(r1
);
2465 t_r2
= tcg_const_i32(r2
);
2466 t_r3
= tcg_const_i32(r3
);
2467 type
= tcg_const_i32(s
->insn
->data
);
2468 gen_helper_msa(cc_op
, cpu_env
, t_r1
, t_r2
, t_r3
, type
);
2470 tcg_temp_free_i32(t_r1
);
2471 tcg_temp_free_i32(t_r2
);
2472 tcg_temp_free_i32(t_r3
);
2473 tcg_temp_free_i32(type
);
2477 static ExitStatus
op_keb(DisasContext
*s
, DisasOps
*o
)
2479 gen_helper_keb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2484 static ExitStatus
op_kdb(DisasContext
*s
, DisasOps
*o
)
2486 gen_helper_kdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2491 static ExitStatus
op_kxb(DisasContext
*s
, DisasOps
*o
)
2493 gen_helper_kxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2498 static ExitStatus
op_laa(DisasContext
*s
, DisasOps
*o
)
2500 /* The real output is indeed the original value in memory;
2501 recompute the addition for the computation of CC. */
2502 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2503 s
->insn
->data
| MO_ALIGN
);
2504 /* However, we need to recompute the addition for setting CC. */
2505 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2509 static ExitStatus
op_lan(DisasContext
*s
, DisasOps
*o
)
2511 /* The real output is indeed the original value in memory;
2512 recompute the addition for the computation of CC. */
2513 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2514 s
->insn
->data
| MO_ALIGN
);
2515 /* However, we need to recompute the operation for setting CC. */
2516 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2520 static ExitStatus
op_lao(DisasContext
*s
, DisasOps
*o
)
2522 /* The real output is indeed the original value in memory;
2523 recompute the addition for the computation of CC. */
2524 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2525 s
->insn
->data
| MO_ALIGN
);
2526 /* However, we need to recompute the operation for setting CC. */
2527 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2531 static ExitStatus
op_lax(DisasContext
*s
, DisasOps
*o
)
2533 /* The real output is indeed the original value in memory;
2534 recompute the addition for the computation of CC. */
2535 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2536 s
->insn
->data
| MO_ALIGN
);
2537 /* However, we need to recompute the operation for setting CC. */
2538 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2542 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2544 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2548 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2550 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2554 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2556 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2560 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2562 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2566 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2568 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2569 return_low128(o
->out2
);
2573 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2575 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2576 return_low128(o
->out2
);
2580 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2582 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2586 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2588 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2592 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2594 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2598 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2600 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2604 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2606 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2610 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2612 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2616 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2618 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2622 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2624 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2628 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2630 TCGLabel
*lab
= gen_new_label();
2631 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2632 /* The value is stored even in case of trap. */
2633 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2639 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2641 TCGLabel
*lab
= gen_new_label();
2642 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2643 /* The value is stored even in case of trap. */
2644 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2650 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2652 TCGLabel
*lab
= gen_new_label();
2653 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2654 /* The value is stored even in case of trap. */
2655 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2661 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2663 TCGLabel
*lab
= gen_new_label();
2664 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2665 /* The value is stored even in case of trap. */
2666 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2672 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2674 TCGLabel
*lab
= gen_new_label();
2675 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2676 /* The value is stored even in case of trap. */
2677 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2683 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2687 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2690 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2694 TCGv_i32 t32
= tcg_temp_new_i32();
2697 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2700 t
= tcg_temp_new_i64();
2701 tcg_gen_extu_i32_i64(t
, t32
);
2702 tcg_temp_free_i32(t32
);
2704 z
= tcg_const_i64(0);
2705 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2706 tcg_temp_free_i64(t
);
2707 tcg_temp_free_i64(z
);
2713 #ifndef CONFIG_USER_ONLY
2714 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2716 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2717 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2718 check_privileged(s
);
2719 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2720 tcg_temp_free_i32(r1
);
2721 tcg_temp_free_i32(r3
);
2725 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2727 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2728 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2729 check_privileged(s
);
2730 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2731 tcg_temp_free_i32(r1
);
2732 tcg_temp_free_i32(r3
);
2736 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2738 check_privileged(s
);
2739 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2744 static ExitStatus
op_lpp(DisasContext
*s
, DisasOps
*o
)
2746 check_privileged(s
);
2748 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
2752 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2756 check_privileged(s
);
2757 per_breaking_event(s
);
2759 t1
= tcg_temp_new_i64();
2760 t2
= tcg_temp_new_i64();
2761 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2762 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2763 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2764 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2765 tcg_gen_shli_i64(t1
, t1
, 32);
2766 gen_helper_load_psw(cpu_env
, t1
, t2
);
2767 tcg_temp_free_i64(t1
);
2768 tcg_temp_free_i64(t2
);
2769 return EXIT_NORETURN
;
2772 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2776 check_privileged(s
);
2777 per_breaking_event(s
);
2779 t1
= tcg_temp_new_i64();
2780 t2
= tcg_temp_new_i64();
2781 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2782 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2783 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2784 gen_helper_load_psw(cpu_env
, t1
, t2
);
2785 tcg_temp_free_i64(t1
);
2786 tcg_temp_free_i64(t2
);
2787 return EXIT_NORETURN
;
2791 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2793 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2794 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2795 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2796 tcg_temp_free_i32(r1
);
2797 tcg_temp_free_i32(r3
);
2801 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2803 int r1
= get_field(s
->fields
, r1
);
2804 int r3
= get_field(s
->fields
, r3
);
2807 /* Only one register to read. */
2808 t1
= tcg_temp_new_i64();
2809 if (unlikely(r1
== r3
)) {
2810 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2811 store_reg32_i64(r1
, t1
);
2816 /* First load the values of the first and last registers to trigger
2817 possible page faults. */
2818 t2
= tcg_temp_new_i64();
2819 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2820 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2821 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2822 store_reg32_i64(r1
, t1
);
2823 store_reg32_i64(r3
, t2
);
2825 /* Only two registers to read. */
2826 if (((r1
+ 1) & 15) == r3
) {
2832 /* Then load the remaining registers. Page fault can't occur. */
2834 tcg_gen_movi_i64(t2
, 4);
2837 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2838 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2839 store_reg32_i64(r1
, t1
);
2847 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2849 int r1
= get_field(s
->fields
, r1
);
2850 int r3
= get_field(s
->fields
, r3
);
2853 /* Only one register to read. */
2854 t1
= tcg_temp_new_i64();
2855 if (unlikely(r1
== r3
)) {
2856 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2857 store_reg32h_i64(r1
, t1
);
2862 /* First load the values of the first and last registers to trigger
2863 possible page faults. */
2864 t2
= tcg_temp_new_i64();
2865 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2866 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2867 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2868 store_reg32h_i64(r1
, t1
);
2869 store_reg32h_i64(r3
, t2
);
2871 /* Only two registers to read. */
2872 if (((r1
+ 1) & 15) == r3
) {
2878 /* Then load the remaining registers. Page fault can't occur. */
2880 tcg_gen_movi_i64(t2
, 4);
2883 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2884 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2885 store_reg32h_i64(r1
, t1
);
2893 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2895 int r1
= get_field(s
->fields
, r1
);
2896 int r3
= get_field(s
->fields
, r3
);
2899 /* Only one register to read. */
2900 if (unlikely(r1
== r3
)) {
2901 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2905 /* First load the values of the first and last registers to trigger
2906 possible page faults. */
2907 t1
= tcg_temp_new_i64();
2908 t2
= tcg_temp_new_i64();
2909 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2910 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2911 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2912 tcg_gen_mov_i64(regs
[r1
], t1
);
2915 /* Only two registers to read. */
2916 if (((r1
+ 1) & 15) == r3
) {
2921 /* Then load the remaining registers. Page fault can't occur. */
2923 tcg_gen_movi_i64(t1
, 8);
2926 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2927 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2934 static ExitStatus
op_lpd(DisasContext
*s
, DisasOps
*o
)
2937 TCGMemOp mop
= s
->insn
->data
;
2939 /* In a parallel context, stop the world and single step. */
2940 if (parallel_cpus
) {
2941 potential_page_fault(s
);
2942 gen_exception(EXCP_ATOMIC
);
2943 return EXIT_NORETURN
;
2946 /* In a serial context, perform the two loads ... */
2947 a1
= get_address(s
, 0, get_field(s
->fields
, b1
), get_field(s
->fields
, d1
));
2948 a2
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
2949 tcg_gen_qemu_ld_i64(o
->out
, a1
, get_mem_index(s
), mop
| MO_ALIGN
);
2950 tcg_gen_qemu_ld_i64(o
->out2
, a2
, get_mem_index(s
), mop
| MO_ALIGN
);
2951 tcg_temp_free_i64(a1
);
2952 tcg_temp_free_i64(a2
);
2954 /* ... and indicate that we performed them while interlocked. */
2955 gen_op_movi_cc(s
, 0);
2959 static ExitStatus
op_lpq(DisasContext
*s
, DisasOps
*o
)
2961 gen_helper_lpq(o
->out
, cpu_env
, o
->in2
);
2962 return_low128(o
->out2
);
2966 #ifndef CONFIG_USER_ONLY
2967 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2969 check_privileged(s
);
2970 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2974 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2976 check_privileged(s
);
2977 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2982 static ExitStatus
op_lzrb(DisasContext
*s
, DisasOps
*o
)
2984 tcg_gen_andi_i64(o
->out
, o
->in2
, -256);
2988 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2991 o
->g_out
= o
->g_in2
;
2992 TCGV_UNUSED_I64(o
->in2
);
2997 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
2999 int b2
= get_field(s
->fields
, b2
);
3000 TCGv ar1
= tcg_temp_new_i64();
3003 o
->g_out
= o
->g_in2
;
3004 TCGV_UNUSED_I64(o
->in2
);
3007 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
3008 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
3009 tcg_gen_movi_i64(ar1
, 0);
3011 case PSW_ASC_ACCREG
>> FLAG_MASK_PSW_SHIFT
:
3012 tcg_gen_movi_i64(ar1
, 1);
3014 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
3016 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
3018 tcg_gen_movi_i64(ar1
, 0);
3021 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
3022 tcg_gen_movi_i64(ar1
, 2);
3026 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
3027 tcg_temp_free_i64(ar1
);
3032 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
3036 o
->g_out
= o
->g_in1
;
3037 o
->g_out2
= o
->g_in2
;
3038 TCGV_UNUSED_I64(o
->in1
);
3039 TCGV_UNUSED_I64(o
->in2
);
3040 o
->g_in1
= o
->g_in2
= false;
3044 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
3046 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3047 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
3048 tcg_temp_free_i32(l
);
3052 static ExitStatus
op_mvcin(DisasContext
*s
, DisasOps
*o
)
3054 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3055 gen_helper_mvcin(cpu_env
, l
, o
->addr1
, o
->in2
);
3056 tcg_temp_free_i32(l
);
3060 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
3062 int r1
= get_field(s
->fields
, r1
);
3063 int r2
= get_field(s
->fields
, r2
);
3066 /* r1 and r2 must be even. */
3067 if (r1
& 1 || r2
& 1) {
3068 gen_program_exception(s
, PGM_SPECIFICATION
);
3069 return EXIT_NORETURN
;
3072 t1
= tcg_const_i32(r1
);
3073 t2
= tcg_const_i32(r2
);
3074 gen_helper_mvcl(cc_op
, cpu_env
, t1
, t2
);
3075 tcg_temp_free_i32(t1
);
3076 tcg_temp_free_i32(t2
);
3081 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
3083 int r1
= get_field(s
->fields
, r1
);
3084 int r3
= get_field(s
->fields
, r3
);
3087 /* r1 and r3 must be even. */
3088 if (r1
& 1 || r3
& 1) {
3089 gen_program_exception(s
, PGM_SPECIFICATION
);
3090 return EXIT_NORETURN
;
3093 t1
= tcg_const_i32(r1
);
3094 t3
= tcg_const_i32(r3
);
3095 gen_helper_mvcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3096 tcg_temp_free_i32(t1
);
3097 tcg_temp_free_i32(t3
);
3102 static ExitStatus
op_mvclu(DisasContext
*s
, DisasOps
*o
)
3104 int r1
= get_field(s
->fields
, r1
);
3105 int r3
= get_field(s
->fields
, r3
);
3108 /* r1 and r3 must be even. */
3109 if (r1
& 1 || r3
& 1) {
3110 gen_program_exception(s
, PGM_SPECIFICATION
);
3111 return EXIT_NORETURN
;
3114 t1
= tcg_const_i32(r1
);
3115 t3
= tcg_const_i32(r3
);
3116 gen_helper_mvclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3117 tcg_temp_free_i32(t1
);
3118 tcg_temp_free_i32(t3
);
3123 static ExitStatus
op_mvcos(DisasContext
*s
, DisasOps
*o
)
3125 int r3
= get_field(s
->fields
, r3
);
3126 gen_helper_mvcos(cc_op
, cpu_env
, o
->addr1
, o
->in2
, regs
[r3
]);
3131 #ifndef CONFIG_USER_ONLY
3132 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
3134 int r1
= get_field(s
->fields
, l1
);
3135 check_privileged(s
);
3136 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3141 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
3143 int r1
= get_field(s
->fields
, l1
);
3144 check_privileged(s
);
3145 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3151 static ExitStatus
op_mvn(DisasContext
*s
, DisasOps
*o
)
3153 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3154 gen_helper_mvn(cpu_env
, l
, o
->addr1
, o
->in2
);
3155 tcg_temp_free_i32(l
);
3159 static ExitStatus
op_mvo(DisasContext
*s
, DisasOps
*o
)
3161 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3162 gen_helper_mvo(cpu_env
, l
, o
->addr1
, o
->in2
);
3163 tcg_temp_free_i32(l
);
3167 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
3169 gen_helper_mvpg(cc_op
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3174 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
3176 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3178 return_low128(o
->in2
);
3182 static ExitStatus
op_mvz(DisasContext
*s
, DisasOps
*o
)
3184 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3185 gen_helper_mvz(cpu_env
, l
, o
->addr1
, o
->in2
);
3186 tcg_temp_free_i32(l
);
3190 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
3192 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3196 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
3198 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
3202 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
3204 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3208 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
3210 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3214 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
3216 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3220 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
3222 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3223 return_low128(o
->out2
);
3227 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
3229 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3230 return_low128(o
->out2
);
3234 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
3236 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3237 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3238 tcg_temp_free_i64(r3
);
3242 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
3244 int r3
= get_field(s
->fields
, r3
);
3245 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3249 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
3251 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3252 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3253 tcg_temp_free_i64(r3
);
3257 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
3259 int r3
= get_field(s
->fields
, r3
);
3260 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3264 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
3267 z
= tcg_const_i64(0);
3268 n
= tcg_temp_new_i64();
3269 tcg_gen_neg_i64(n
, o
->in2
);
3270 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
3271 tcg_temp_free_i64(n
);
3272 tcg_temp_free_i64(z
);
3276 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
3278 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3282 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
3284 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3288 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
3290 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3291 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3295 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
3297 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3298 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3299 tcg_temp_free_i32(l
);
3304 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3306 tcg_gen_neg_i64(o
->out
, o
->in2
);
3310 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
3312 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3316 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
3318 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3322 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
3324 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3325 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3329 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3331 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3332 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3333 tcg_temp_free_i32(l
);
3338 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3340 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3344 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3346 int shift
= s
->insn
->data
& 0xff;
3347 int size
= s
->insn
->data
>> 8;
3348 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3351 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3352 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3354 /* Produce the CC from only the bits manipulated. */
3355 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3356 set_cc_nz_u64(s
, cc_dst
);
3360 static ExitStatus
op_pack(DisasContext
*s
, DisasOps
*o
)
3362 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3363 gen_helper_pack(cpu_env
, l
, o
->addr1
, o
->in2
);
3364 tcg_temp_free_i32(l
);
3368 static ExitStatus
op_pka(DisasContext
*s
, DisasOps
*o
)
3370 int l2
= get_field(s
->fields
, l2
) + 1;
3373 /* The length must not exceed 32 bytes. */
3375 gen_program_exception(s
, PGM_SPECIFICATION
);
3376 return EXIT_NORETURN
;
3378 l
= tcg_const_i32(l2
);
3379 gen_helper_pka(cpu_env
, o
->addr1
, o
->in2
, l
);
3380 tcg_temp_free_i32(l
);
3384 static ExitStatus
op_pku(DisasContext
*s
, DisasOps
*o
)
3386 int l2
= get_field(s
->fields
, l2
) + 1;
3389 /* The length must be even and should not exceed 64 bytes. */
3390 if ((l2
& 1) || (l2
> 64)) {
3391 gen_program_exception(s
, PGM_SPECIFICATION
);
3392 return EXIT_NORETURN
;
3394 l
= tcg_const_i32(l2
);
3395 gen_helper_pku(cpu_env
, o
->addr1
, o
->in2
, l
);
3396 tcg_temp_free_i32(l
);
3400 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3402 gen_helper_popcnt(o
->out
, o
->in2
);
3406 #ifndef CONFIG_USER_ONLY
3407 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3409 check_privileged(s
);
3410 gen_helper_ptlb(cpu_env
);
3415 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3417 int i3
= get_field(s
->fields
, i3
);
3418 int i4
= get_field(s
->fields
, i4
);
3419 int i5
= get_field(s
->fields
, i5
);
3420 int do_zero
= i4
& 0x80;
3421 uint64_t mask
, imask
, pmask
;
3424 /* Adjust the arguments for the specific insn. */
3425 switch (s
->fields
->op2
) {
3426 case 0x55: /* risbg */
3431 case 0x5d: /* risbhg */
3434 pmask
= 0xffffffff00000000ull
;
3436 case 0x51: /* risblg */
3439 pmask
= 0x00000000ffffffffull
;
3445 /* MASK is the set of bits to be inserted from R2.
3446 Take care for I3/I4 wraparound. */
3449 mask
^= pmask
>> i4
>> 1;
3451 mask
|= ~(pmask
>> i4
>> 1);
3455 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3456 insns, we need to keep the other half of the register. */
3457 imask
= ~mask
| ~pmask
;
3459 if (s
->fields
->op2
== 0x55) {
3469 if (s
->fields
->op2
== 0x5d) {
3473 /* In some cases we can implement this with extract. */
3474 if (imask
== 0 && pos
== 0 && len
> 0 && len
<= rot
) {
3475 tcg_gen_extract_i64(o
->out
, o
->in2
, 64 - rot
, len
);
3479 /* In some cases we can implement this with deposit. */
3480 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3481 /* Note that we rotate the bits to be inserted to the lsb, not to
3482 the position as described in the PoO. */
3483 rot
= (rot
- pos
) & 63;
3488 /* Rotate the input as necessary. */
3489 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3491 /* Insert the selected bits into the output. */
3494 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3496 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3498 } else if (imask
== 0) {
3499 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3501 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3502 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3503 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3508 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3510 int i3
= get_field(s
->fields
, i3
);
3511 int i4
= get_field(s
->fields
, i4
);
3512 int i5
= get_field(s
->fields
, i5
);
3515 /* If this is a test-only form, arrange to discard the result. */
3517 o
->out
= tcg_temp_new_i64();
3525 /* MASK is the set of bits to be operated on from R2.
3526 Take care for I3/I4 wraparound. */
3529 mask
^= ~0ull >> i4
>> 1;
3531 mask
|= ~(~0ull >> i4
>> 1);
3534 /* Rotate the input as necessary. */
3535 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3538 switch (s
->fields
->op2
) {
3539 case 0x55: /* AND */
3540 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3541 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3544 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3545 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3547 case 0x57: /* XOR */
3548 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3549 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3556 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3557 set_cc_nz_u64(s
, cc_dst
);
3561 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
3563 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3567 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
3569 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3573 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
3575 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3579 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3581 TCGv_i32 t1
= tcg_temp_new_i32();
3582 TCGv_i32 t2
= tcg_temp_new_i32();
3583 TCGv_i32 to
= tcg_temp_new_i32();
3584 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3585 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3586 tcg_gen_rotl_i32(to
, t1
, t2
);
3587 tcg_gen_extu_i32_i64(o
->out
, to
);
3588 tcg_temp_free_i32(t1
);
3589 tcg_temp_free_i32(t2
);
3590 tcg_temp_free_i32(to
);
3594 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3596 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3600 #ifndef CONFIG_USER_ONLY
3601 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3603 check_privileged(s
);
3604 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3609 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
3611 check_privileged(s
);
3612 gen_helper_sacf(cpu_env
, o
->in2
);
3613 /* Addressing mode has changed, so end the block. */
3614 return EXIT_PC_STALE
;
3618 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3620 int sam
= s
->insn
->data
;
3636 /* Bizarre but true, we check the address of the current insn for the
3637 specification exception, not the next to be executed. Thus the PoO
3638 documents that Bad Things Happen two bytes before the end. */
3639 if (s
->pc
& ~mask
) {
3640 gen_program_exception(s
, PGM_SPECIFICATION
);
3641 return EXIT_NORETURN
;
3645 tsam
= tcg_const_i64(sam
);
3646 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3647 tcg_temp_free_i64(tsam
);
3649 /* Always exit the TB, since we (may have) changed execution mode. */
3650 return EXIT_PC_STALE
;
3653 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3655 int r1
= get_field(s
->fields
, r1
);
3656 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3660 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3662 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3666 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3668 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3672 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3674 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3675 return_low128(o
->out2
);
3679 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3681 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3685 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3687 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3691 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3693 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3694 return_low128(o
->out2
);
3698 #ifndef CONFIG_USER_ONLY
3699 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3701 check_privileged(s
);
3702 potential_page_fault(s
);
3703 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3708 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3710 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3711 check_privileged(s
);
3712 potential_page_fault(s
);
3713 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3715 tcg_temp_free_i32(r1
);
3720 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3727 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3729 /* We want to store when the condition is fulfilled, so branch
3730 out when it's not */
3731 c
.cond
= tcg_invert_cond(c
.cond
);
3733 lab
= gen_new_label();
3735 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3737 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3741 r1
= get_field(s
->fields
, r1
);
3742 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3743 switch (s
->insn
->data
) {
3745 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3748 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3750 case 2: /* STOCFH */
3751 h
= tcg_temp_new_i64();
3752 tcg_gen_shri_i64(h
, regs
[r1
], 32);
3753 tcg_gen_qemu_st32(h
, a
, get_mem_index(s
));
3754 tcg_temp_free_i64(h
);
3757 g_assert_not_reached();
3759 tcg_temp_free_i64(a
);
3765 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3767 uint64_t sign
= 1ull << s
->insn
->data
;
3768 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3769 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3770 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3771 /* The arithmetic left shift is curious in that it does not affect
3772 the sign bit. Copy that over from the source unchanged. */
3773 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3774 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3775 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3779 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3781 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3785 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3787 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3791 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3793 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3797 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3799 gen_helper_sfpc(cpu_env
, o
->in2
);
3803 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3805 gen_helper_sfas(cpu_env
, o
->in2
);
3809 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3811 int b2
= get_field(s
->fields
, b2
);
3812 int d2
= get_field(s
->fields
, d2
);
3813 TCGv_i64 t1
= tcg_temp_new_i64();
3814 TCGv_i64 t2
= tcg_temp_new_i64();
3817 switch (s
->fields
->op2
) {
3818 case 0x99: /* SRNM */
3821 case 0xb8: /* SRNMB */
3824 case 0xb9: /* SRNMT */
3830 mask
= (1 << len
) - 1;
3832 /* Insert the value into the appropriate field of the FPC. */
3834 tcg_gen_movi_i64(t1
, d2
& mask
);
3836 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3837 tcg_gen_andi_i64(t1
, t1
, mask
);
3839 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3840 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3841 tcg_temp_free_i64(t1
);
3843 /* Then install the new FPC to set the rounding mode in fpu_status. */
3844 gen_helper_sfpc(cpu_env
, t2
);
3845 tcg_temp_free_i64(t2
);
3849 static ExitStatus
op_spm(DisasContext
*s
, DisasOps
*o
)
3851 tcg_gen_extrl_i64_i32(cc_op
, o
->in1
);
3852 tcg_gen_extract_i32(cc_op
, cc_op
, 28, 2);
3855 tcg_gen_shri_i64(o
->in1
, o
->in1
, 24);
3856 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in1
, PSW_SHIFT_MASK_PM
, 4);
3860 #ifndef CONFIG_USER_ONLY
3861 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3863 check_privileged(s
);
3864 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3865 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
, 4);
3869 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3871 check_privileged(s
);
3872 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3876 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3878 check_privileged(s
);
3879 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3880 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3881 return EXIT_PC_STALE_NOCHAIN
;
3884 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3886 check_privileged(s
);
3887 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, core_id
));
3891 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3893 gen_helper_stck(o
->out
, cpu_env
);
3894 /* ??? We don't implement clock states. */
3895 gen_op_movi_cc(s
, 0);
3899 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3901 TCGv_i64 c1
= tcg_temp_new_i64();
3902 TCGv_i64 c2
= tcg_temp_new_i64();
3903 gen_helper_stck(c1
, cpu_env
);
3904 /* Shift the 64-bit value into its place as a zero-extended
3905 104-bit value. Note that "bit positions 64-103 are always
3906 non-zero so that they compare differently to STCK"; we set
3907 the least significant bit to 1. */
3908 tcg_gen_shli_i64(c2
, c1
, 56);
3909 tcg_gen_shri_i64(c1
, c1
, 8);
3910 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3911 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3912 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3913 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3914 tcg_temp_free_i64(c1
);
3915 tcg_temp_free_i64(c2
);
3916 /* ??? We don't implement clock states. */
3917 gen_op_movi_cc(s
, 0);
3921 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3923 check_privileged(s
);
3924 gen_helper_sckc(cpu_env
, o
->in2
);
3928 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3930 check_privileged(s
);
3931 gen_helper_stckc(o
->out
, cpu_env
);
3935 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3937 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3938 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3939 check_privileged(s
);
3940 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3941 tcg_temp_free_i32(r1
);
3942 tcg_temp_free_i32(r3
);
3946 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3948 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3949 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3950 check_privileged(s
);
3951 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3952 tcg_temp_free_i32(r1
);
3953 tcg_temp_free_i32(r3
);
3957 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3959 check_privileged(s
);
3960 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpuid
));
3961 tcg_gen_qemu_st_i64(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
3965 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3967 check_privileged(s
);
3968 gen_helper_spt(cpu_env
, o
->in2
);
3972 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3974 check_privileged(s
);
3975 gen_helper_stfl(cpu_env
);
3979 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3981 check_privileged(s
);
3982 gen_helper_stpt(o
->out
, cpu_env
);
3986 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3988 check_privileged(s
);
3989 potential_page_fault(s
);
3990 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3995 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3997 check_privileged(s
);
3998 gen_helper_spx(cpu_env
, o
->in2
);
4002 static ExitStatus
op_xsch(DisasContext
*s
, DisasOps
*o
)
4004 check_privileged(s
);
4005 potential_page_fault(s
);
4006 gen_helper_xsch(cpu_env
, regs
[1]);
4011 static ExitStatus
op_csch(DisasContext
*s
, DisasOps
*o
)
4013 check_privileged(s
);
4014 potential_page_fault(s
);
4015 gen_helper_csch(cpu_env
, regs
[1]);
4020 static ExitStatus
op_hsch(DisasContext
*s
, DisasOps
*o
)
4022 check_privileged(s
);
4023 potential_page_fault(s
);
4024 gen_helper_hsch(cpu_env
, regs
[1]);
4029 static ExitStatus
op_msch(DisasContext
*s
, DisasOps
*o
)
4031 check_privileged(s
);
4032 potential_page_fault(s
);
4033 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
4038 static ExitStatus
op_rchp(DisasContext
*s
, DisasOps
*o
)
4040 check_privileged(s
);
4041 potential_page_fault(s
);
4042 gen_helper_rchp(cpu_env
, regs
[1]);
4047 static ExitStatus
op_rsch(DisasContext
*s
, DisasOps
*o
)
4049 check_privileged(s
);
4050 potential_page_fault(s
);
4051 gen_helper_rsch(cpu_env
, regs
[1]);
4056 static ExitStatus
op_ssch(DisasContext
*s
, DisasOps
*o
)
4058 check_privileged(s
);
4059 potential_page_fault(s
);
4060 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
4065 static ExitStatus
op_stsch(DisasContext
*s
, DisasOps
*o
)
4067 check_privileged(s
);
4068 potential_page_fault(s
);
4069 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
4074 static ExitStatus
op_tsch(DisasContext
*s
, DisasOps
*o
)
4076 check_privileged(s
);
4077 potential_page_fault(s
);
4078 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
4083 static ExitStatus
op_chsc(DisasContext
*s
, DisasOps
*o
)
4085 check_privileged(s
);
4086 potential_page_fault(s
);
4087 gen_helper_chsc(cpu_env
, o
->in2
);
4092 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
4094 check_privileged(s
);
4095 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
4096 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
4100 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
4102 uint64_t i2
= get_field(s
->fields
, i2
);
4105 check_privileged(s
);
4107 /* It is important to do what the instruction name says: STORE THEN.
4108 If we let the output hook perform the store then if we fault and
4109 restart, we'll have the wrong SYSTEM MASK in place. */
4110 t
= tcg_temp_new_i64();
4111 tcg_gen_shri_i64(t
, psw_mask
, 56);
4112 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
4113 tcg_temp_free_i64(t
);
4115 if (s
->fields
->op
== 0xac) {
4116 tcg_gen_andi_i64(psw_mask
, psw_mask
,
4117 (i2
<< 56) | 0x00ffffffffffffffull
);
4119 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
4122 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4123 return EXIT_PC_STALE_NOCHAIN
;
4126 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
4128 check_privileged(s
);
4129 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
4133 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
4135 check_privileged(s
);
4136 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
4141 static ExitStatus
op_stfle(DisasContext
*s
, DisasOps
*o
)
4143 potential_page_fault(s
);
4144 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
4149 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
4151 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
4155 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
4157 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
4161 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
4163 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
4167 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
4169 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
4173 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
4175 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4176 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4177 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
4178 tcg_temp_free_i32(r1
);
4179 tcg_temp_free_i32(r3
);
4183 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
4185 int m3
= get_field(s
->fields
, m3
);
4186 int pos
, base
= s
->insn
->data
;
4187 TCGv_i64 tmp
= tcg_temp_new_i64();
4189 pos
= base
+ ctz32(m3
) * 8;
4192 /* Effectively a 32-bit store. */
4193 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4194 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
4200 /* Effectively a 16-bit store. */
4201 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4202 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
4209 /* Effectively an 8-bit store. */
4210 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4211 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4215 /* This is going to be a sequence of shifts and stores. */
4216 pos
= base
+ 32 - 8;
4219 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4220 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4221 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
4223 m3
= (m3
<< 1) & 0xf;
4228 tcg_temp_free_i64(tmp
);
4232 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
4234 int r1
= get_field(s
->fields
, r1
);
4235 int r3
= get_field(s
->fields
, r3
);
4236 int size
= s
->insn
->data
;
4237 TCGv_i64 tsize
= tcg_const_i64(size
);
4241 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
4243 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
4248 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
4252 tcg_temp_free_i64(tsize
);
4256 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
4258 int r1
= get_field(s
->fields
, r1
);
4259 int r3
= get_field(s
->fields
, r3
);
4260 TCGv_i64 t
= tcg_temp_new_i64();
4261 TCGv_i64 t4
= tcg_const_i64(4);
4262 TCGv_i64 t32
= tcg_const_i64(32);
4265 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
4266 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
4270 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
4274 tcg_temp_free_i64(t
);
4275 tcg_temp_free_i64(t4
);
4276 tcg_temp_free_i64(t32
);
4280 static ExitStatus
op_stpq(DisasContext
*s
, DisasOps
*o
)
4282 gen_helper_stpq(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4286 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
4288 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4289 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4291 gen_helper_srst(cpu_env
, r1
, r2
);
4293 tcg_temp_free_i32(r1
);
4294 tcg_temp_free_i32(r2
);
4299 static ExitStatus
op_srstu(DisasContext
*s
, DisasOps
*o
)
4301 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4302 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4304 gen_helper_srstu(cpu_env
, r1
, r2
);
4306 tcg_temp_free_i32(r1
);
4307 tcg_temp_free_i32(r2
);
4312 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
4314 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4318 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
4323 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4325 /* The !borrow flag is the msb of CC. Since we want the inverse of
4326 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4327 disas_jcc(s
, &cmp
, 8 | 4);
4328 borrow
= tcg_temp_new_i64();
4330 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
4332 TCGv_i32 t
= tcg_temp_new_i32();
4333 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
4334 tcg_gen_extu_i32_i64(borrow
, t
);
4335 tcg_temp_free_i32(t
);
4339 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
4340 tcg_temp_free_i64(borrow
);
4344 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
4351 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
4352 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4353 tcg_temp_free_i32(t
);
4355 t
= tcg_const_i32(s
->ilen
);
4356 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4357 tcg_temp_free_i32(t
);
4359 gen_exception(EXCP_SVC
);
4360 return EXIT_NORETURN
;
4363 static ExitStatus
op_tam(DisasContext
*s
, DisasOps
*o
)
4367 cc
|= (s
->tb
->flags
& FLAG_MASK_64
) ? 2 : 0;
4368 cc
|= (s
->tb
->flags
& FLAG_MASK_32
) ? 1 : 0;
4369 gen_op_movi_cc(s
, cc
);
4373 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
4375 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4380 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4382 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4387 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4389 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4394 #ifndef CONFIG_USER_ONLY
4396 static ExitStatus
op_testblock(DisasContext
*s
, DisasOps
*o
)
4398 check_privileged(s
);
4399 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4404 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
4406 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
4413 static ExitStatus
op_tp(DisasContext
*s
, DisasOps
*o
)
4415 TCGv_i32 l1
= tcg_const_i32(get_field(s
->fields
, l1
) + 1);
4416 gen_helper_tp(cc_op
, cpu_env
, o
->addr1
, l1
);
4417 tcg_temp_free_i32(l1
);
4422 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
4424 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4425 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4426 tcg_temp_free_i32(l
);
4431 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
4433 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4434 return_low128(o
->out2
);
4439 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
4441 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4442 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4443 tcg_temp_free_i32(l
);
4448 static ExitStatus
op_trtr(DisasContext
*s
, DisasOps
*o
)
4450 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4451 gen_helper_trtr(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4452 tcg_temp_free_i32(l
);
4457 static ExitStatus
op_trXX(DisasContext
*s
, DisasOps
*o
)
4459 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4460 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4461 TCGv_i32 sizes
= tcg_const_i32(s
->insn
->opc
& 3);
4462 TCGv_i32 tst
= tcg_temp_new_i32();
4463 int m3
= get_field(s
->fields
, m3
);
4465 if (!s390_has_feat(S390_FEAT_ETF2_ENH
)) {
4469 tcg_gen_movi_i32(tst
, -1);
4471 tcg_gen_extrl_i64_i32(tst
, regs
[0]);
4472 if (s
->insn
->opc
& 3) {
4473 tcg_gen_ext8u_i32(tst
, tst
);
4475 tcg_gen_ext16u_i32(tst
, tst
);
4478 gen_helper_trXX(cc_op
, cpu_env
, r1
, r2
, tst
, sizes
);
4480 tcg_temp_free_i32(r1
);
4481 tcg_temp_free_i32(r2
);
4482 tcg_temp_free_i32(sizes
);
4483 tcg_temp_free_i32(tst
);
4488 static ExitStatus
op_ts(DisasContext
*s
, DisasOps
*o
)
4490 TCGv_i32 t1
= tcg_const_i32(0xff);
4491 tcg_gen_atomic_xchg_i32(t1
, o
->in2
, t1
, get_mem_index(s
), MO_UB
);
4492 tcg_gen_extract_i32(cc_op
, t1
, 7, 1);
4493 tcg_temp_free_i32(t1
);
4498 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
4500 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4501 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4502 tcg_temp_free_i32(l
);
4506 static ExitStatus
op_unpka(DisasContext
*s
, DisasOps
*o
)
4508 int l1
= get_field(s
->fields
, l1
) + 1;
4511 /* The length must not exceed 32 bytes. */
4513 gen_program_exception(s
, PGM_SPECIFICATION
);
4514 return EXIT_NORETURN
;
4516 l
= tcg_const_i32(l1
);
4517 gen_helper_unpka(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4518 tcg_temp_free_i32(l
);
4523 static ExitStatus
op_unpku(DisasContext
*s
, DisasOps
*o
)
4525 int l1
= get_field(s
->fields
, l1
) + 1;
4528 /* The length must be even and should not exceed 64 bytes. */
4529 if ((l1
& 1) || (l1
> 64)) {
4530 gen_program_exception(s
, PGM_SPECIFICATION
);
4531 return EXIT_NORETURN
;
4533 l
= tcg_const_i32(l1
);
4534 gen_helper_unpku(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4535 tcg_temp_free_i32(l
);
4541 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
4543 int d1
= get_field(s
->fields
, d1
);
4544 int d2
= get_field(s
->fields
, d2
);
4545 int b1
= get_field(s
->fields
, b1
);
4546 int b2
= get_field(s
->fields
, b2
);
4547 int l
= get_field(s
->fields
, l1
);
4550 o
->addr1
= get_address(s
, 0, b1
, d1
);
4552 /* If the addresses are identical, this is a store/memset of zero. */
4553 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4554 o
->in2
= tcg_const_i64(0);
4558 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4561 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4565 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4568 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4572 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4575 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4579 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4581 gen_op_movi_cc(s
, 0);
4585 /* But in general we'll defer to a helper. */
4586 o
->in2
= get_address(s
, 0, b2
, d2
);
4587 t32
= tcg_const_i32(l
);
4588 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4589 tcg_temp_free_i32(t32
);
4594 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
4596 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4600 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
4602 int shift
= s
->insn
->data
& 0xff;
4603 int size
= s
->insn
->data
>> 8;
4604 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4607 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4608 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4610 /* Produce the CC from only the bits manipulated. */
4611 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4612 set_cc_nz_u64(s
, cc_dst
);
4616 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
4618 o
->out
= tcg_const_i64(0);
4622 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
4624 o
->out
= tcg_const_i64(0);
4630 /* ====================================================================== */
4631 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4632 the original inputs), update the various cc data structures in order to
4633 be able to compute the new condition code. */
4635 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4637 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4640 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4642 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
4645 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
4647 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
4650 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4652 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4655 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4657 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4660 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4662 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4665 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4667 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4670 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4672 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
4675 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
4677 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
4680 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4682 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4685 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4687 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4690 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4692 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4695 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4697 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4700 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4702 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4705 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4707 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4710 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4712 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4715 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4717 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4720 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4722 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4725 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4727 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
4730 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
4732 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
4733 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
4736 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
4738 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
4741 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
4743 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
4746 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
4748 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
4751 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
4753 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
4756 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
4758 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
4761 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
4763 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
4766 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
4768 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
4771 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
4773 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
4776 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
4778 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
4781 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
4783 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
4786 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
4788 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
4791 /* ====================================================================== */
4792 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4793 with the TCG register to which we will write. Used in combination with
4794 the "wout" generators, in some cases we need a new temporary, and in
4795 some cases we can write to a TCG global. */
4797 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4799 o
->out
= tcg_temp_new_i64();
4801 #define SPEC_prep_new 0
4803 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4805 o
->out
= tcg_temp_new_i64();
4806 o
->out2
= tcg_temp_new_i64();
4808 #define SPEC_prep_new_P 0
4810 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4812 o
->out
= regs
[get_field(f
, r1
)];
4815 #define SPEC_prep_r1 0
4817 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4819 int r1
= get_field(f
, r1
);
4821 o
->out2
= regs
[r1
+ 1];
4822 o
->g_out
= o
->g_out2
= true;
4824 #define SPEC_prep_r1_P SPEC_r1_even
4826 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4828 o
->out
= fregs
[get_field(f
, r1
)];
4831 #define SPEC_prep_f1 0
4833 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4835 int r1
= get_field(f
, r1
);
4837 o
->out2
= fregs
[r1
+ 2];
4838 o
->g_out
= o
->g_out2
= true;
4840 #define SPEC_prep_x1 SPEC_r1_f128
4842 /* ====================================================================== */
4843 /* The "Write OUTput" generators. These generally perform some non-trivial
4844 copy of data to TCG globals, or to main memory. The trivial cases are
4845 generally handled by having a "prep" generator install the TCG global
4846 as the destination of the operation. */
4848 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4850 store_reg(get_field(f
, r1
), o
->out
);
4852 #define SPEC_wout_r1 0
4854 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4856 int r1
= get_field(f
, r1
);
4857 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
4859 #define SPEC_wout_r1_8 0
4861 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4863 int r1
= get_field(f
, r1
);
4864 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
4866 #define SPEC_wout_r1_16 0
4868 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4870 store_reg32_i64(get_field(f
, r1
), o
->out
);
4872 #define SPEC_wout_r1_32 0
4874 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4876 store_reg32h_i64(get_field(f
, r1
), o
->out
);
4878 #define SPEC_wout_r1_32h 0
4880 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4882 int r1
= get_field(f
, r1
);
4883 store_reg32_i64(r1
, o
->out
);
4884 store_reg32_i64(r1
+ 1, o
->out2
);
4886 #define SPEC_wout_r1_P32 SPEC_r1_even
4888 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4890 int r1
= get_field(f
, r1
);
4891 store_reg32_i64(r1
+ 1, o
->out
);
4892 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
4893 store_reg32_i64(r1
, o
->out
);
4895 #define SPEC_wout_r1_D32 SPEC_r1_even
4897 static void wout_r3_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4899 int r3
= get_field(f
, r3
);
4900 store_reg32_i64(r3
, o
->out
);
4901 store_reg32_i64(r3
+ 1, o
->out2
);
4903 #define SPEC_wout_r3_P32 SPEC_r3_even
4905 static void wout_r3_P64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4907 int r3
= get_field(f
, r3
);
4908 store_reg(r3
, o
->out
);
4909 store_reg(r3
+ 1, o
->out2
);
4911 #define SPEC_wout_r3_P64 SPEC_r3_even
4913 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4915 store_freg32_i64(get_field(f
, r1
), o
->out
);
4917 #define SPEC_wout_e1 0
4919 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4921 store_freg(get_field(f
, r1
), o
->out
);
4923 #define SPEC_wout_f1 0
4925 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4927 int f1
= get_field(s
->fields
, r1
);
4928 store_freg(f1
, o
->out
);
4929 store_freg(f1
+ 2, o
->out2
);
4931 #define SPEC_wout_x1 SPEC_r1_f128
4933 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4935 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4936 store_reg32_i64(get_field(f
, r1
), o
->out
);
4939 #define SPEC_wout_cond_r1r2_32 0
4941 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4943 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4944 store_freg32_i64(get_field(f
, r1
), o
->out
);
4947 #define SPEC_wout_cond_e1e2 0
4949 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4951 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4953 #define SPEC_wout_m1_8 0
4955 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4957 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4959 #define SPEC_wout_m1_16 0
4961 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4963 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4965 #define SPEC_wout_m1_32 0
4967 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4969 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4971 #define SPEC_wout_m1_64 0
4973 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4975 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4977 #define SPEC_wout_m2_32 0
4979 static void wout_in2_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4981 store_reg(get_field(f
, r1
), o
->in2
);
4983 #define SPEC_wout_in2_r1 0
4985 static void wout_in2_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4987 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4989 #define SPEC_wout_in2_r1_32 0
4991 /* ====================================================================== */
4992 /* The "INput 1" generators. These load the first operand to an insn. */
4994 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4996 o
->in1
= load_reg(get_field(f
, r1
));
4998 #define SPEC_in1_r1 0
5000 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5002 o
->in1
= regs
[get_field(f
, r1
)];
5005 #define SPEC_in1_r1_o 0
5007 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5009 o
->in1
= tcg_temp_new_i64();
5010 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
5012 #define SPEC_in1_r1_32s 0
5014 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5016 o
->in1
= tcg_temp_new_i64();
5017 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
5019 #define SPEC_in1_r1_32u 0
5021 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5023 o
->in1
= tcg_temp_new_i64();
5024 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
5026 #define SPEC_in1_r1_sr32 0
5028 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5030 o
->in1
= load_reg(get_field(f
, r1
) + 1);
5032 #define SPEC_in1_r1p1 SPEC_r1_even
5034 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5036 o
->in1
= tcg_temp_new_i64();
5037 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
5039 #define SPEC_in1_r1p1_32s SPEC_r1_even
5041 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5043 o
->in1
= tcg_temp_new_i64();
5044 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
5046 #define SPEC_in1_r1p1_32u SPEC_r1_even
5048 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5050 int r1
= get_field(f
, r1
);
5051 o
->in1
= tcg_temp_new_i64();
5052 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
5054 #define SPEC_in1_r1_D32 SPEC_r1_even
5056 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5058 o
->in1
= load_reg(get_field(f
, r2
));
5060 #define SPEC_in1_r2 0
5062 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5064 o
->in1
= tcg_temp_new_i64();
5065 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
5067 #define SPEC_in1_r2_sr32 0
5069 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5071 o
->in1
= load_reg(get_field(f
, r3
));
5073 #define SPEC_in1_r3 0
5075 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5077 o
->in1
= regs
[get_field(f
, r3
)];
5080 #define SPEC_in1_r3_o 0
5082 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5084 o
->in1
= tcg_temp_new_i64();
5085 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5087 #define SPEC_in1_r3_32s 0
5089 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5091 o
->in1
= tcg_temp_new_i64();
5092 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5094 #define SPEC_in1_r3_32u 0
5096 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5098 int r3
= get_field(f
, r3
);
5099 o
->in1
= tcg_temp_new_i64();
5100 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
5102 #define SPEC_in1_r3_D32 SPEC_r3_even
5104 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5106 o
->in1
= load_freg32_i64(get_field(f
, r1
));
5108 #define SPEC_in1_e1 0
5110 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5112 o
->in1
= fregs
[get_field(f
, r1
)];
5115 #define SPEC_in1_f1_o 0
5117 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5119 int r1
= get_field(f
, r1
);
5121 o
->out2
= fregs
[r1
+ 2];
5122 o
->g_out
= o
->g_out2
= true;
5124 #define SPEC_in1_x1_o SPEC_r1_f128
5126 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5128 o
->in1
= fregs
[get_field(f
, r3
)];
5131 #define SPEC_in1_f3_o 0
5133 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5135 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
5137 #define SPEC_in1_la1 0
5139 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5141 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5142 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5144 #define SPEC_in1_la2 0
5146 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5149 o
->in1
= tcg_temp_new_i64();
5150 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
5152 #define SPEC_in1_m1_8u 0
5154 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5157 o
->in1
= tcg_temp_new_i64();
5158 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
5160 #define SPEC_in1_m1_16s 0
5162 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5165 o
->in1
= tcg_temp_new_i64();
5166 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
5168 #define SPEC_in1_m1_16u 0
5170 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5173 o
->in1
= tcg_temp_new_i64();
5174 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
5176 #define SPEC_in1_m1_32s 0
5178 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5181 o
->in1
= tcg_temp_new_i64();
5182 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
5184 #define SPEC_in1_m1_32u 0
5186 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5189 o
->in1
= tcg_temp_new_i64();
5190 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
5192 #define SPEC_in1_m1_64 0
5194 /* ====================================================================== */
5195 /* The "INput 2" generators. These load the second operand to an insn. */
5197 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5199 o
->in2
= regs
[get_field(f
, r1
)];
5202 #define SPEC_in2_r1_o 0
5204 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5206 o
->in2
= tcg_temp_new_i64();
5207 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5209 #define SPEC_in2_r1_16u 0
5211 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5213 o
->in2
= tcg_temp_new_i64();
5214 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5216 #define SPEC_in2_r1_32u 0
5218 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5220 int r1
= get_field(f
, r1
);
5221 o
->in2
= tcg_temp_new_i64();
5222 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
5224 #define SPEC_in2_r1_D32 SPEC_r1_even
5226 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5228 o
->in2
= load_reg(get_field(f
, r2
));
5230 #define SPEC_in2_r2 0
5232 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5234 o
->in2
= regs
[get_field(f
, r2
)];
5237 #define SPEC_in2_r2_o 0
5239 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5241 int r2
= get_field(f
, r2
);
5243 o
->in2
= load_reg(r2
);
5246 #define SPEC_in2_r2_nz 0
5248 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5250 o
->in2
= tcg_temp_new_i64();
5251 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5253 #define SPEC_in2_r2_8s 0
5255 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5257 o
->in2
= tcg_temp_new_i64();
5258 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5260 #define SPEC_in2_r2_8u 0
5262 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5264 o
->in2
= tcg_temp_new_i64();
5265 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5267 #define SPEC_in2_r2_16s 0
5269 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5271 o
->in2
= tcg_temp_new_i64();
5272 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5274 #define SPEC_in2_r2_16u 0
5276 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5278 o
->in2
= load_reg(get_field(f
, r3
));
5280 #define SPEC_in2_r3 0
5282 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5284 o
->in2
= tcg_temp_new_i64();
5285 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
5287 #define SPEC_in2_r3_sr32 0
5289 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5291 o
->in2
= tcg_temp_new_i64();
5292 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5294 #define SPEC_in2_r2_32s 0
5296 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5298 o
->in2
= tcg_temp_new_i64();
5299 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5301 #define SPEC_in2_r2_32u 0
5303 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5305 o
->in2
= tcg_temp_new_i64();
5306 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
5308 #define SPEC_in2_r2_sr32 0
5310 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5312 o
->in2
= load_freg32_i64(get_field(f
, r2
));
5314 #define SPEC_in2_e2 0
5316 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5318 o
->in2
= fregs
[get_field(f
, r2
)];
5321 #define SPEC_in2_f2_o 0
5323 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5325 int r2
= get_field(f
, r2
);
5327 o
->in2
= fregs
[r2
+ 2];
5328 o
->g_in1
= o
->g_in2
= true;
5330 #define SPEC_in2_x2_o SPEC_r2_f128
5332 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5334 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
5336 #define SPEC_in2_ra2 0
5338 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5340 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5341 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5343 #define SPEC_in2_a2 0
5345 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5347 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
5349 #define SPEC_in2_ri2 0
5351 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5353 help_l2_shift(s
, f
, o
, 31);
5355 #define SPEC_in2_sh32 0
5357 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5359 help_l2_shift(s
, f
, o
, 63);
5361 #define SPEC_in2_sh64 0
5363 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5366 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
5368 #define SPEC_in2_m2_8u 0
5370 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5373 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
5375 #define SPEC_in2_m2_16s 0
5377 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5380 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5382 #define SPEC_in2_m2_16u 0
5384 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5387 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5389 #define SPEC_in2_m2_32s 0
5391 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5394 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5396 #define SPEC_in2_m2_32u 0
5398 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5401 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5403 #define SPEC_in2_m2_64 0
5405 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5408 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5410 #define SPEC_in2_mri2_16u 0
5412 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5415 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5417 #define SPEC_in2_mri2_32s 0
5419 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5422 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5424 #define SPEC_in2_mri2_32u 0
5426 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5429 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5431 #define SPEC_in2_mri2_64 0
5433 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5435 o
->in2
= tcg_const_i64(get_field(f
, i2
));
5437 #define SPEC_in2_i2 0
5439 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5441 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
5443 #define SPEC_in2_i2_8u 0
5445 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5447 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
5449 #define SPEC_in2_i2_16u 0
5451 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5453 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
5455 #define SPEC_in2_i2_32u 0
5457 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5459 uint64_t i2
= (uint16_t)get_field(f
, i2
);
5460 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5462 #define SPEC_in2_i2_16u_shl 0
5464 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5466 uint64_t i2
= (uint32_t)get_field(f
, i2
);
5467 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5469 #define SPEC_in2_i2_32u_shl 0
5471 #ifndef CONFIG_USER_ONLY
5472 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5474 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
5476 #define SPEC_in2_insn 0
5479 /* ====================================================================== */
5481 /* Find opc within the table of insns. This is formulated as a switch
5482 statement so that (1) we get compile-time notice of cut-paste errors
5483 for duplicated opcodes, and (2) the compiler generates the binary
5484 search tree, rather than us having to post-process the table. */
5486 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5487 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5489 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5491 enum DisasInsnEnum
{
5492 #include "insn-data.def"
5496 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5500 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5502 .help_in1 = in1_##I1, \
5503 .help_in2 = in2_##I2, \
5504 .help_prep = prep_##P, \
5505 .help_wout = wout_##W, \
5506 .help_cout = cout_##CC, \
5507 .help_op = op_##OP, \
5511 /* Allow 0 to be used for NULL in the table below. */
5519 #define SPEC_in1_0 0
5520 #define SPEC_in2_0 0
5521 #define SPEC_prep_0 0
5522 #define SPEC_wout_0 0
/* Give smaller names to the various facilities.  Each FAC_* alias names
   the S390_FEAT_* enumerator that gates the corresponding instruction
   group in insn-data.def; the trailing comments spell out facilities
   hidden behind bare STFLE bit numbers.  ("SUPPPORT" below matches the
   (sic) spelling of the feature enumerator -- do not "fix" it here.) */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
/* The insn table itself: one DisasInsn per line of insn-data.def,
   built by the D() macro above.  The .def file lists opcodes in sorted
   order so lookup_opc()'s switch can be compiled as a binary search.
   NOTE(review): the closing "};" of this array is not visible in this
   extract -- confirm against the full file. */
5561 static const DisasInsn insn_info
[] = {
5562 #include "insn-data.def"
/* Third expansion of insn-data.def: the case arms of lookup_opc()'s
   switch.  Each insn line becomes "case OPC: return &insn_info[...]".
   The #undef avoids redefining the table-initializer D() above. */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];
/* Map a (one- or two-byte) opcode to its DisasInsn table entry, or
   NULL when the opcode is not implemented.
   NOTE(review): the function body surrounding the include (the opening
   brace, the "switch (opc)" statement, and the default NULL return) is
   missing from this extract -- only comments were added here; the
   surviving code is byte-identical. */
5569 static const DisasInsn
*lookup_opc(uint16_t opc
)
5572 #include "insn-data.def"
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
/*
 * Decode one operand field F from the raw instruction word INSN (which
 * is left-aligned in the 64-bit value) and record the result in
 * O->c[f->indexC].  The presentC/presentO bitmaps track which
 * compressed/original field slots have already been filled so that
 * overlapping field definitions are caught by assertion.
 *
 * NOTE(review): this extract is missing several lines of the original
 * (the declarations of the locals r/m, the "switch (f->type)" header,
 * the break statements, and the default case); only comments have been
 * added here -- the surviving code is byte-identical.
 */
5585 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5593 /* Zero extract the field from the insn. */
5594 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5596 /* Sign-extend, or un-swap the field as necessary. */
5598 case 0: /* unsigned */
5600 case 1: /* signed */
5601 assert(f
->size
<= 32);
5602 m
= 1u << (f
->size
- 1);
5605 case 2: /* dl+dh split, signed 20 bit. */
5606 r
= ((int8_t)r
<< 12) | (r
>> 8);
5612 /* Validate that the "compressed" encoding we selected above is valid.
5613 I.e. we haven't made two different original fields overlap. */
5614 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5615 o
->presentC
|= 1 << f
->indexC
;
5616 o
->presentO
|= 1 << f
->indexO
;
5618 o
->c
[f
->indexC
] = r
;
/*
 * NOTE(review): this extract is missing many lines of the original
 * (the DisasFields *f parameter, the local declarations of op/op2/
 * ilen/i, the switch headers and braces, and several case bodies);
 * only comments have been changed -- the surviving code is
 * byte-identical.
 */
5621 /* Lookup the insn at the current PC, extracting the operands into F and
5622 returning the info struct for the insn. Returns NULL for invalid insn. */
5624 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5627 uint64_t insn
, pc
= s
->pc
;
5629 const DisasInsn
*info
;
5631 if (unlikely(s
->ex_value
)) {
5632 /* Drop the EX data now, so that it's clear on exception paths. */
5633 TCGv_i64 zero
= tcg_const_i64(0);
5634 tcg_gen_st_i64(zero
, cpu_env
, offsetof(CPUS390XState
, ex_value
));
5635 tcg_temp_free_i64(zero
);
5637 /* Extract the values saved by EXECUTE. */
5638 insn
= s
->ex_value
& 0xffffffffffff0000ull
;
5639 ilen
= s
->ex_value
& 0xf;
5642 insn
= ld_code2(env
, pc
);
5643 op
= (insn
>> 8) & 0xff;
5644 ilen
= get_ilen(op
);
5650 insn
= ld_code4(env
, pc
) << 32;
5653 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
5656 g_assert_not_reached();
5659 s
->next_pc
= s
->pc
+ ilen
;
5662 /* We can't actually determine the insn format until we've looked up
5663 the full insn opcode. Which we can't do without locating the
5664 secondary opcode. Assume by default that OP2 is at bit 40; for
5665 those smaller insns that don't actually have a secondary opcode
5666 this will correctly result in OP2 = 0. */
5672 case 0xb2: /* S, RRF, RRE, IE */
5673 case 0xb3: /* RRE, RRD, RRF */
5674 case 0xb9: /* RRE, RRF */
5675 case 0xe5: /* SSE, SIL */
5676 op2
= (insn
<< 8) >> 56;
5680 case 0xc0: /* RIL */
5681 case 0xc2: /* RIL */
5682 case 0xc4: /* RIL */
5683 case 0xc6: /* RIL */
5684 case 0xc8: /* SSF */
5685 case 0xcc: /* RIL */
5686 op2
= (insn
<< 12) >> 60;
5688 case 0xc5: /* MII */
5689 case 0xc7: /* SMI */
5690 case 0xd0 ... 0xdf: /* SS */
5696 case 0xee ... 0xf3: /* SS */
5697 case 0xf8 ... 0xfd: /* SS */
5701 op2
= (insn
<< 40) >> 56;
5705 memset(f
, 0, sizeof(*f
));
5710 /* Lookup the instruction. */
5711 info
= lookup_opc(op
<< 8 | op2
);
5713 /* If we found it, extract the operands. */
5715 DisasFormat fmt
= info
->fmt
;
5718 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5719 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
/*
 * Translate one guest insn at s->pc: decode it via extract_insn(),
 * reject unimplemented opcodes, check the generic specification
 * exceptions encoded in insn->spec, then run the operand/operation
 * helper callbacks from the insn's DisasInsn entry and free the
 * temporaries they created.  In system mode a PER ifetch/exception
 * check brackets the insn.  Returns the ExitStatus produced by the
 * operation helper (NO_EXIT to keep translating).
 *
 * NOTE(review): this extract is missing many lines of the original
 * (the DisasFields/DisasOps locals, several if/else and #endif lines,
 * the even-register and f128-register tests, and the final
 * s->pc advance / return); only comments have been changed -- the
 * surviving code is byte-identical.
 */
5725 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5727 const DisasInsn
*insn
;
5728 ExitStatus ret
= NO_EXIT
;
5732 /* Search for the insn in the table. */
5733 insn
= extract_insn(env
, s
, &f
);
5735 /* Not found means unimplemented/illegal opcode. */
5737 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
5739 gen_illegal_opcode(s
);
5740 return EXIT_NORETURN
;
5743 #ifndef CONFIG_USER_ONLY
5744 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5745 TCGv_i64 addr
= tcg_const_i64(s
->pc
);
5746 gen_helper_per_ifetch(cpu_env
, addr
);
5747 tcg_temp_free_i64(addr
);
5751 /* Check for insn specification exceptions. */
5753 int spec
= insn
->spec
, excp
= 0, r
;
5755 if (spec
& SPEC_r1_even
) {
5756 r
= get_field(&f
, r1
);
5758 excp
= PGM_SPECIFICATION
;
5761 if (spec
& SPEC_r2_even
) {
5762 r
= get_field(&f
, r2
);
5764 excp
= PGM_SPECIFICATION
;
5767 if (spec
& SPEC_r3_even
) {
5768 r
= get_field(&f
, r3
);
5770 excp
= PGM_SPECIFICATION
;
5773 if (spec
& SPEC_r1_f128
) {
5774 r
= get_field(&f
, r1
);
5776 excp
= PGM_SPECIFICATION
;
5779 if (spec
& SPEC_r2_f128
) {
5780 r
= get_field(&f
, r2
);
5782 excp
= PGM_SPECIFICATION
;
5786 gen_program_exception(s
, excp
);
5787 return EXIT_NORETURN
;
5791 /* Set up the structures we use to communicate with the helpers. */
5794 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5795 TCGV_UNUSED_I64(o
.out
);
5796 TCGV_UNUSED_I64(o
.out2
);
5797 TCGV_UNUSED_I64(o
.in1
);
5798 TCGV_UNUSED_I64(o
.in2
);
5799 TCGV_UNUSED_I64(o
.addr1
);
5801 /* Implement the instruction. */
5802 if (insn
->help_in1
) {
5803 insn
->help_in1(s
, &f
, &o
);
5805 if (insn
->help_in2
) {
5806 insn
->help_in2(s
, &f
, &o
);
5808 if (insn
->help_prep
) {
5809 insn
->help_prep(s
, &f
, &o
);
5811 if (insn
->help_op
) {
5812 ret
= insn
->help_op(s
, &o
);
5814 if (insn
->help_wout
) {
5815 insn
->help_wout(s
, &f
, &o
);
5817 if (insn
->help_cout
) {
5818 insn
->help_cout(s
, &o
);
5821 /* Free any temporaries created by the helpers. */
5822 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5823 tcg_temp_free_i64(o
.out
);
5825 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5826 tcg_temp_free_i64(o
.out2
);
5828 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5829 tcg_temp_free_i64(o
.in1
);
5831 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5832 tcg_temp_free_i64(o
.in2
);
5834 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5835 tcg_temp_free_i64(o
.addr1
);
5838 #ifndef CONFIG_USER_ONLY
5839 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5840 /* An exception might be triggered, save PSW if not already done. */
5841 if (ret
== NO_EXIT
|| ret
== EXIT_PC_STALE
) {
5842 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
5848 /* Call the helper to check for a possible PER exception. */
5849 gen_helper_per_check_exception(cpu_env
);
5853 /* Advance to the next instruction. */
/*
 * Translate a block of guest insns into TCG ops: initialize the
 * DisasContext from TB flags (31-bit addressing mask, EX-value from
 * cs_base, single-step), then loop over translate_one() until a page
 * boundary, full op buffer, insn-count limit, breakpoint, or
 * single-step ends the TB.  Afterwards emit the TB epilogue for the
 * resulting ExitStatus and optionally log the disassembly.
 *
 * NOTE(review): this extract is missing many lines of the original
 * (the DisasContext/status/do_debug declarations, pc_start
 * initialization, the do/while opening, num_insns accounting, gen_io
 * start/stop, several switch arms, braces, and #endif lines); only
 * comments have been changed -- the surviving code is byte-identical.
 */
5858 void gen_intermediate_code(CPUState
*cs
, struct TranslationBlock
*tb
)
5860 CPUS390XState
*env
= cs
->env_ptr
;
5862 target_ulong pc_start
;
5863 uint64_t next_page_start
;
5864 int num_insns
, max_insns
;
5871 if (!(tb
->flags
& FLAG_MASK_64
)) {
5872 pc_start
&= 0x7fffffff;
5877 dc
.cc_op
= CC_OP_DYNAMIC
;
5878 dc
.ex_value
= tb
->cs_base
;
5879 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
5881 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
5884 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5885 if (max_insns
== 0) {
5886 max_insns
= CF_COUNT_MASK
;
5888 if (max_insns
> TCG_MAX_INSNS
) {
5889 max_insns
= TCG_MAX_INSNS
;
5895 tcg_gen_insn_start(dc
.pc
, dc
.cc_op
);
5898 if (unlikely(cpu_breakpoint_test(cs
, dc
.pc
, BP_ANY
))) {
5899 status
= EXIT_PC_STALE
;
5901 /* The address covered by the breakpoint must be included in
5902 [tb->pc, tb->pc + tb->size) in order for it to be
5903 properly cleared -- thus we increment the PC here so that
5904 the logic setting tb->size below does the right thing. */
5909 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
5913 status
= translate_one(env
, &dc
);
5915 /* If we reach a page boundary, are single stepping,
5916 or exhaust instruction count, stop generation. */
5917 if (status
== NO_EXIT
5918 && (dc
.pc
>= next_page_start
5919 || tcg_op_buf_full()
5920 || num_insns
>= max_insns
5922 || cs
->singlestep_enabled
5924 status
= EXIT_PC_STALE
;
5926 } while (status
== NO_EXIT
);
5928 if (tb
->cflags
& CF_LAST_IO
) {
5937 case EXIT_PC_STALE_NOCHAIN
:
5938 update_psw_addr(&dc
);
5940 case EXIT_PC_UPDATED
:
5941 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5942 cc op type is in env */
5945 case EXIT_PC_CC_UPDATED
:
5946 /* Exit the TB, either by raising a debug exception or by return. */
5948 gen_exception(EXCP_DEBUG
);
5949 } else if (use_exit_tb(&dc
) || status
== EXIT_PC_STALE_NOCHAIN
) {
5952 tcg_gen_lookup_and_goto_ptr(psw_addr
);
5956 g_assert_not_reached();
5959 gen_tb_end(tb
, num_insns
);
5961 tb
->size
= dc
.pc
- pc_start
;
5962 tb
->icount
= num_insns
;
5964 #if defined(S390X_DEBUG_DISAS)
5965 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
5966 && qemu_log_in_addr_range(pc_start
)) {
5968 if (unlikely(dc
.ex_value
)) {
5969 /* ??? Unfortunately log_target_disas can't use host memory. */
5970 qemu_log("IN: EXECUTE %016" PRIx64
"\n", dc
.ex_value
);
5972 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5973 log_target_disas(cs
, pc_start
, dc
.pc
- pc_start
, 1);
5981 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
5984 int cc_op
= data
[1];
5985 env
->psw
.addr
= data
[0];
5986 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {