4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
41 /* global register indexes */
42 static TCGv_env cpu_env
;
44 #include "exec/gen-icount.h"
45 #include "exec/helper-proto.h"
46 #include "exec/helper-gen.h"
48 #include "trace-tcg.h"
52 /* Information that (most) every instruction needs to manipulate. */
53 typedef struct DisasContext DisasContext
;
54 typedef struct DisasInsn DisasInsn
;
55 typedef struct DisasFields DisasFields
;
58 struct TranslationBlock
*tb
;
59 const DisasInsn
*insn
;
65 bool singlestep_enabled
;
68 /* Information carried about a condition to be evaluated. */
75 struct { TCGv_i64 a
, b
; } s64
;
76 struct { TCGv_i32 a
, b
; } s32
;
80 /* is_jmp field values */
81 #define DISAS_EXCP DISAS_TARGET_0
#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters of branches translated inline vs. via helper. */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
88 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
90 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
91 if (s
->tb
->flags
& FLAG_MASK_32
) {
92 return pc
| 0x80000000;
98 static TCGv_i64 psw_addr
;
99 static TCGv_i64 psw_mask
;
100 static TCGv_i64 gbea
;
102 static TCGv_i32 cc_op
;
103 static TCGv_i64 cc_src
;
104 static TCGv_i64 cc_dst
;
105 static TCGv_i64 cc_vr
;
107 static char cpu_reg_names
[32][4];
108 static TCGv_i64 regs
[16];
109 static TCGv_i64 fregs
[16];
111 void s390x_translate_init(void)
115 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
116 tcg_ctx
.tcg_env
= cpu_env
;
117 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
118 offsetof(CPUS390XState
, psw
.addr
),
120 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
121 offsetof(CPUS390XState
, psw
.mask
),
123 gbea
= tcg_global_mem_new_i64(cpu_env
,
124 offsetof(CPUS390XState
, gbea
),
127 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
129 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
131 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
133 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
136 for (i
= 0; i
< 16; i
++) {
137 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
138 regs
[i
] = tcg_global_mem_new(cpu_env
,
139 offsetof(CPUS390XState
, regs
[i
]),
143 for (i
= 0; i
< 16; i
++) {
144 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
145 fregs
[i
] = tcg_global_mem_new(cpu_env
,
146 offsetof(CPUS390XState
, vregs
[i
][0].d
),
147 cpu_reg_names
[i
+ 16]);
151 static TCGv_i64
load_reg(int reg
)
153 TCGv_i64 r
= tcg_temp_new_i64();
154 tcg_gen_mov_i64(r
, regs
[reg
]);
158 static TCGv_i64
load_freg32_i64(int reg
)
160 TCGv_i64 r
= tcg_temp_new_i64();
161 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
165 static void store_reg(int reg
, TCGv_i64 v
)
167 tcg_gen_mov_i64(regs
[reg
], v
);
170 static void store_freg(int reg
, TCGv_i64 v
)
172 tcg_gen_mov_i64(fregs
[reg
], v
);
175 static void store_reg32_i64(int reg
, TCGv_i64 v
)
177 /* 32 bit register writes keep the upper half */
178 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
181 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
183 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
186 static void store_freg32_i64(int reg
, TCGv_i64 v
)
188 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
191 static void return_low128(TCGv_i64 dest
)
193 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
196 static void update_psw_addr(DisasContext
*s
)
199 tcg_gen_movi_i64(psw_addr
, s
->pc
);
202 static void per_branch(DisasContext
*s
, bool to_next
)
204 #ifndef CONFIG_USER_ONLY
205 tcg_gen_movi_i64(gbea
, s
->pc
);
207 if (s
->tb
->flags
& FLAG_MASK_PER
) {
208 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->next_pc
) : psw_addr
;
209 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
211 tcg_temp_free_i64(next_pc
);
217 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
218 TCGv_i64 arg1
, TCGv_i64 arg2
)
220 #ifndef CONFIG_USER_ONLY
221 if (s
->tb
->flags
& FLAG_MASK_PER
) {
222 TCGLabel
*lab
= gen_new_label();
223 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
225 tcg_gen_movi_i64(gbea
, s
->pc
);
226 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
230 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
231 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
232 tcg_temp_free_i64(pc
);
237 static void per_breaking_event(DisasContext
*s
)
239 tcg_gen_movi_i64(gbea
, s
->pc
);
242 static void update_cc_op(DisasContext
*s
)
244 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
245 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
249 static void potential_page_fault(DisasContext
*s
)
255 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
257 return (uint64_t)cpu_lduw_code(env
, pc
);
260 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
262 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
265 static int get_mem_index(DisasContext
*s
)
267 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
268 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
270 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
272 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
280 static void gen_exception(int excp
)
282 TCGv_i32 tmp
= tcg_const_i32(excp
);
283 gen_helper_exception(cpu_env
, tmp
);
284 tcg_temp_free_i32(tmp
);
287 static void gen_program_exception(DisasContext
*s
, int code
)
291 /* Remember what pgm exeption this was. */
292 tmp
= tcg_const_i32(code
);
293 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
294 tcg_temp_free_i32(tmp
);
296 tmp
= tcg_const_i32(s
->ilen
);
297 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
298 tcg_temp_free_i32(tmp
);
306 /* Trigger exception. */
307 gen_exception(EXCP_PGM
);
310 static inline void gen_illegal_opcode(DisasContext
*s
)
312 gen_program_exception(s
, PGM_OPERATION
);
315 static inline void gen_trap(DisasContext
*s
)
319 /* Set DXC to 0xff. */
320 t
= tcg_temp_new_i32();
321 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
322 tcg_gen_ori_i32(t
, t
, 0xff00);
323 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
324 tcg_temp_free_i32(t
);
326 gen_program_exception(s
, PGM_DATA
);
329 #ifndef CONFIG_USER_ONLY
330 static void check_privileged(DisasContext
*s
)
332 if (s
->tb
->flags
& FLAG_MASK_PSTATE
) {
333 gen_program_exception(s
, PGM_PRIVILEGED
);
338 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
340 TCGv_i64 tmp
= tcg_temp_new_i64();
341 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
343 /* Note that d2 is limited to 20 bits, signed. If we crop negative
344 displacements early we create larger immedate addends. */
346 /* Note that addi optimizes the imm==0 case. */
348 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
349 tcg_gen_addi_i64(tmp
, tmp
, d2
);
351 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
353 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
359 tcg_gen_movi_i64(tmp
, d2
);
362 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
368 static inline bool live_cc_data(DisasContext
*s
)
370 return (s
->cc_op
!= CC_OP_DYNAMIC
371 && s
->cc_op
!= CC_OP_STATIC
375 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
377 if (live_cc_data(s
)) {
378 tcg_gen_discard_i64(cc_src
);
379 tcg_gen_discard_i64(cc_dst
);
380 tcg_gen_discard_i64(cc_vr
);
382 s
->cc_op
= CC_OP_CONST0
+ val
;
385 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
387 if (live_cc_data(s
)) {
388 tcg_gen_discard_i64(cc_src
);
389 tcg_gen_discard_i64(cc_vr
);
391 tcg_gen_mov_i64(cc_dst
, dst
);
395 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
398 if (live_cc_data(s
)) {
399 tcg_gen_discard_i64(cc_vr
);
401 tcg_gen_mov_i64(cc_src
, src
);
402 tcg_gen_mov_i64(cc_dst
, dst
);
406 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
407 TCGv_i64 dst
, TCGv_i64 vr
)
409 tcg_gen_mov_i64(cc_src
, src
);
410 tcg_gen_mov_i64(cc_dst
, dst
);
411 tcg_gen_mov_i64(cc_vr
, vr
);
415 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
417 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
420 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
422 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
425 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
427 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
430 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
432 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
435 /* CC value is in env->cc_op */
436 static void set_cc_static(DisasContext
*s
)
438 if (live_cc_data(s
)) {
439 tcg_gen_discard_i64(cc_src
);
440 tcg_gen_discard_i64(cc_dst
);
441 tcg_gen_discard_i64(cc_vr
);
443 s
->cc_op
= CC_OP_STATIC
;
446 /* calculates cc into cc_op */
447 static void gen_op_calc_cc(DisasContext
*s
)
449 TCGv_i32 local_cc_op
;
452 TCGV_UNUSED_I32(local_cc_op
);
453 TCGV_UNUSED_I64(dummy
);
456 dummy
= tcg_const_i64(0);
470 local_cc_op
= tcg_const_i32(s
->cc_op
);
486 /* s->cc_op is the cc value */
487 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
490 /* env->cc_op already is the cc value */
505 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
510 case CC_OP_LTUGTU_32
:
511 case CC_OP_LTUGTU_64
:
518 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
533 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
536 /* unknown operation - assume 3 arguments and cc_op in env */
537 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
543 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
544 tcg_temp_free_i32(local_cc_op
);
546 if (!TCGV_IS_UNUSED_I64(dummy
)) {
547 tcg_temp_free_i64(dummy
);
550 /* We now have cc in cc_op as constant */
554 static bool use_exit_tb(DisasContext
*s
)
556 return (s
->singlestep_enabled
||
557 (s
->tb
->cflags
& CF_LAST_IO
) ||
558 (s
->tb
->flags
& FLAG_MASK_PER
));
561 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
563 if (unlikely(use_exit_tb(s
))) {
566 #ifndef CONFIG_USER_ONLY
567 return (dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
568 (dest
& TARGET_PAGE_MASK
) == (s
->pc
& TARGET_PAGE_MASK
);
574 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
576 #ifdef DEBUG_INLINE_BRANCHES
577 inline_branch_miss
[cc_op
]++;
581 static void account_inline_branch(DisasContext
*s
, int cc_op
)
583 #ifdef DEBUG_INLINE_BRANCHES
584 inline_branch_hit
[cc_op
]++;
588 /* Table of mask values to comparison codes, given a comparison as input.
589 For such, CC=3 should not be possible. */
590 static const TCGCond ltgt_cond
[16] = {
591 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
592 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
593 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
594 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
595 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
596 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
597 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
598 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
601 /* Table of mask values to comparison codes, given a logic op as input.
602 For such, only CC=0 and CC=1 should be possible. */
603 static const TCGCond nz_cond
[16] = {
604 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
605 TCG_COND_NEVER
, TCG_COND_NEVER
,
606 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
607 TCG_COND_NE
, TCG_COND_NE
,
608 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
609 TCG_COND_EQ
, TCG_COND_EQ
,
610 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
611 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
614 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
615 details required to generate a TCG comparison. */
616 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
619 enum cc_op old_cc_op
= s
->cc_op
;
621 if (mask
== 15 || mask
== 0) {
622 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
625 c
->g1
= c
->g2
= true;
630 /* Find the TCG condition for the mask + cc op. */
636 cond
= ltgt_cond
[mask
];
637 if (cond
== TCG_COND_NEVER
) {
640 account_inline_branch(s
, old_cc_op
);
643 case CC_OP_LTUGTU_32
:
644 case CC_OP_LTUGTU_64
:
645 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
646 if (cond
== TCG_COND_NEVER
) {
649 account_inline_branch(s
, old_cc_op
);
653 cond
= nz_cond
[mask
];
654 if (cond
== TCG_COND_NEVER
) {
657 account_inline_branch(s
, old_cc_op
);
672 account_inline_branch(s
, old_cc_op
);
687 account_inline_branch(s
, old_cc_op
);
691 switch (mask
& 0xa) {
692 case 8: /* src == 0 -> no one bit found */
695 case 2: /* src != 0 -> one bit found */
701 account_inline_branch(s
, old_cc_op
);
707 case 8 | 2: /* vr == 0 */
710 case 4 | 1: /* vr != 0 */
713 case 8 | 4: /* no carry -> vr >= src */
716 case 2 | 1: /* carry -> vr < src */
722 account_inline_branch(s
, old_cc_op
);
727 /* Note that CC=0 is impossible; treat it as dont-care. */
729 case 2: /* zero -> op1 == op2 */
732 case 4 | 1: /* !zero -> op1 != op2 */
735 case 4: /* borrow (!carry) -> op1 < op2 */
738 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
744 account_inline_branch(s
, old_cc_op
);
749 /* Calculate cc value. */
754 /* Jump based on CC. We'll load up the real cond below;
755 the assignment here merely avoids a compiler warning. */
756 account_noninline_branch(s
, old_cc_op
);
757 old_cc_op
= CC_OP_STATIC
;
758 cond
= TCG_COND_NEVER
;
762 /* Load up the arguments of the comparison. */
764 c
->g1
= c
->g2
= false;
768 c
->u
.s32
.a
= tcg_temp_new_i32();
769 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
770 c
->u
.s32
.b
= tcg_const_i32(0);
773 case CC_OP_LTUGTU_32
:
776 c
->u
.s32
.a
= tcg_temp_new_i32();
777 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
778 c
->u
.s32
.b
= tcg_temp_new_i32();
779 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
786 c
->u
.s64
.b
= tcg_const_i64(0);
790 case CC_OP_LTUGTU_64
:
794 c
->g1
= c
->g2
= true;
800 c
->u
.s64
.a
= tcg_temp_new_i64();
801 c
->u
.s64
.b
= tcg_const_i64(0);
802 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
807 c
->u
.s32
.a
= tcg_temp_new_i32();
808 c
->u
.s32
.b
= tcg_temp_new_i32();
809 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
810 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
811 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
813 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
820 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
821 c
->u
.s64
.b
= tcg_const_i64(0);
833 case 0x8 | 0x4 | 0x2: /* cc != 3 */
835 c
->u
.s32
.b
= tcg_const_i32(3);
837 case 0x8 | 0x4 | 0x1: /* cc != 2 */
839 c
->u
.s32
.b
= tcg_const_i32(2);
841 case 0x8 | 0x2 | 0x1: /* cc != 1 */
843 c
->u
.s32
.b
= tcg_const_i32(1);
845 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
848 c
->u
.s32
.a
= tcg_temp_new_i32();
849 c
->u
.s32
.b
= tcg_const_i32(0);
850 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
852 case 0x8 | 0x4: /* cc < 2 */
854 c
->u
.s32
.b
= tcg_const_i32(2);
856 case 0x8: /* cc == 0 */
858 c
->u
.s32
.b
= tcg_const_i32(0);
860 case 0x4 | 0x2 | 0x1: /* cc != 0 */
862 c
->u
.s32
.b
= tcg_const_i32(0);
864 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
867 c
->u
.s32
.a
= tcg_temp_new_i32();
868 c
->u
.s32
.b
= tcg_const_i32(0);
869 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
871 case 0x4: /* cc == 1 */
873 c
->u
.s32
.b
= tcg_const_i32(1);
875 case 0x2 | 0x1: /* cc > 1 */
877 c
->u
.s32
.b
= tcg_const_i32(1);
879 case 0x2: /* cc == 2 */
881 c
->u
.s32
.b
= tcg_const_i32(2);
883 case 0x1: /* cc == 3 */
885 c
->u
.s32
.b
= tcg_const_i32(3);
888 /* CC is masked by something else: (8 >> cc) & mask. */
891 c
->u
.s32
.a
= tcg_const_i32(8);
892 c
->u
.s32
.b
= tcg_const_i32(0);
893 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
894 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
905 static void free_compare(DisasCompare
*c
)
909 tcg_temp_free_i64(c
->u
.s64
.a
);
911 tcg_temp_free_i32(c
->u
.s32
.a
);
916 tcg_temp_free_i64(c
->u
.s64
.b
);
918 tcg_temp_free_i32(c
->u
.s32
.b
);
923 /* ====================================================================== */
924 /* Define the insn format enumeration. */
925 #define F0(N) FMT_##N,
926 #define F1(N, X1) F0(N)
927 #define F2(N, X1, X2) F0(N)
928 #define F3(N, X1, X2, X3) F0(N)
929 #define F4(N, X1, X2, X3, X4) F0(N)
930 #define F5(N, X1, X2, X3, X4, X5) F0(N)
933 #include "insn-format.def"
943 /* Define a structure to hold the decoded fields. We'll store each inside
944 an array indexed by an enum. In order to conserve memory, we'll arrange
945 for fields that do not exist at the same time to overlap, thus the "C"
946 for compact. For checking purposes there is an "O" for original index
947 as well that will be applied to availability bitmaps. */
949 enum DisasFieldIndexO
{
972 enum DisasFieldIndexC
{
1003 struct DisasFields
{
1007 unsigned presentC
:16;
1008 unsigned int presentO
;
1012 /* This is the way fields are to be accessed out of DisasFields. */
1013 #define have_field(S, F) have_field1((S), FLD_O_##F)
1014 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1016 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1018 return (f
->presentO
>> c
) & 1;
1021 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1022 enum DisasFieldIndexC c
)
1024 assert(have_field1(f
, o
));
1028 /* Describe the layout of each field in each format. */
1029 typedef struct DisasField
{
1031 unsigned int size
:8;
1032 unsigned int type
:2;
1033 unsigned int indexC
:6;
1034 enum DisasFieldIndexO indexO
:8;
1037 typedef struct DisasFormatInfo
{
1038 DisasField op
[NUM_C_FIELD
];
1041 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1042 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1043 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1044 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1045 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1046 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1047 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1048 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1049 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1050 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1051 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1052 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1053 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1054 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1056 #define F0(N) { { } },
1057 #define F1(N, X1) { { X1 } },
1058 #define F2(N, X1, X2) { { X1, X2 } },
1059 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1060 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1061 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1063 static const DisasFormatInfo format_info
[] = {
1064 #include "insn-format.def"
1082 /* Generally, we'll extract operands into this structures, operate upon
1083 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1084 of routines below for more details. */
1086 bool g_out
, g_out2
, g_in1
, g_in2
;
1087 TCGv_i64 out
, out2
, in1
, in2
;
1091 /* Instructions can place constraints on their operands, raising specification
1092 exceptions if they are violated. To make this easy to automate, each "in1",
1093 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1094 of the following, or 0. To make this easy to document, we'll put the
1095 SPEC_<name> defines next to <name>. */
1097 #define SPEC_r1_even 1
1098 #define SPEC_r2_even 2
1099 #define SPEC_r3_even 4
1100 #define SPEC_r1_f128 8
1101 #define SPEC_r2_f128 16
1103 /* Return values from translate_one, indicating the state of the TB. */
1105 /* Continue the TB. */
1107 /* We have emitted one or more goto_tb. No fixup required. */
1109 /* We are not using a goto_tb (for whatever reason), but have updated
1110 the PC (for whatever reason), so there's no need to do it again on
1113 /* We have updated the PC and CC values. */
1115 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1116 updated the PC for the next instruction to be executed. */
1118 /* We are exiting the TB to the main loop. */
1119 EXIT_PC_STALE_NOCHAIN
,
1120 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1121 No following code will be executed. */
1133 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1134 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1135 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1136 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1137 void (*help_cout
)(DisasContext
*, DisasOps
*);
1138 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1143 /* ====================================================================== */
1144 /* Miscellaneous helpers, used by several operations. */
1146 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1147 DisasOps
*o
, int mask
)
1149 int b2
= get_field(f
, b2
);
1150 int d2
= get_field(f
, d2
);
1153 o
->in2
= tcg_const_i64(d2
& mask
);
1155 o
->in2
= get_address(s
, 0, b2
, d2
);
1156 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1160 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1162 if (dest
== s
->next_pc
) {
1163 per_branch(s
, true);
1166 if (use_goto_tb(s
, dest
)) {
1168 per_breaking_event(s
);
1170 tcg_gen_movi_i64(psw_addr
, dest
);
1171 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1172 return EXIT_GOTO_TB
;
1174 tcg_gen_movi_i64(psw_addr
, dest
);
1175 per_branch(s
, false);
1176 return EXIT_PC_UPDATED
;
1180 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1181 bool is_imm
, int imm
, TCGv_i64 cdest
)
1184 uint64_t dest
= s
->pc
+ 2 * imm
;
1187 /* Take care of the special cases first. */
1188 if (c
->cond
== TCG_COND_NEVER
) {
1193 if (dest
== s
->next_pc
) {
1194 /* Branch to next. */
1195 per_branch(s
, true);
1199 if (c
->cond
== TCG_COND_ALWAYS
) {
1200 ret
= help_goto_direct(s
, dest
);
1204 if (TCGV_IS_UNUSED_I64(cdest
)) {
1205 /* E.g. bcr %r0 -> no branch. */
1209 if (c
->cond
== TCG_COND_ALWAYS
) {
1210 tcg_gen_mov_i64(psw_addr
, cdest
);
1211 per_branch(s
, false);
1212 ret
= EXIT_PC_UPDATED
;
1217 if (use_goto_tb(s
, s
->next_pc
)) {
1218 if (is_imm
&& use_goto_tb(s
, dest
)) {
1219 /* Both exits can use goto_tb. */
1222 lab
= gen_new_label();
1224 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1226 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1229 /* Branch not taken. */
1231 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1232 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1236 per_breaking_event(s
);
1238 tcg_gen_movi_i64(psw_addr
, dest
);
1239 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1243 /* Fallthru can use goto_tb, but taken branch cannot. */
1244 /* Store taken branch destination before the brcond. This
1245 avoids having to allocate a new local temp to hold it.
1246 We'll overwrite this in the not taken case anyway. */
1248 tcg_gen_mov_i64(psw_addr
, cdest
);
1251 lab
= gen_new_label();
1253 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1255 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1258 /* Branch not taken. */
1261 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1262 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1266 tcg_gen_movi_i64(psw_addr
, dest
);
1268 per_breaking_event(s
);
1269 ret
= EXIT_PC_UPDATED
;
1272 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1273 Most commonly we're single-stepping or some other condition that
1274 disables all use of goto_tb. Just update the PC and exit. */
1276 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1278 cdest
= tcg_const_i64(dest
);
1282 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1284 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1286 TCGv_i32 t0
= tcg_temp_new_i32();
1287 TCGv_i64 t1
= tcg_temp_new_i64();
1288 TCGv_i64 z
= tcg_const_i64(0);
1289 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1290 tcg_gen_extu_i32_i64(t1
, t0
);
1291 tcg_temp_free_i32(t0
);
1292 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1293 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1294 tcg_temp_free_i64(t1
);
1295 tcg_temp_free_i64(z
);
1299 tcg_temp_free_i64(cdest
);
1301 tcg_temp_free_i64(next
);
1303 ret
= EXIT_PC_UPDATED
;
1311 /* ====================================================================== */
1312 /* The operations. These perform the bulk of the work for any insn,
1313 usually after the operands have been loaded and output initialized. */
1315 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1318 z
= tcg_const_i64(0);
1319 n
= tcg_temp_new_i64();
1320 tcg_gen_neg_i64(n
, o
->in2
);
1321 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1322 tcg_temp_free_i64(n
);
1323 tcg_temp_free_i64(z
);
1327 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1329 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1333 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1335 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1339 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1341 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1342 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1346 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1348 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1352 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1357 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1359 /* The carry flag is the msb of CC, therefore the branch mask that would
1360 create that comparison is 3. Feeding the generated comparison to
1361 setcond produces the carry flag that we desire. */
1362 disas_jcc(s
, &cmp
, 3);
1363 carry
= tcg_temp_new_i64();
1365 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1367 TCGv_i32 t
= tcg_temp_new_i32();
1368 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1369 tcg_gen_extu_i32_i64(carry
, t
);
1370 tcg_temp_free_i32(t
);
1374 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1375 tcg_temp_free_i64(carry
);
1379 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1381 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1385 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1387 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1391 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1393 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1394 return_low128(o
->out2
);
1398 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1400 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1404 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1406 int shift
= s
->insn
->data
& 0xff;
1407 int size
= s
->insn
->data
>> 8;
1408 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1411 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1412 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1413 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1415 /* Produce the CC from only the bits manipulated. */
1416 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1417 set_cc_nz_u64(s
, cc_dst
);
1421 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1423 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1424 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1425 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1426 per_branch(s
, false);
1427 return EXIT_PC_UPDATED
;
1433 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1435 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1436 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1439 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1441 int m1
= get_field(s
->fields
, m1
);
1442 bool is_imm
= have_field(s
->fields
, i2
);
1443 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1446 /* BCR with R2 = 0 causes no branching */
1447 if (have_field(s
->fields
, r2
) && get_field(s
->fields
, r2
) == 0) {
1449 /* Perform serialization */
1450 /* FIXME: check for fast-BCR-serialization facility */
1451 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1454 /* Perform serialization */
1455 /* FIXME: perform checkpoint-synchronisation */
1456 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1461 disas_jcc(s
, &c
, m1
);
1462 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1465 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1467 int r1
= get_field(s
->fields
, r1
);
1468 bool is_imm
= have_field(s
->fields
, i2
);
1469 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1473 c
.cond
= TCG_COND_NE
;
1478 t
= tcg_temp_new_i64();
1479 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1480 store_reg32_i64(r1
, t
);
1481 c
.u
.s32
.a
= tcg_temp_new_i32();
1482 c
.u
.s32
.b
= tcg_const_i32(0);
1483 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1484 tcg_temp_free_i64(t
);
1486 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1489 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1491 int r1
= get_field(s
->fields
, r1
);
1492 int imm
= get_field(s
->fields
, i2
);
1496 c
.cond
= TCG_COND_NE
;
1501 t
= tcg_temp_new_i64();
1502 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1503 tcg_gen_subi_i64(t
, t
, 1);
1504 store_reg32h_i64(r1
, t
);
1505 c
.u
.s32
.a
= tcg_temp_new_i32();
1506 c
.u
.s32
.b
= tcg_const_i32(0);
1507 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1508 tcg_temp_free_i64(t
);
1510 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1513 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1515 int r1
= get_field(s
->fields
, r1
);
1516 bool is_imm
= have_field(s
->fields
, i2
);
1517 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1520 c
.cond
= TCG_COND_NE
;
1525 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1526 c
.u
.s64
.a
= regs
[r1
];
1527 c
.u
.s64
.b
= tcg_const_i64(0);
1529 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1532 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1534 int r1
= get_field(s
->fields
, r1
);
1535 int r3
= get_field(s
->fields
, r3
);
1536 bool is_imm
= have_field(s
->fields
, i2
);
1537 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1541 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1546 t
= tcg_temp_new_i64();
1547 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1548 c
.u
.s32
.a
= tcg_temp_new_i32();
1549 c
.u
.s32
.b
= tcg_temp_new_i32();
1550 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1551 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1552 store_reg32_i64(r1
, t
);
1553 tcg_temp_free_i64(t
);
1555 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1558 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1560 int r1
= get_field(s
->fields
, r1
);
1561 int r3
= get_field(s
->fields
, r3
);
1562 bool is_imm
= have_field(s
->fields
, i2
);
1563 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1566 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1569 if (r1
== (r3
| 1)) {
1570 c
.u
.s64
.b
= load_reg(r3
| 1);
1573 c
.u
.s64
.b
= regs
[r3
| 1];
1577 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1578 c
.u
.s64
.a
= regs
[r1
];
1581 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1584 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1586 int imm
, m3
= get_field(s
->fields
, m3
);
1590 c
.cond
= ltgt_cond
[m3
];
1591 if (s
->insn
->data
) {
1592 c
.cond
= tcg_unsigned_cond(c
.cond
);
1594 c
.is_64
= c
.g1
= c
.g2
= true;
1598 is_imm
= have_field(s
->fields
, i4
);
1600 imm
= get_field(s
->fields
, i4
);
1603 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1604 get_field(s
->fields
, d4
));
1607 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1610 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1612 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1617 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1619 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1624 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1626 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1631 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1633 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1634 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1635 tcg_temp_free_i32(m3
);
1636 gen_set_cc_nz_f32(s
, o
->in2
);
1640 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1642 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1643 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1644 tcg_temp_free_i32(m3
);
1645 gen_set_cc_nz_f64(s
, o
->in2
);
1649 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1651 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1652 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1653 tcg_temp_free_i32(m3
);
1654 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1658 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1660 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1661 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1662 tcg_temp_free_i32(m3
);
1663 gen_set_cc_nz_f32(s
, o
->in2
);
1667 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1669 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1670 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1671 tcg_temp_free_i32(m3
);
1672 gen_set_cc_nz_f64(s
, o
->in2
);
1676 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1678 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1679 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1680 tcg_temp_free_i32(m3
);
1681 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1685 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1687 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1688 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1689 tcg_temp_free_i32(m3
);
1690 gen_set_cc_nz_f32(s
, o
->in2
);
1694 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1696 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1697 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1698 tcg_temp_free_i32(m3
);
1699 gen_set_cc_nz_f64(s
, o
->in2
);
1703 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1705 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1706 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1707 tcg_temp_free_i32(m3
);
1708 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1712 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1714 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1715 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1716 tcg_temp_free_i32(m3
);
1717 gen_set_cc_nz_f32(s
, o
->in2
);
1721 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1723 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1724 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1725 tcg_temp_free_i32(m3
);
1726 gen_set_cc_nz_f64(s
, o
->in2
);
1730 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1732 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1733 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1734 tcg_temp_free_i32(m3
);
1735 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1739 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1741 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1742 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1743 tcg_temp_free_i32(m3
);
1747 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1749 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1750 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1751 tcg_temp_free_i32(m3
);
1755 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1757 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1758 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1759 tcg_temp_free_i32(m3
);
1760 return_low128(o
->out2
);
1764 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1766 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1767 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1768 tcg_temp_free_i32(m3
);
1772 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1774 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1775 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1776 tcg_temp_free_i32(m3
);
1780 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1782 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1783 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1784 tcg_temp_free_i32(m3
);
1785 return_low128(o
->out2
);
1789 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1791 int r2
= get_field(s
->fields
, r2
);
1792 TCGv_i64 len
= tcg_temp_new_i64();
1794 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1796 return_low128(o
->out
);
1798 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1799 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1800 tcg_temp_free_i64(len
);
1805 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1807 int l
= get_field(s
->fields
, l1
);
1812 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1813 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1816 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1817 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1820 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1821 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1824 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1825 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1828 vl
= tcg_const_i32(l
);
1829 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1830 tcg_temp_free_i32(vl
);
1834 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1838 static ExitStatus
op_clcl(DisasContext
*s
, DisasOps
*o
)
1840 int r1
= get_field(s
->fields
, r1
);
1841 int r2
= get_field(s
->fields
, r2
);
1844 /* r1 and r2 must be even. */
1845 if (r1
& 1 || r2
& 1) {
1846 gen_program_exception(s
, PGM_SPECIFICATION
);
1847 return EXIT_NORETURN
;
1850 t1
= tcg_const_i32(r1
);
1851 t2
= tcg_const_i32(r2
);
1852 gen_helper_clcl(cc_op
, cpu_env
, t1
, t2
);
1853 tcg_temp_free_i32(t1
);
1854 tcg_temp_free_i32(t2
);
1859 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1861 int r1
= get_field(s
->fields
, r1
);
1862 int r3
= get_field(s
->fields
, r3
);
1865 /* r1 and r3 must be even. */
1866 if (r1
& 1 || r3
& 1) {
1867 gen_program_exception(s
, PGM_SPECIFICATION
);
1868 return EXIT_NORETURN
;
1871 t1
= tcg_const_i32(r1
);
1872 t3
= tcg_const_i32(r3
);
1873 gen_helper_clcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1874 tcg_temp_free_i32(t1
);
1875 tcg_temp_free_i32(t3
);
1880 static ExitStatus
op_clclu(DisasContext
*s
, DisasOps
*o
)
1882 int r1
= get_field(s
->fields
, r1
);
1883 int r3
= get_field(s
->fields
, r3
);
1886 /* r1 and r3 must be even. */
1887 if (r1
& 1 || r3
& 1) {
1888 gen_program_exception(s
, PGM_SPECIFICATION
);
1889 return EXIT_NORETURN
;
1892 t1
= tcg_const_i32(r1
);
1893 t3
= tcg_const_i32(r3
);
1894 gen_helper_clclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1895 tcg_temp_free_i32(t1
);
1896 tcg_temp_free_i32(t3
);
1901 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1903 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1904 TCGv_i32 t1
= tcg_temp_new_i32();
1905 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1906 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1908 tcg_temp_free_i32(t1
);
1909 tcg_temp_free_i32(m3
);
1913 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1915 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1917 return_low128(o
->in2
);
1921 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1923 TCGv_i64 t
= tcg_temp_new_i64();
1924 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1925 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1926 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1927 tcg_temp_free_i64(t
);
1931 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1933 int d2
= get_field(s
->fields
, d2
);
1934 int b2
= get_field(s
->fields
, b2
);
1937 /* Note that in1 = R3 (new value) and
1938 in2 = (zero-extended) R1 (expected value). */
1940 addr
= get_address(s
, 0, b2
, d2
);
1941 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
1942 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
1943 tcg_temp_free_i64(addr
);
1945 /* Are the memory and expected values (un)equal? Note that this setcond
1946 produces the output CC value, thus the NE sense of the test. */
1947 cc
= tcg_temp_new_i64();
1948 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1949 tcg_gen_extrl_i64_i32(cc_op
, cc
);
1950 tcg_temp_free_i64(cc
);
1956 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1958 int r1
= get_field(s
->fields
, r1
);
1959 int r3
= get_field(s
->fields
, r3
);
1960 int d2
= get_field(s
->fields
, d2
);
1961 int b2
= get_field(s
->fields
, b2
);
1963 TCGv_i32 t_r1
, t_r3
;
1965 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1966 addr
= get_address(s
, 0, b2
, d2
);
1967 t_r1
= tcg_const_i32(r1
);
1968 t_r3
= tcg_const_i32(r3
);
1969 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
1970 tcg_temp_free_i64(addr
);
1971 tcg_temp_free_i32(t_r1
);
1972 tcg_temp_free_i32(t_r3
);
1978 static ExitStatus
op_csst(DisasContext
*s
, DisasOps
*o
)
1980 int r3
= get_field(s
->fields
, r3
);
1981 TCGv_i32 t_r3
= tcg_const_i32(r3
);
1983 gen_helper_csst(cc_op
, cpu_env
, t_r3
, o
->in1
, o
->in2
);
1984 tcg_temp_free_i32(t_r3
);
1990 #ifndef CONFIG_USER_ONLY
1991 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
1993 TCGMemOp mop
= s
->insn
->data
;
1994 TCGv_i64 addr
, old
, cc
;
1995 TCGLabel
*lab
= gen_new_label();
1997 /* Note that in1 = R1 (zero-extended expected value),
1998 out = R1 (original reg), out2 = R1+1 (new value). */
2000 check_privileged(s
);
2001 addr
= tcg_temp_new_i64();
2002 old
= tcg_temp_new_i64();
2003 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2004 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2005 get_mem_index(s
), mop
| MO_ALIGN
);
2006 tcg_temp_free_i64(addr
);
2008 /* Are the memory and expected values (un)equal? */
2009 cc
= tcg_temp_new_i64();
2010 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2011 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2013 /* Write back the output now, so that it happens before the
2014 following branch, so that we don't need local temps. */
2015 if ((mop
& MO_SIZE
) == MO_32
) {
2016 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2018 tcg_gen_mov_i64(o
->out
, old
);
2020 tcg_temp_free_i64(old
);
2022 /* If the comparison was equal, and the LSB of R2 was set,
2023 then we need to flush the TLB (for all cpus). */
2024 tcg_gen_xori_i64(cc
, cc
, 1);
2025 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2026 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2027 tcg_temp_free_i64(cc
);
2029 gen_helper_purge(cpu_env
);
2036 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2038 TCGv_i64 t1
= tcg_temp_new_i64();
2039 TCGv_i32 t2
= tcg_temp_new_i32();
2040 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2041 gen_helper_cvd(t1
, t2
);
2042 tcg_temp_free_i32(t2
);
2043 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2044 tcg_temp_free_i64(t1
);
2048 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2050 int m3
= get_field(s
->fields
, m3
);
2051 TCGLabel
*lab
= gen_new_label();
2054 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2055 if (s
->insn
->data
) {
2056 c
= tcg_unsigned_cond(c
);
2058 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2067 static ExitStatus
op_cuXX(DisasContext
*s
, DisasOps
*o
)
2069 int m3
= get_field(s
->fields
, m3
);
2070 int r1
= get_field(s
->fields
, r1
);
2071 int r2
= get_field(s
->fields
, r2
);
2072 TCGv_i32 tr1
, tr2
, chk
;
2074 /* R1 and R2 must both be even. */
2075 if ((r1
| r2
) & 1) {
2076 gen_program_exception(s
, PGM_SPECIFICATION
);
2077 return EXIT_NORETURN
;
2079 if (!s390_has_feat(S390_FEAT_ETF3_ENH
)) {
2083 tr1
= tcg_const_i32(r1
);
2084 tr2
= tcg_const_i32(r2
);
2085 chk
= tcg_const_i32(m3
);
2087 switch (s
->insn
->data
) {
2089 gen_helper_cu12(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2092 gen_helper_cu14(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2095 gen_helper_cu21(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2098 gen_helper_cu24(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2101 gen_helper_cu41(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2104 gen_helper_cu42(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2107 g_assert_not_reached();
2110 tcg_temp_free_i32(tr1
);
2111 tcg_temp_free_i32(tr2
);
2112 tcg_temp_free_i32(chk
);
2117 #ifndef CONFIG_USER_ONLY
2118 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2120 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2121 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2122 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2124 check_privileged(s
);
2128 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2130 tcg_temp_free_i32(func_code
);
2131 tcg_temp_free_i32(r3
);
2132 tcg_temp_free_i32(r1
);
2137 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2139 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2140 return_low128(o
->out
);
2144 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2146 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2147 return_low128(o
->out
);
2151 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2153 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2154 return_low128(o
->out
);
2158 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2160 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2161 return_low128(o
->out
);
2165 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2167 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2171 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2173 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2177 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2179 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2180 return_low128(o
->out2
);
2184 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2186 int r2
= get_field(s
->fields
, r2
);
2187 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2191 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2193 /* No cache information provided. */
2194 tcg_gen_movi_i64(o
->out
, -1);
2198 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2200 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2204 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2206 int r1
= get_field(s
->fields
, r1
);
2207 int r2
= get_field(s
->fields
, r2
);
2208 TCGv_i64 t
= tcg_temp_new_i64();
2210 /* Note the "subsequently" in the PoO, which implies a defined result
2211 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2212 tcg_gen_shri_i64(t
, psw_mask
, 32);
2213 store_reg32_i64(r1
, t
);
2215 store_reg32_i64(r2
, psw_mask
);
2218 tcg_temp_free_i64(t
);
2222 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2224 int r1
= get_field(s
->fields
, r1
);
2228 /* Nested EXECUTE is not allowed. */
2229 if (unlikely(s
->ex_value
)) {
2230 gen_program_exception(s
, PGM_EXECUTE
);
2231 return EXIT_NORETURN
;
2238 v1
= tcg_const_i64(0);
2243 ilen
= tcg_const_i32(s
->ilen
);
2244 gen_helper_ex(cpu_env
, ilen
, v1
, o
->in2
);
2245 tcg_temp_free_i32(ilen
);
2248 tcg_temp_free_i64(v1
);
2251 return EXIT_PC_CC_UPDATED
;
2254 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2256 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2257 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2258 tcg_temp_free_i32(m3
);
2262 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2264 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2265 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2266 tcg_temp_free_i32(m3
);
2270 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2272 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2273 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2274 return_low128(o
->out2
);
2275 tcg_temp_free_i32(m3
);
2279 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2281 /* We'll use the original input for cc computation, since we get to
2282 compare that against 0, which ought to be better than comparing
2283 the real output against 64. It also lets cc_dst be a convenient
2284 temporary during our computation. */
2285 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2287 /* R1 = IN ? CLZ(IN) : 64. */
2288 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2290 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2291 value by 64, which is undefined. But since the shift is 64 iff the
2292 input is zero, we still get the correct result after and'ing. */
2293 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2294 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2295 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2299 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2301 int m3
= get_field(s
->fields
, m3
);
2302 int pos
, len
, base
= s
->insn
->data
;
2303 TCGv_i64 tmp
= tcg_temp_new_i64();
2308 /* Effectively a 32-bit load. */
2309 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2316 /* Effectively a 16-bit load. */
2317 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2325 /* Effectively an 8-bit load. */
2326 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2331 pos
= base
+ ctz32(m3
) * 8;
2332 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2333 ccm
= ((1ull << len
) - 1) << pos
;
2337 /* This is going to be a sequence of loads and inserts. */
2338 pos
= base
+ 32 - 8;
2342 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2343 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2344 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2347 m3
= (m3
<< 1) & 0xf;
2353 tcg_gen_movi_i64(tmp
, ccm
);
2354 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2355 tcg_temp_free_i64(tmp
);
2359 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2361 int shift
= s
->insn
->data
& 0xff;
2362 int size
= s
->insn
->data
>> 8;
2363 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2367 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2372 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2374 t1
= tcg_temp_new_i64();
2375 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2376 tcg_gen_shri_i64(t1
, t1
, 36);
2377 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2379 tcg_gen_extu_i32_i64(t1
, cc_op
);
2380 tcg_gen_shli_i64(t1
, t1
, 28);
2381 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2382 tcg_temp_free_i64(t1
);
2386 #ifndef CONFIG_USER_ONLY
2387 static ExitStatus
op_idte(DisasContext
*s
, DisasOps
*o
)
2391 check_privileged(s
);
2392 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2393 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2395 m4
= tcg_const_i32(0);
2397 gen_helper_idte(cpu_env
, o
->in1
, o
->in2
, m4
);
2398 tcg_temp_free_i32(m4
);
2402 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2406 check_privileged(s
);
2407 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2408 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2410 m4
= tcg_const_i32(0);
2412 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
, m4
);
2413 tcg_temp_free_i32(m4
);
2417 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2419 check_privileged(s
);
2420 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2425 static ExitStatus
op_keb(DisasContext
*s
, DisasOps
*o
)
2427 gen_helper_keb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2432 static ExitStatus
op_kdb(DisasContext
*s
, DisasOps
*o
)
2434 gen_helper_kdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2439 static ExitStatus
op_kxb(DisasContext
*s
, DisasOps
*o
)
2441 gen_helper_kxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2446 static ExitStatus
op_laa(DisasContext
*s
, DisasOps
*o
)
2448 /* The real output is indeed the original value in memory;
2449 recompute the addition for the computation of CC. */
2450 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2451 s
->insn
->data
| MO_ALIGN
);
2452 /* However, we need to recompute the addition for setting CC. */
2453 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2457 static ExitStatus
op_lan(DisasContext
*s
, DisasOps
*o
)
2459 /* The real output is indeed the original value in memory;
2460 recompute the addition for the computation of CC. */
2461 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2462 s
->insn
->data
| MO_ALIGN
);
2463 /* However, we need to recompute the operation for setting CC. */
2464 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2468 static ExitStatus
op_lao(DisasContext
*s
, DisasOps
*o
)
2470 /* The real output is indeed the original value in memory;
2471 recompute the addition for the computation of CC. */
2472 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2473 s
->insn
->data
| MO_ALIGN
);
2474 /* However, we need to recompute the operation for setting CC. */
2475 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2479 static ExitStatus
op_lax(DisasContext
*s
, DisasOps
*o
)
2481 /* The real output is indeed the original value in memory;
2482 recompute the addition for the computation of CC. */
2483 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2484 s
->insn
->data
| MO_ALIGN
);
2485 /* However, we need to recompute the operation for setting CC. */
2486 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2490 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2492 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2496 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2498 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2502 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2504 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2508 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2510 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2514 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2516 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2517 return_low128(o
->out2
);
2521 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2523 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2524 return_low128(o
->out2
);
2528 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2530 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2534 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2536 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2540 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2542 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2546 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2548 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2552 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2554 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2558 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2560 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2564 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2566 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2570 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2572 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2576 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2578 TCGLabel
*lab
= gen_new_label();
2579 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2580 /* The value is stored even in case of trap. */
2581 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2587 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2589 TCGLabel
*lab
= gen_new_label();
2590 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2591 /* The value is stored even in case of trap. */
2592 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2598 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2600 TCGLabel
*lab
= gen_new_label();
2601 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2602 /* The value is stored even in case of trap. */
2603 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2609 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2611 TCGLabel
*lab
= gen_new_label();
2612 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2613 /* The value is stored even in case of trap. */
2614 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2620 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2622 TCGLabel
*lab
= gen_new_label();
2623 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2624 /* The value is stored even in case of trap. */
2625 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2631 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2635 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2638 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2642 TCGv_i32 t32
= tcg_temp_new_i32();
2645 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2648 t
= tcg_temp_new_i64();
2649 tcg_gen_extu_i32_i64(t
, t32
);
2650 tcg_temp_free_i32(t32
);
2652 z
= tcg_const_i64(0);
2653 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2654 tcg_temp_free_i64(t
);
2655 tcg_temp_free_i64(z
);
2661 #ifndef CONFIG_USER_ONLY
2662 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2664 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2665 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2666 check_privileged(s
);
2667 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2668 tcg_temp_free_i32(r1
);
2669 tcg_temp_free_i32(r3
);
2673 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2675 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2676 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2677 check_privileged(s
);
2678 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2679 tcg_temp_free_i32(r1
);
2680 tcg_temp_free_i32(r3
);
2684 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2686 check_privileged(s
);
2687 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2692 static ExitStatus
op_lpp(DisasContext
*s
, DisasOps
*o
)
2694 check_privileged(s
);
2696 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
2700 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2704 check_privileged(s
);
2705 per_breaking_event(s
);
2707 t1
= tcg_temp_new_i64();
2708 t2
= tcg_temp_new_i64();
2709 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2710 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2711 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2712 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2713 tcg_gen_shli_i64(t1
, t1
, 32);
2714 gen_helper_load_psw(cpu_env
, t1
, t2
);
2715 tcg_temp_free_i64(t1
);
2716 tcg_temp_free_i64(t2
);
2717 return EXIT_NORETURN
;
2720 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2724 check_privileged(s
);
2725 per_breaking_event(s
);
2727 t1
= tcg_temp_new_i64();
2728 t2
= tcg_temp_new_i64();
2729 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2730 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2731 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2732 gen_helper_load_psw(cpu_env
, t1
, t2
);
2733 tcg_temp_free_i64(t1
);
2734 tcg_temp_free_i64(t2
);
2735 return EXIT_NORETURN
;
2739 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2741 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2742 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2743 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2744 tcg_temp_free_i32(r1
);
2745 tcg_temp_free_i32(r3
);
2749 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2751 int r1
= get_field(s
->fields
, r1
);
2752 int r3
= get_field(s
->fields
, r3
);
2755 /* Only one register to read. */
2756 t1
= tcg_temp_new_i64();
2757 if (unlikely(r1
== r3
)) {
2758 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2759 store_reg32_i64(r1
, t1
);
2764 /* First load the values of the first and last registers to trigger
2765 possible page faults. */
2766 t2
= tcg_temp_new_i64();
2767 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2768 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2769 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2770 store_reg32_i64(r1
, t1
);
2771 store_reg32_i64(r3
, t2
);
2773 /* Only two registers to read. */
2774 if (((r1
+ 1) & 15) == r3
) {
2780 /* Then load the remaining registers. Page fault can't occur. */
2782 tcg_gen_movi_i64(t2
, 4);
2785 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2786 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2787 store_reg32_i64(r1
, t1
);
2795 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2797 int r1
= get_field(s
->fields
, r1
);
2798 int r3
= get_field(s
->fields
, r3
);
2801 /* Only one register to read. */
2802 t1
= tcg_temp_new_i64();
2803 if (unlikely(r1
== r3
)) {
2804 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2805 store_reg32h_i64(r1
, t1
);
2810 /* First load the values of the first and last registers to trigger
2811 possible page faults. */
2812 t2
= tcg_temp_new_i64();
2813 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2814 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2815 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2816 store_reg32h_i64(r1
, t1
);
2817 store_reg32h_i64(r3
, t2
);
2819 /* Only two registers to read. */
2820 if (((r1
+ 1) & 15) == r3
) {
2826 /* Then load the remaining registers. Page fault can't occur. */
2828 tcg_gen_movi_i64(t2
, 4);
2831 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2832 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2833 store_reg32h_i64(r1
, t1
);
2841 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2843 int r1
= get_field(s
->fields
, r1
);
2844 int r3
= get_field(s
->fields
, r3
);
2847 /* Only one register to read. */
2848 if (unlikely(r1
== r3
)) {
2849 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2853 /* First load the values of the first and last registers to trigger
2854 possible page faults. */
2855 t1
= tcg_temp_new_i64();
2856 t2
= tcg_temp_new_i64();
2857 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2858 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2859 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2860 tcg_gen_mov_i64(regs
[r1
], t1
);
2863 /* Only two registers to read. */
2864 if (((r1
+ 1) & 15) == r3
) {
2869 /* Then load the remaining registers. Page fault can't occur. */
2871 tcg_gen_movi_i64(t1
, 8);
2874 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2875 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2882 static ExitStatus
op_lpd(DisasContext
*s
, DisasOps
*o
)
2885 TCGMemOp mop
= s
->insn
->data
;
2887 /* In a parallel context, stop the world and single step. */
2888 if (parallel_cpus
) {
2889 potential_page_fault(s
);
2890 gen_exception(EXCP_ATOMIC
);
2891 return EXIT_NORETURN
;
2894 /* In a serial context, perform the two loads ... */
2895 a1
= get_address(s
, 0, get_field(s
->fields
, b1
), get_field(s
->fields
, d1
));
2896 a2
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
2897 tcg_gen_qemu_ld_i64(o
->out
, a1
, get_mem_index(s
), mop
| MO_ALIGN
);
2898 tcg_gen_qemu_ld_i64(o
->out2
, a2
, get_mem_index(s
), mop
| MO_ALIGN
);
2899 tcg_temp_free_i64(a1
);
2900 tcg_temp_free_i64(a2
);
2902 /* ... and indicate that we performed them while interlocked. */
2903 gen_op_movi_cc(s
, 0);
2907 static ExitStatus
op_lpq(DisasContext
*s
, DisasOps
*o
)
2909 gen_helper_lpq(o
->out
, cpu_env
, o
->in2
);
2910 return_low128(o
->out2
);
2914 #ifndef CONFIG_USER_ONLY
2915 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2917 check_privileged(s
);
2918 potential_page_fault(s
);
2919 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2923 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2925 check_privileged(s
);
2926 potential_page_fault(s
);
2927 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2932 static ExitStatus
op_lzrb(DisasContext
*s
, DisasOps
*o
)
2934 tcg_gen_andi_i64(o
->out
, o
->in2
, -256);
2938 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2941 o
->g_out
= o
->g_in2
;
2942 TCGV_UNUSED_I64(o
->in2
);
2947 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
2949 int b2
= get_field(s
->fields
, b2
);
2950 TCGv ar1
= tcg_temp_new_i64();
2953 o
->g_out
= o
->g_in2
;
2954 TCGV_UNUSED_I64(o
->in2
);
2957 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
2958 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
2959 tcg_gen_movi_i64(ar1
, 0);
2961 case PSW_ASC_ACCREG
>> FLAG_MASK_PSW_SHIFT
:
2962 tcg_gen_movi_i64(ar1
, 1);
2964 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
2966 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
2968 tcg_gen_movi_i64(ar1
, 0);
2971 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
2972 tcg_gen_movi_i64(ar1
, 2);
2976 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
2977 tcg_temp_free_i64(ar1
);
2982 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2986 o
->g_out
= o
->g_in1
;
2987 o
->g_out2
= o
->g_in2
;
2988 TCGV_UNUSED_I64(o
->in1
);
2989 TCGV_UNUSED_I64(o
->in2
);
2990 o
->g_in1
= o
->g_in2
= false;
2994 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2996 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2997 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2998 tcg_temp_free_i32(l
);
3002 static ExitStatus
op_mvcin(DisasContext
*s
, DisasOps
*o
)
3004 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3005 gen_helper_mvcin(cpu_env
, l
, o
->addr1
, o
->in2
);
3006 tcg_temp_free_i32(l
);
3010 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
3012 int r1
= get_field(s
->fields
, r1
);
3013 int r2
= get_field(s
->fields
, r2
);
3016 /* r1 and r2 must be even. */
3017 if (r1
& 1 || r2
& 1) {
3018 gen_program_exception(s
, PGM_SPECIFICATION
);
3019 return EXIT_NORETURN
;
3022 t1
= tcg_const_i32(r1
);
3023 t2
= tcg_const_i32(r2
);
3024 gen_helper_mvcl(cc_op
, cpu_env
, t1
, t2
);
3025 tcg_temp_free_i32(t1
);
3026 tcg_temp_free_i32(t2
);
3031 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
3033 int r1
= get_field(s
->fields
, r1
);
3034 int r3
= get_field(s
->fields
, r3
);
3037 /* r1 and r3 must be even. */
3038 if (r1
& 1 || r3
& 1) {
3039 gen_program_exception(s
, PGM_SPECIFICATION
);
3040 return EXIT_NORETURN
;
3043 t1
= tcg_const_i32(r1
);
3044 t3
= tcg_const_i32(r3
);
3045 gen_helper_mvcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3046 tcg_temp_free_i32(t1
);
3047 tcg_temp_free_i32(t3
);
3052 static ExitStatus
op_mvclu(DisasContext
*s
, DisasOps
*o
)
3054 int r1
= get_field(s
->fields
, r1
);
3055 int r3
= get_field(s
->fields
, r3
);
3058 /* r1 and r3 must be even. */
3059 if (r1
& 1 || r3
& 1) {
3060 gen_program_exception(s
, PGM_SPECIFICATION
);
3061 return EXIT_NORETURN
;
3064 t1
= tcg_const_i32(r1
);
3065 t3
= tcg_const_i32(r3
);
3066 gen_helper_mvclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3067 tcg_temp_free_i32(t1
);
3068 tcg_temp_free_i32(t3
);
3073 static ExitStatus
op_mvcos(DisasContext
*s
, DisasOps
*o
)
3075 int r3
= get_field(s
->fields
, r3
);
3076 gen_helper_mvcos(cc_op
, cpu_env
, o
->addr1
, o
->in2
, regs
[r3
]);
3081 #ifndef CONFIG_USER_ONLY
3082 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
3084 int r1
= get_field(s
->fields
, l1
);
3085 check_privileged(s
);
3086 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3091 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
3093 int r1
= get_field(s
->fields
, l1
);
3094 check_privileged(s
);
3095 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3101 static ExitStatus
op_mvn(DisasContext
*s
, DisasOps
*o
)
3103 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3104 gen_helper_mvn(cpu_env
, l
, o
->addr1
, o
->in2
);
3105 tcg_temp_free_i32(l
);
3109 static ExitStatus
op_mvo(DisasContext
*s
, DisasOps
*o
)
3111 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3112 gen_helper_mvo(cpu_env
, l
, o
->addr1
, o
->in2
);
3113 tcg_temp_free_i32(l
);
3117 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
3119 gen_helper_mvpg(cc_op
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3124 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
3126 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3128 return_low128(o
->in2
);
3132 static ExitStatus
op_mvz(DisasContext
*s
, DisasOps
*o
)
3134 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3135 gen_helper_mvz(cpu_env
, l
, o
->addr1
, o
->in2
);
3136 tcg_temp_free_i32(l
);
3140 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
3142 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3146 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
3148 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
3152 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
3154 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3158 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
3160 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3164 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
3166 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3170 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
3172 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3173 return_low128(o
->out2
);
3177 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
3179 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3180 return_low128(o
->out2
);
3184 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
3186 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3187 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3188 tcg_temp_free_i64(r3
);
3192 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
3194 int r3
= get_field(s
->fields
, r3
);
3195 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3199 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
3201 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3202 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3203 tcg_temp_free_i64(r3
);
3207 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
3209 int r3
= get_field(s
->fields
, r3
);
3210 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3214 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
3217 z
= tcg_const_i64(0);
3218 n
= tcg_temp_new_i64();
3219 tcg_gen_neg_i64(n
, o
->in2
);
3220 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
3221 tcg_temp_free_i64(n
);
3222 tcg_temp_free_i64(z
);
3226 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
3228 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3232 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
3234 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3238 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
3240 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3241 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3245 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
3247 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3248 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3249 tcg_temp_free_i32(l
);
3254 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3256 tcg_gen_neg_i64(o
->out
, o
->in2
);
3260 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
3262 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3266 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
3268 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3272 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
3274 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3275 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3279 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3281 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3282 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3283 tcg_temp_free_i32(l
);
3288 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3290 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3294 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3296 int shift
= s
->insn
->data
& 0xff;
3297 int size
= s
->insn
->data
>> 8;
3298 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3301 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3302 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3304 /* Produce the CC from only the bits manipulated. */
3305 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3306 set_cc_nz_u64(s
, cc_dst
);
3310 static ExitStatus
op_pack(DisasContext
*s
, DisasOps
*o
)
3312 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3313 gen_helper_pack(cpu_env
, l
, o
->addr1
, o
->in2
);
3314 tcg_temp_free_i32(l
);
3318 static ExitStatus
op_pka(DisasContext
*s
, DisasOps
*o
)
3320 int l2
= get_field(s
->fields
, l2
) + 1;
3323 /* The length must not exceed 32 bytes. */
3325 gen_program_exception(s
, PGM_SPECIFICATION
);
3326 return EXIT_NORETURN
;
3328 l
= tcg_const_i32(l2
);
3329 gen_helper_pka(cpu_env
, o
->addr1
, o
->in2
, l
);
3330 tcg_temp_free_i32(l
);
3334 static ExitStatus
op_pku(DisasContext
*s
, DisasOps
*o
)
3336 int l2
= get_field(s
->fields
, l2
) + 1;
3339 /* The length must be even and should not exceed 64 bytes. */
3340 if ((l2
& 1) || (l2
> 64)) {
3341 gen_program_exception(s
, PGM_SPECIFICATION
);
3342 return EXIT_NORETURN
;
3344 l
= tcg_const_i32(l2
);
3345 gen_helper_pku(cpu_env
, o
->addr1
, o
->in2
, l
);
3346 tcg_temp_free_i32(l
);
3350 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3352 gen_helper_popcnt(o
->out
, o
->in2
);
3356 #ifndef CONFIG_USER_ONLY
3357 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3359 check_privileged(s
);
3360 gen_helper_ptlb(cpu_env
);
3365 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3367 int i3
= get_field(s
->fields
, i3
);
3368 int i4
= get_field(s
->fields
, i4
);
3369 int i5
= get_field(s
->fields
, i5
);
3370 int do_zero
= i4
& 0x80;
3371 uint64_t mask
, imask
, pmask
;
3374 /* Adjust the arguments for the specific insn. */
3375 switch (s
->fields
->op2
) {
3376 case 0x55: /* risbg */
3381 case 0x5d: /* risbhg */
3384 pmask
= 0xffffffff00000000ull
;
3386 case 0x51: /* risblg */
3389 pmask
= 0x00000000ffffffffull
;
3395 /* MASK is the set of bits to be inserted from R2.
3396 Take care for I3/I4 wraparound. */
3399 mask
^= pmask
>> i4
>> 1;
3401 mask
|= ~(pmask
>> i4
>> 1);
3405 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3406 insns, we need to keep the other half of the register. */
3407 imask
= ~mask
| ~pmask
;
3409 if (s
->fields
->op2
== 0x55) {
3419 if (s
->fields
->op2
== 0x5d) {
3423 /* In some cases we can implement this with extract. */
3424 if (imask
== 0 && pos
== 0 && len
> 0 && len
<= rot
) {
3425 tcg_gen_extract_i64(o
->out
, o
->in2
, 64 - rot
, len
);
3429 /* In some cases we can implement this with deposit. */
3430 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3431 /* Note that we rotate the bits to be inserted to the lsb, not to
3432 the position as described in the PoO. */
3433 rot
= (rot
- pos
) & 63;
3438 /* Rotate the input as necessary. */
3439 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3441 /* Insert the selected bits into the output. */
3444 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3446 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3448 } else if (imask
== 0) {
3449 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3451 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3452 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3453 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3458 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3460 int i3
= get_field(s
->fields
, i3
);
3461 int i4
= get_field(s
->fields
, i4
);
3462 int i5
= get_field(s
->fields
, i5
);
3465 /* If this is a test-only form, arrange to discard the result. */
3467 o
->out
= tcg_temp_new_i64();
3475 /* MASK is the set of bits to be operated on from R2.
3476 Take care for I3/I4 wraparound. */
3479 mask
^= ~0ull >> i4
>> 1;
3481 mask
|= ~(~0ull >> i4
>> 1);
3484 /* Rotate the input as necessary. */
3485 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3488 switch (s
->fields
->op2
) {
3489 case 0x55: /* AND */
3490 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3491 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3494 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3495 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3497 case 0x57: /* XOR */
3498 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3499 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3506 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3507 set_cc_nz_u64(s
, cc_dst
);
3511 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
3513 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3517 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
3519 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3523 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
3525 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3529 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3531 TCGv_i32 t1
= tcg_temp_new_i32();
3532 TCGv_i32 t2
= tcg_temp_new_i32();
3533 TCGv_i32 to
= tcg_temp_new_i32();
3534 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3535 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3536 tcg_gen_rotl_i32(to
, t1
, t2
);
3537 tcg_gen_extu_i32_i64(o
->out
, to
);
3538 tcg_temp_free_i32(t1
);
3539 tcg_temp_free_i32(t2
);
3540 tcg_temp_free_i32(to
);
3544 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3546 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3550 #ifndef CONFIG_USER_ONLY
3551 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3553 check_privileged(s
);
3554 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3559 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
3561 check_privileged(s
);
3562 gen_helper_sacf(cpu_env
, o
->in2
);
3563 /* Addressing mode has changed, so end the block. */
3564 return EXIT_PC_STALE
;
3568 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3570 int sam
= s
->insn
->data
;
3586 /* Bizarre but true, we check the address of the current insn for the
3587 specification exception, not the next to be executed. Thus the PoO
3588 documents that Bad Things Happen two bytes before the end. */
3589 if (s
->pc
& ~mask
) {
3590 gen_program_exception(s
, PGM_SPECIFICATION
);
3591 return EXIT_NORETURN
;
3595 tsam
= tcg_const_i64(sam
);
3596 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3597 tcg_temp_free_i64(tsam
);
3599 /* Always exit the TB, since we (may have) changed execution mode. */
3600 return EXIT_PC_STALE
;
3603 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3605 int r1
= get_field(s
->fields
, r1
);
3606 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3610 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3612 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3616 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3618 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3622 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3624 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3625 return_low128(o
->out2
);
3629 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3631 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3635 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3637 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3641 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3643 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3644 return_low128(o
->out2
);
3648 #ifndef CONFIG_USER_ONLY
3649 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3651 check_privileged(s
);
3652 potential_page_fault(s
);
3653 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3658 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3660 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3661 check_privileged(s
);
3662 potential_page_fault(s
);
3663 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3665 tcg_temp_free_i32(r1
);
3670 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3677 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3679 /* We want to store when the condition is fulfilled, so branch
3680 out when it's not */
3681 c
.cond
= tcg_invert_cond(c
.cond
);
3683 lab
= gen_new_label();
3685 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3687 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3691 r1
= get_field(s
->fields
, r1
);
3692 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3693 switch (s
->insn
->data
) {
3695 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3698 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3700 case 2: /* STOCFH */
3701 h
= tcg_temp_new_i64();
3702 tcg_gen_shri_i64(h
, regs
[r1
], 32);
3703 tcg_gen_qemu_st32(h
, a
, get_mem_index(s
));
3704 tcg_temp_free_i64(h
);
3707 g_assert_not_reached();
3709 tcg_temp_free_i64(a
);
3715 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3717 uint64_t sign
= 1ull << s
->insn
->data
;
3718 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3719 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3720 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3721 /* The arithmetic left shift is curious in that it does not affect
3722 the sign bit. Copy that over from the source unchanged. */
3723 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3724 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3725 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3729 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3731 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3735 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3737 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3741 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3743 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3747 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3749 gen_helper_sfpc(cpu_env
, o
->in2
);
3753 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3755 gen_helper_sfas(cpu_env
, o
->in2
);
3759 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3761 int b2
= get_field(s
->fields
, b2
);
3762 int d2
= get_field(s
->fields
, d2
);
3763 TCGv_i64 t1
= tcg_temp_new_i64();
3764 TCGv_i64 t2
= tcg_temp_new_i64();
3767 switch (s
->fields
->op2
) {
3768 case 0x99: /* SRNM */
3771 case 0xb8: /* SRNMB */
3774 case 0xb9: /* SRNMT */
3780 mask
= (1 << len
) - 1;
3782 /* Insert the value into the appropriate field of the FPC. */
3784 tcg_gen_movi_i64(t1
, d2
& mask
);
3786 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3787 tcg_gen_andi_i64(t1
, t1
, mask
);
3789 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3790 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3791 tcg_temp_free_i64(t1
);
3793 /* Then install the new FPC to set the rounding mode in fpu_status. */
3794 gen_helper_sfpc(cpu_env
, t2
);
3795 tcg_temp_free_i64(t2
);
3799 #ifndef CONFIG_USER_ONLY
3800 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3802 check_privileged(s
);
3803 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3804 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
, 4);
3808 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3810 check_privileged(s
);
3811 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3815 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3817 check_privileged(s
);
3818 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3819 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3820 return EXIT_PC_STALE_NOCHAIN
;
3823 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3825 check_privileged(s
);
3826 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, core_id
));
3830 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3832 gen_helper_stck(o
->out
, cpu_env
);
3833 /* ??? We don't implement clock states. */
3834 gen_op_movi_cc(s
, 0);
3838 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3840 TCGv_i64 c1
= tcg_temp_new_i64();
3841 TCGv_i64 c2
= tcg_temp_new_i64();
3842 gen_helper_stck(c1
, cpu_env
);
3843 /* Shift the 64-bit value into its place as a zero-extended
3844 104-bit value. Note that "bit positions 64-103 are always
3845 non-zero so that they compare differently to STCK"; we set
3846 the least significant bit to 1. */
3847 tcg_gen_shli_i64(c2
, c1
, 56);
3848 tcg_gen_shri_i64(c1
, c1
, 8);
3849 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3850 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3851 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3852 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3853 tcg_temp_free_i64(c1
);
3854 tcg_temp_free_i64(c2
);
3855 /* ??? We don't implement clock states. */
3856 gen_op_movi_cc(s
, 0);
3860 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3862 check_privileged(s
);
3863 gen_helper_sckc(cpu_env
, o
->in2
);
3867 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3869 check_privileged(s
);
3870 gen_helper_stckc(o
->out
, cpu_env
);
3874 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3876 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3877 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3878 check_privileged(s
);
3879 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3880 tcg_temp_free_i32(r1
);
3881 tcg_temp_free_i32(r3
);
3885 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3887 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3888 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3889 check_privileged(s
);
3890 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3891 tcg_temp_free_i32(r1
);
3892 tcg_temp_free_i32(r3
);
3896 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3898 check_privileged(s
);
3899 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpuid
));
3900 tcg_gen_qemu_st_i64(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
3904 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3906 check_privileged(s
);
3907 gen_helper_spt(cpu_env
, o
->in2
);
3911 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3913 check_privileged(s
);
3914 gen_helper_stfl(cpu_env
);
3918 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3920 check_privileged(s
);
3921 gen_helper_stpt(o
->out
, cpu_env
);
3925 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3927 check_privileged(s
);
3928 potential_page_fault(s
);
3929 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3934 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3936 check_privileged(s
);
3937 gen_helper_spx(cpu_env
, o
->in2
);
3941 static ExitStatus
op_xsch(DisasContext
*s
, DisasOps
*o
)
3943 check_privileged(s
);
3944 potential_page_fault(s
);
3945 gen_helper_xsch(cpu_env
, regs
[1]);
3950 static ExitStatus
op_csch(DisasContext
*s
, DisasOps
*o
)
3952 check_privileged(s
);
3953 potential_page_fault(s
);
3954 gen_helper_csch(cpu_env
, regs
[1]);
3959 static ExitStatus
op_hsch(DisasContext
*s
, DisasOps
*o
)
3961 check_privileged(s
);
3962 potential_page_fault(s
);
3963 gen_helper_hsch(cpu_env
, regs
[1]);
3968 static ExitStatus
op_msch(DisasContext
*s
, DisasOps
*o
)
3970 check_privileged(s
);
3971 potential_page_fault(s
);
3972 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
3977 static ExitStatus
op_rchp(DisasContext
*s
, DisasOps
*o
)
3979 check_privileged(s
);
3980 potential_page_fault(s
);
3981 gen_helper_rchp(cpu_env
, regs
[1]);
3986 static ExitStatus
op_rsch(DisasContext
*s
, DisasOps
*o
)
3988 check_privileged(s
);
3989 potential_page_fault(s
);
3990 gen_helper_rsch(cpu_env
, regs
[1]);
3995 static ExitStatus
op_ssch(DisasContext
*s
, DisasOps
*o
)
3997 check_privileged(s
);
3998 potential_page_fault(s
);
3999 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
4004 static ExitStatus
op_stsch(DisasContext
*s
, DisasOps
*o
)
4006 check_privileged(s
);
4007 potential_page_fault(s
);
4008 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
4013 static ExitStatus
op_tsch(DisasContext
*s
, DisasOps
*o
)
4015 check_privileged(s
);
4016 potential_page_fault(s
);
4017 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
4022 static ExitStatus
op_chsc(DisasContext
*s
, DisasOps
*o
)
4024 check_privileged(s
);
4025 potential_page_fault(s
);
4026 gen_helper_chsc(cpu_env
, o
->in2
);
4031 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
4033 check_privileged(s
);
4034 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
4035 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
4039 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
4041 uint64_t i2
= get_field(s
->fields
, i2
);
4044 check_privileged(s
);
4046 /* It is important to do what the instruction name says: STORE THEN.
4047 If we let the output hook perform the store then if we fault and
4048 restart, we'll have the wrong SYSTEM MASK in place. */
4049 t
= tcg_temp_new_i64();
4050 tcg_gen_shri_i64(t
, psw_mask
, 56);
4051 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
4052 tcg_temp_free_i64(t
);
4054 if (s
->fields
->op
== 0xac) {
4055 tcg_gen_andi_i64(psw_mask
, psw_mask
,
4056 (i2
<< 56) | 0x00ffffffffffffffull
);
4058 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
4061 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4062 return EXIT_PC_STALE_NOCHAIN
;
4065 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
4067 check_privileged(s
);
4068 potential_page_fault(s
);
4069 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
4073 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
4075 check_privileged(s
);
4076 potential_page_fault(s
);
4077 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
4082 static ExitStatus
op_stfle(DisasContext
*s
, DisasOps
*o
)
4084 potential_page_fault(s
);
4085 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
4090 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
4092 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
4096 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
4098 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
4102 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
4104 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
4108 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
4110 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
4114 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
4116 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4117 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4118 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
4119 tcg_temp_free_i32(r1
);
4120 tcg_temp_free_i32(r3
);
4124 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
4126 int m3
= get_field(s
->fields
, m3
);
4127 int pos
, base
= s
->insn
->data
;
4128 TCGv_i64 tmp
= tcg_temp_new_i64();
4130 pos
= base
+ ctz32(m3
) * 8;
4133 /* Effectively a 32-bit store. */
4134 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4135 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
4141 /* Effectively a 16-bit store. */
4142 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4143 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
4150 /* Effectively an 8-bit store. */
4151 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4152 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4156 /* This is going to be a sequence of shifts and stores. */
4157 pos
= base
+ 32 - 8;
4160 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4161 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4162 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
4164 m3
= (m3
<< 1) & 0xf;
4169 tcg_temp_free_i64(tmp
);
4173 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
4175 int r1
= get_field(s
->fields
, r1
);
4176 int r3
= get_field(s
->fields
, r3
);
4177 int size
= s
->insn
->data
;
4178 TCGv_i64 tsize
= tcg_const_i64(size
);
4182 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
4184 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
4189 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
4193 tcg_temp_free_i64(tsize
);
4197 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
4199 int r1
= get_field(s
->fields
, r1
);
4200 int r3
= get_field(s
->fields
, r3
);
4201 TCGv_i64 t
= tcg_temp_new_i64();
4202 TCGv_i64 t4
= tcg_const_i64(4);
4203 TCGv_i64 t32
= tcg_const_i64(32);
4206 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
4207 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
4211 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
4215 tcg_temp_free_i64(t
);
4216 tcg_temp_free_i64(t4
);
4217 tcg_temp_free_i64(t32
);
4221 static ExitStatus
op_stpq(DisasContext
*s
, DisasOps
*o
)
4223 gen_helper_stpq(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4227 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
4229 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4230 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4232 gen_helper_srst(cpu_env
, r1
, r2
);
4234 tcg_temp_free_i32(r1
);
4235 tcg_temp_free_i32(r2
);
4240 static ExitStatus
op_srstu(DisasContext
*s
, DisasOps
*o
)
4242 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4243 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4245 gen_helper_srstu(cpu_env
, r1
, r2
);
4247 tcg_temp_free_i32(r1
);
4248 tcg_temp_free_i32(r2
);
4253 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
4255 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4259 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
4264 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4266 /* The !borrow flag is the msb of CC. Since we want the inverse of
4267 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4268 disas_jcc(s
, &cmp
, 8 | 4);
4269 borrow
= tcg_temp_new_i64();
4271 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
4273 TCGv_i32 t
= tcg_temp_new_i32();
4274 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
4275 tcg_gen_extu_i32_i64(borrow
, t
);
4276 tcg_temp_free_i32(t
);
4280 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
4281 tcg_temp_free_i64(borrow
);
4285 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
4292 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
4293 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4294 tcg_temp_free_i32(t
);
4296 t
= tcg_const_i32(s
->ilen
);
4297 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4298 tcg_temp_free_i32(t
);
4300 gen_exception(EXCP_SVC
);
4301 return EXIT_NORETURN
;
4304 static ExitStatus
op_tam(DisasContext
*s
, DisasOps
*o
)
4308 cc
|= (s
->tb
->flags
& FLAG_MASK_64
) ? 2 : 0;
4309 cc
|= (s
->tb
->flags
& FLAG_MASK_32
) ? 1 : 0;
4310 gen_op_movi_cc(s
, cc
);
4314 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
4316 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4321 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4323 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4328 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4330 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4335 #ifndef CONFIG_USER_ONLY
4337 static ExitStatus
op_testblock(DisasContext
*s
, DisasOps
*o
)
4339 check_privileged(s
);
4340 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4345 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
4347 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
4354 static ExitStatus
op_tp(DisasContext
*s
, DisasOps
*o
)
4356 TCGv_i32 l1
= tcg_const_i32(get_field(s
->fields
, l1
) + 1);
4357 gen_helper_tp(cc_op
, cpu_env
, o
->addr1
, l1
);
4358 tcg_temp_free_i32(l1
);
4363 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
4365 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4366 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4367 tcg_temp_free_i32(l
);
4372 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
4374 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4375 return_low128(o
->out2
);
4380 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
4382 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4383 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4384 tcg_temp_free_i32(l
);
4389 static ExitStatus
op_trtr(DisasContext
*s
, DisasOps
*o
)
4391 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4392 gen_helper_trtr(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4393 tcg_temp_free_i32(l
);
4398 static ExitStatus
op_trXX(DisasContext
*s
, DisasOps
*o
)
4400 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4401 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4402 TCGv_i32 sizes
= tcg_const_i32(s
->insn
->opc
& 3);
4403 TCGv_i32 tst
= tcg_temp_new_i32();
4404 int m3
= get_field(s
->fields
, m3
);
4406 if (!s390_has_feat(S390_FEAT_ETF2_ENH
)) {
4410 tcg_gen_movi_i32(tst
, -1);
4412 tcg_gen_extrl_i64_i32(tst
, regs
[0]);
4413 if (s
->insn
->opc
& 3) {
4414 tcg_gen_ext8u_i32(tst
, tst
);
4416 tcg_gen_ext16u_i32(tst
, tst
);
4419 gen_helper_trXX(cc_op
, cpu_env
, r1
, r2
, tst
, sizes
);
4421 tcg_temp_free_i32(r1
);
4422 tcg_temp_free_i32(r2
);
4423 tcg_temp_free_i32(sizes
);
4424 tcg_temp_free_i32(tst
);
4429 static ExitStatus
op_ts(DisasContext
*s
, DisasOps
*o
)
4431 TCGv_i32 t1
= tcg_const_i32(0xff);
4432 tcg_gen_atomic_xchg_i32(t1
, o
->in2
, t1
, get_mem_index(s
), MO_UB
);
4433 tcg_gen_extract_i32(cc_op
, t1
, 7, 1);
4434 tcg_temp_free_i32(t1
);
4439 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
4441 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4442 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4443 tcg_temp_free_i32(l
);
4447 static ExitStatus
op_unpka(DisasContext
*s
, DisasOps
*o
)
4449 int l1
= get_field(s
->fields
, l1
) + 1;
4452 /* The length must not exceed 32 bytes. */
4454 gen_program_exception(s
, PGM_SPECIFICATION
);
4455 return EXIT_NORETURN
;
4457 l
= tcg_const_i32(l1
);
4458 gen_helper_unpka(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4459 tcg_temp_free_i32(l
);
4464 static ExitStatus
op_unpku(DisasContext
*s
, DisasOps
*o
)
4466 int l1
= get_field(s
->fields
, l1
) + 1;
4469 /* The length must be even and should not exceed 64 bytes. */
4470 if ((l1
& 1) || (l1
> 64)) {
4471 gen_program_exception(s
, PGM_SPECIFICATION
);
4472 return EXIT_NORETURN
;
4474 l
= tcg_const_i32(l1
);
4475 gen_helper_unpku(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4476 tcg_temp_free_i32(l
);
4482 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
4484 int d1
= get_field(s
->fields
, d1
);
4485 int d2
= get_field(s
->fields
, d2
);
4486 int b1
= get_field(s
->fields
, b1
);
4487 int b2
= get_field(s
->fields
, b2
);
4488 int l
= get_field(s
->fields
, l1
);
4491 o
->addr1
= get_address(s
, 0, b1
, d1
);
4493 /* If the addresses are identical, this is a store/memset of zero. */
4494 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4495 o
->in2
= tcg_const_i64(0);
4499 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4502 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4506 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4509 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4513 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4516 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4520 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4522 gen_op_movi_cc(s
, 0);
4526 /* But in general we'll defer to a helper. */
4527 o
->in2
= get_address(s
, 0, b2
, d2
);
4528 t32
= tcg_const_i32(l
);
4529 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4530 tcg_temp_free_i32(t32
);
4535 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
4537 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4541 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
4543 int shift
= s
->insn
->data
& 0xff;
4544 int size
= s
->insn
->data
>> 8;
4545 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4548 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4549 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4551 /* Produce the CC from only the bits manipulated. */
4552 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4553 set_cc_nz_u64(s
, cc_dst
);
4557 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
4559 o
->out
= tcg_const_i64(0);
4563 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
4565 o
->out
= tcg_const_i64(0);
4571 /* ====================================================================== */
4572 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4573 the original inputs), update the various cc data structures in order to
4574 be able to compute the new condition code. */
4576 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4578 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4581 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4583 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
4586 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
4588 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
4591 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4593 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4596 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4598 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4601 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4603 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4606 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4608 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4611 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4613 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
4616 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
4618 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
4621 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4623 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4626 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4628 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4631 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4633 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4636 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4638 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4641 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4643 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4646 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4648 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4651 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4653 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4656 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4658 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4661 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4663 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4666 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4668 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
4671 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
4673 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
4674 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
4677 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
4679 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
4682 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
4684 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
4687 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
4689 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
4692 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
4694 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
4697 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
4699 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
4702 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
4704 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
4707 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
4709 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
4712 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
4714 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
4717 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
4719 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
4722 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
4724 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
4727 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
4729 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
4732 /* ====================================================================== */
4733 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4734 with the TCG register to which we will write. Used in combination with
4735 the "wout" generators, in some cases we need a new temporary, and in
4736 some cases we can write to a TCG global. */
4738 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4740 o
->out
= tcg_temp_new_i64();
4742 #define SPEC_prep_new 0
4744 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4746 o
->out
= tcg_temp_new_i64();
4747 o
->out2
= tcg_temp_new_i64();
4749 #define SPEC_prep_new_P 0
4751 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4753 o
->out
= regs
[get_field(f
, r1
)];
4756 #define SPEC_prep_r1 0
4758 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4760 int r1
= get_field(f
, r1
);
4762 o
->out2
= regs
[r1
+ 1];
4763 o
->g_out
= o
->g_out2
= true;
4765 #define SPEC_prep_r1_P SPEC_r1_even
4767 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4769 o
->out
= fregs
[get_field(f
, r1
)];
4772 #define SPEC_prep_f1 0
4774 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4776 int r1
= get_field(f
, r1
);
4778 o
->out2
= fregs
[r1
+ 2];
4779 o
->g_out
= o
->g_out2
= true;
4781 #define SPEC_prep_x1 SPEC_r1_f128
4783 /* ====================================================================== */
4784 /* The "Write OUTput" generators. These generally perform some non-trivial
4785 copy of data to TCG globals, or to main memory. The trivial cases are
4786 generally handled by having a "prep" generator install the TCG global
4787 as the destination of the operation. */
4789 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4791 store_reg(get_field(f
, r1
), o
->out
);
4793 #define SPEC_wout_r1 0
4795 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4797 int r1
= get_field(f
, r1
);
4798 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
4800 #define SPEC_wout_r1_8 0
4802 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4804 int r1
= get_field(f
, r1
);
4805 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
4807 #define SPEC_wout_r1_16 0
4809 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4811 store_reg32_i64(get_field(f
, r1
), o
->out
);
4813 #define SPEC_wout_r1_32 0
4815 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4817 store_reg32h_i64(get_field(f
, r1
), o
->out
);
4819 #define SPEC_wout_r1_32h 0
4821 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4823 int r1
= get_field(f
, r1
);
4824 store_reg32_i64(r1
, o
->out
);
4825 store_reg32_i64(r1
+ 1, o
->out2
);
4827 #define SPEC_wout_r1_P32 SPEC_r1_even
4829 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4831 int r1
= get_field(f
, r1
);
4832 store_reg32_i64(r1
+ 1, o
->out
);
4833 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
4834 store_reg32_i64(r1
, o
->out
);
4836 #define SPEC_wout_r1_D32 SPEC_r1_even
4838 static void wout_r3_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4840 int r3
= get_field(f
, r3
);
4841 store_reg32_i64(r3
, o
->out
);
4842 store_reg32_i64(r3
+ 1, o
->out2
);
4844 #define SPEC_wout_r3_P32 SPEC_r3_even
4846 static void wout_r3_P64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4848 int r3
= get_field(f
, r3
);
4849 store_reg(r3
, o
->out
);
4850 store_reg(r3
+ 1, o
->out2
);
4852 #define SPEC_wout_r3_P64 SPEC_r3_even
4854 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4856 store_freg32_i64(get_field(f
, r1
), o
->out
);
4858 #define SPEC_wout_e1 0
4860 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4862 store_freg(get_field(f
, r1
), o
->out
);
4864 #define SPEC_wout_f1 0
4866 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4868 int f1
= get_field(s
->fields
, r1
);
4869 store_freg(f1
, o
->out
);
4870 store_freg(f1
+ 2, o
->out2
);
4872 #define SPEC_wout_x1 SPEC_r1_f128
4874 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4876 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4877 store_reg32_i64(get_field(f
, r1
), o
->out
);
4880 #define SPEC_wout_cond_r1r2_32 0
4882 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4884 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4885 store_freg32_i64(get_field(f
, r1
), o
->out
);
4888 #define SPEC_wout_cond_e1e2 0
4890 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4892 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4894 #define SPEC_wout_m1_8 0
4896 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4898 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4900 #define SPEC_wout_m1_16 0
4902 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4904 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4906 #define SPEC_wout_m1_32 0
4908 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4910 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4912 #define SPEC_wout_m1_64 0
4914 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4916 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4918 #define SPEC_wout_m2_32 0
4920 static void wout_in2_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4922 store_reg(get_field(f
, r1
), o
->in2
);
4924 #define SPEC_wout_in2_r1 0
4926 static void wout_in2_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4928 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4930 #define SPEC_wout_in2_r1_32 0
4932 /* ====================================================================== */
4933 /* The "INput 1" generators. These load the first operand to an insn. */
4935 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4937 o
->in1
= load_reg(get_field(f
, r1
));
4939 #define SPEC_in1_r1 0
4941 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4943 o
->in1
= regs
[get_field(f
, r1
)];
4946 #define SPEC_in1_r1_o 0
4948 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4950 o
->in1
= tcg_temp_new_i64();
4951 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4953 #define SPEC_in1_r1_32s 0
4955 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4957 o
->in1
= tcg_temp_new_i64();
4958 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4960 #define SPEC_in1_r1_32u 0
4962 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4964 o
->in1
= tcg_temp_new_i64();
4965 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
4967 #define SPEC_in1_r1_sr32 0
4969 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4971 o
->in1
= load_reg(get_field(f
, r1
) + 1);
4973 #define SPEC_in1_r1p1 SPEC_r1_even
4975 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4977 o
->in1
= tcg_temp_new_i64();
4978 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4980 #define SPEC_in1_r1p1_32s SPEC_r1_even
4982 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4984 o
->in1
= tcg_temp_new_i64();
4985 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4987 #define SPEC_in1_r1p1_32u SPEC_r1_even
4989 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4991 int r1
= get_field(f
, r1
);
4992 o
->in1
= tcg_temp_new_i64();
4993 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
4995 #define SPEC_in1_r1_D32 SPEC_r1_even
4997 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4999 o
->in1
= load_reg(get_field(f
, r2
));
5001 #define SPEC_in1_r2 0
5003 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5005 o
->in1
= tcg_temp_new_i64();
5006 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
5008 #define SPEC_in1_r2_sr32 0
5010 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5012 o
->in1
= load_reg(get_field(f
, r3
));
5014 #define SPEC_in1_r3 0
5016 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5018 o
->in1
= regs
[get_field(f
, r3
)];
5021 #define SPEC_in1_r3_o 0
5023 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5025 o
->in1
= tcg_temp_new_i64();
5026 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5028 #define SPEC_in1_r3_32s 0
5030 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5032 o
->in1
= tcg_temp_new_i64();
5033 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5035 #define SPEC_in1_r3_32u 0
5037 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5039 int r3
= get_field(f
, r3
);
5040 o
->in1
= tcg_temp_new_i64();
5041 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
5043 #define SPEC_in1_r3_D32 SPEC_r3_even
5045 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5047 o
->in1
= load_freg32_i64(get_field(f
, r1
));
5049 #define SPEC_in1_e1 0
5051 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5053 o
->in1
= fregs
[get_field(f
, r1
)];
5056 #define SPEC_in1_f1_o 0
5058 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5060 int r1
= get_field(f
, r1
);
5062 o
->out2
= fregs
[r1
+ 2];
5063 o
->g_out
= o
->g_out2
= true;
5065 #define SPEC_in1_x1_o SPEC_r1_f128
5067 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5069 o
->in1
= fregs
[get_field(f
, r3
)];
5072 #define SPEC_in1_f3_o 0
5074 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5076 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
5078 #define SPEC_in1_la1 0
5080 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5082 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5083 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5085 #define SPEC_in1_la2 0
5087 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5090 o
->in1
= tcg_temp_new_i64();
5091 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
5093 #define SPEC_in1_m1_8u 0
5095 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5098 o
->in1
= tcg_temp_new_i64();
5099 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
5101 #define SPEC_in1_m1_16s 0
5103 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5106 o
->in1
= tcg_temp_new_i64();
5107 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
5109 #define SPEC_in1_m1_16u 0
5111 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5114 o
->in1
= tcg_temp_new_i64();
5115 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
5117 #define SPEC_in1_m1_32s 0
5119 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5122 o
->in1
= tcg_temp_new_i64();
5123 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
5125 #define SPEC_in1_m1_32u 0
5127 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5130 o
->in1
= tcg_temp_new_i64();
5131 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
5133 #define SPEC_in1_m1_64 0
5135 /* ====================================================================== */
5136 /* The "INput 2" generators. These load the second operand to an insn. */
5138 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5140 o
->in2
= regs
[get_field(f
, r1
)];
5143 #define SPEC_in2_r1_o 0
5145 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5147 o
->in2
= tcg_temp_new_i64();
5148 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5150 #define SPEC_in2_r1_16u 0
5152 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5154 o
->in2
= tcg_temp_new_i64();
5155 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5157 #define SPEC_in2_r1_32u 0
5159 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5161 int r1
= get_field(f
, r1
);
5162 o
->in2
= tcg_temp_new_i64();
5163 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
5165 #define SPEC_in2_r1_D32 SPEC_r1_even
5167 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5169 o
->in2
= load_reg(get_field(f
, r2
));
5171 #define SPEC_in2_r2 0
5173 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5175 o
->in2
= regs
[get_field(f
, r2
)];
5178 #define SPEC_in2_r2_o 0
5180 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5182 int r2
= get_field(f
, r2
);
5184 o
->in2
= load_reg(r2
);
5187 #define SPEC_in2_r2_nz 0
5189 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5191 o
->in2
= tcg_temp_new_i64();
5192 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5194 #define SPEC_in2_r2_8s 0
5196 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5198 o
->in2
= tcg_temp_new_i64();
5199 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5201 #define SPEC_in2_r2_8u 0
5203 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5205 o
->in2
= tcg_temp_new_i64();
5206 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5208 #define SPEC_in2_r2_16s 0
5210 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5212 o
->in2
= tcg_temp_new_i64();
5213 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5215 #define SPEC_in2_r2_16u 0
5217 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5219 o
->in2
= load_reg(get_field(f
, r3
));
5221 #define SPEC_in2_r3 0
5223 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5225 o
->in2
= tcg_temp_new_i64();
5226 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
5228 #define SPEC_in2_r3_sr32 0
5230 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5232 o
->in2
= tcg_temp_new_i64();
5233 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5235 #define SPEC_in2_r2_32s 0
5237 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5239 o
->in2
= tcg_temp_new_i64();
5240 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5242 #define SPEC_in2_r2_32u 0
5244 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5246 o
->in2
= tcg_temp_new_i64();
5247 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
5249 #define SPEC_in2_r2_sr32 0
5251 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5253 o
->in2
= load_freg32_i64(get_field(f
, r2
));
5255 #define SPEC_in2_e2 0
5257 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5259 o
->in2
= fregs
[get_field(f
, r2
)];
5262 #define SPEC_in2_f2_o 0
5264 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5266 int r2
= get_field(f
, r2
);
5268 o
->in2
= fregs
[r2
+ 2];
5269 o
->g_in1
= o
->g_in2
= true;
5271 #define SPEC_in2_x2_o SPEC_r2_f128
5273 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5275 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
5277 #define SPEC_in2_ra2 0
5279 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5281 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5282 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5284 #define SPEC_in2_a2 0
5286 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5288 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
5290 #define SPEC_in2_ri2 0
5292 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5294 help_l2_shift(s
, f
, o
, 31);
5296 #define SPEC_in2_sh32 0
5298 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5300 help_l2_shift(s
, f
, o
, 63);
5302 #define SPEC_in2_sh64 0
5304 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5307 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
5309 #define SPEC_in2_m2_8u 0
5311 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5314 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
5316 #define SPEC_in2_m2_16s 0
5318 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5321 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5323 #define SPEC_in2_m2_16u 0
5325 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5328 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5330 #define SPEC_in2_m2_32s 0
5332 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5335 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5337 #define SPEC_in2_m2_32u 0
5339 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5342 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5344 #define SPEC_in2_m2_64 0
5346 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5349 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5351 #define SPEC_in2_mri2_16u 0
5353 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5356 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5358 #define SPEC_in2_mri2_32s 0
5360 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5363 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5365 #define SPEC_in2_mri2_32u 0
5367 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5370 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5372 #define SPEC_in2_mri2_64 0
5374 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5376 o
->in2
= tcg_const_i64(get_field(f
, i2
));
5378 #define SPEC_in2_i2 0
5380 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5382 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
5384 #define SPEC_in2_i2_8u 0
5386 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5388 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
5390 #define SPEC_in2_i2_16u 0
5392 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5394 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
5396 #define SPEC_in2_i2_32u 0
5398 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5400 uint64_t i2
= (uint16_t)get_field(f
, i2
);
5401 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5403 #define SPEC_in2_i2_16u_shl 0
5405 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5407 uint64_t i2
= (uint32_t)get_field(f
, i2
);
5408 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5410 #define SPEC_in2_i2_32u_shl 0
5412 #ifndef CONFIG_USER_ONLY
5413 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5415 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
5417 #define SPEC_in2_insn 0
5420 /* ====================================================================== */
5422 /* Find opc within the table of insns. This is formulated as a switch
5423 statement so that (1) we get compile-time notice of cut-paste errors
5424 for duplicated opcodes, and (2) the compiler generates the binary
5425 search tree, rather than us having to post-process the table. */
5427 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5428 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5430 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5432 enum DisasInsnEnum
{
5433 #include "insn-data.def"
5437 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5441 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5443 .help_in1 = in1_##I1, \
5444 .help_in2 = in2_##I2, \
5445 .help_prep = prep_##P, \
5446 .help_wout = wout_##W, \
5447 .help_cout = cout_##CC, \
5448 .help_op = op_##OP, \
5452 /* Allow 0 to be used for NULL in the table below. */
5460 #define SPEC_in1_0 0
5461 #define SPEC_in2_0 0
5462 #define SPEC_prep_0 0
5463 #define SPEC_wout_0 0
5465 /* Give smaller names to the various facilities. */
5466 #define FAC_Z S390_FEAT_ZARCH
5467 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5468 #define FAC_DFP S390_FEAT_DFP
5469 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5470 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5471 #define FAC_EE S390_FEAT_EXECUTE_EXT
5472 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5473 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5474 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5475 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5476 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5477 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5478 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5479 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5480 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5481 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5482 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5483 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
5484 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5485 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5486 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5487 #define FAC_SFLE S390_FEAT_STFLE
5488 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5489 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5490 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5491 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5492 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5493 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
5494 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
5495 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5496 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
5498 static const DisasInsn insn_info
[] = {
5499 #include "insn-data.def"
5503 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5504 case OPC: return &insn_info[insn_ ## NM];
5506 static const DisasInsn
*lookup_opc(uint16_t opc
)
5509 #include "insn-data.def"
5518 /* Extract a field from the insn. The INSN should be left-aligned in
5519 the uint64_t so that we can more easily utilize the big-bit-endian
5520 definitions we extract from the Principles of Operation. */
5522 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5530 /* Zero extract the field from the insn. */
5531 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5533 /* Sign-extend, or un-swap the field as necessary. */
5535 case 0: /* unsigned */
5537 case 1: /* signed */
5538 assert(f
->size
<= 32);
5539 m
= 1u << (f
->size
- 1);
5542 case 2: /* dl+dh split, signed 20 bit. */
5543 r
= ((int8_t)r
<< 12) | (r
>> 8);
5549 /* Validate that the "compressed" encoding we selected above is valid.
5550 I.e. we havn't make two different original fields overlap. */
5551 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5552 o
->presentC
|= 1 << f
->indexC
;
5553 o
->presentO
|= 1 << f
->indexO
;
5555 o
->c
[f
->indexC
] = r
;
5558 /* Lookup the insn at the current PC, extracting the operands into O and
5559 returning the info struct for the insn. Returns NULL for invalid insn. */
5561 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5564 uint64_t insn
, pc
= s
->pc
;
5566 const DisasInsn
*info
;
5568 if (unlikely(s
->ex_value
)) {
5569 /* Drop the EX data now, so that it's clear on exception paths. */
5570 TCGv_i64 zero
= tcg_const_i64(0);
5571 tcg_gen_st_i64(zero
, cpu_env
, offsetof(CPUS390XState
, ex_value
));
5572 tcg_temp_free_i64(zero
);
5574 /* Extract the values saved by EXECUTE. */
5575 insn
= s
->ex_value
& 0xffffffffffff0000ull
;
5576 ilen
= s
->ex_value
& 0xf;
5579 insn
= ld_code2(env
, pc
);
5580 op
= (insn
>> 8) & 0xff;
5581 ilen
= get_ilen(op
);
5587 insn
= ld_code4(env
, pc
) << 32;
5590 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
5593 g_assert_not_reached();
5596 s
->next_pc
= s
->pc
+ ilen
;
5599 /* We can't actually determine the insn format until we've looked up
5600 the full insn opcode. Which we can't do without locating the
5601 secondary opcode. Assume by default that OP2 is at bit 40; for
5602 those smaller insns that don't actually have a secondary opcode
5603 this will correctly result in OP2 = 0. */
5609 case 0xb2: /* S, RRF, RRE, IE */
5610 case 0xb3: /* RRE, RRD, RRF */
5611 case 0xb9: /* RRE, RRF */
5612 case 0xe5: /* SSE, SIL */
5613 op2
= (insn
<< 8) >> 56;
5617 case 0xc0: /* RIL */
5618 case 0xc2: /* RIL */
5619 case 0xc4: /* RIL */
5620 case 0xc6: /* RIL */
5621 case 0xc8: /* SSF */
5622 case 0xcc: /* RIL */
5623 op2
= (insn
<< 12) >> 60;
5625 case 0xc5: /* MII */
5626 case 0xc7: /* SMI */
5627 case 0xd0 ... 0xdf: /* SS */
5633 case 0xee ... 0xf3: /* SS */
5634 case 0xf8 ... 0xfd: /* SS */
5638 op2
= (insn
<< 40) >> 56;
5642 memset(f
, 0, sizeof(*f
));
5647 /* Lookup the instruction. */
5648 info
= lookup_opc(op
<< 8 | op2
);
5650 /* If we found it, extract the operands. */
5652 DisasFormat fmt
= info
->fmt
;
5655 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5656 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
5662 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5664 const DisasInsn
*insn
;
5665 ExitStatus ret
= NO_EXIT
;
5669 /* Search for the insn in the table. */
5670 insn
= extract_insn(env
, s
, &f
);
5672 /* Not found means unimplemented/illegal opcode. */
5674 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
5676 gen_illegal_opcode(s
);
5677 return EXIT_NORETURN
;
5680 #ifndef CONFIG_USER_ONLY
5681 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5682 TCGv_i64 addr
= tcg_const_i64(s
->pc
);
5683 gen_helper_per_ifetch(cpu_env
, addr
);
5684 tcg_temp_free_i64(addr
);
5688 /* Check for insn specification exceptions. */
5690 int spec
= insn
->spec
, excp
= 0, r
;
5692 if (spec
& SPEC_r1_even
) {
5693 r
= get_field(&f
, r1
);
5695 excp
= PGM_SPECIFICATION
;
5698 if (spec
& SPEC_r2_even
) {
5699 r
= get_field(&f
, r2
);
5701 excp
= PGM_SPECIFICATION
;
5704 if (spec
& SPEC_r3_even
) {
5705 r
= get_field(&f
, r3
);
5707 excp
= PGM_SPECIFICATION
;
5710 if (spec
& SPEC_r1_f128
) {
5711 r
= get_field(&f
, r1
);
5713 excp
= PGM_SPECIFICATION
;
5716 if (spec
& SPEC_r2_f128
) {
5717 r
= get_field(&f
, r2
);
5719 excp
= PGM_SPECIFICATION
;
5723 gen_program_exception(s
, excp
);
5724 return EXIT_NORETURN
;
5728 /* Set up the strutures we use to communicate with the helpers. */
5731 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5732 TCGV_UNUSED_I64(o
.out
);
5733 TCGV_UNUSED_I64(o
.out2
);
5734 TCGV_UNUSED_I64(o
.in1
);
5735 TCGV_UNUSED_I64(o
.in2
);
5736 TCGV_UNUSED_I64(o
.addr1
);
5738 /* Implement the instruction. */
5739 if (insn
->help_in1
) {
5740 insn
->help_in1(s
, &f
, &o
);
5742 if (insn
->help_in2
) {
5743 insn
->help_in2(s
, &f
, &o
);
5745 if (insn
->help_prep
) {
5746 insn
->help_prep(s
, &f
, &o
);
5748 if (insn
->help_op
) {
5749 ret
= insn
->help_op(s
, &o
);
5751 if (insn
->help_wout
) {
5752 insn
->help_wout(s
, &f
, &o
);
5754 if (insn
->help_cout
) {
5755 insn
->help_cout(s
, &o
);
5758 /* Free any temporaries created by the helpers. */
5759 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5760 tcg_temp_free_i64(o
.out
);
5762 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5763 tcg_temp_free_i64(o
.out2
);
5765 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5766 tcg_temp_free_i64(o
.in1
);
5768 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5769 tcg_temp_free_i64(o
.in2
);
5771 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5772 tcg_temp_free_i64(o
.addr1
);
5775 #ifndef CONFIG_USER_ONLY
5776 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5777 /* An exception might be triggered, save PSW if not already done. */
5778 if (ret
== NO_EXIT
|| ret
== EXIT_PC_STALE
) {
5779 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
5785 /* Call the helper to check for a possible PER exception. */
5786 gen_helper_per_check_exception(cpu_env
);
5790 /* Advance to the next instruction. */
5795 void gen_intermediate_code(CPUState
*cs
, struct TranslationBlock
*tb
)
5797 CPUS390XState
*env
= cs
->env_ptr
;
5799 target_ulong pc_start
;
5800 uint64_t next_page_start
;
5801 int num_insns
, max_insns
;
5808 if (!(tb
->flags
& FLAG_MASK_64
)) {
5809 pc_start
&= 0x7fffffff;
5814 dc
.cc_op
= CC_OP_DYNAMIC
;
5815 dc
.ex_value
= tb
->cs_base
;
5816 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
5818 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
5821 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5822 if (max_insns
== 0) {
5823 max_insns
= CF_COUNT_MASK
;
5825 if (max_insns
> TCG_MAX_INSNS
) {
5826 max_insns
= TCG_MAX_INSNS
;
5832 tcg_gen_insn_start(dc
.pc
, dc
.cc_op
);
5835 if (unlikely(cpu_breakpoint_test(cs
, dc
.pc
, BP_ANY
))) {
5836 status
= EXIT_PC_STALE
;
5838 /* The address covered by the breakpoint must be included in
5839 [tb->pc, tb->pc + tb->size) in order to for it to be
5840 properly cleared -- thus we increment the PC here so that
5841 the logic setting tb->size below does the right thing. */
5846 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
5850 status
= translate_one(env
, &dc
);
5852 /* If we reach a page boundary, are single stepping,
5853 or exhaust instruction count, stop generation. */
5854 if (status
== NO_EXIT
5855 && (dc
.pc
>= next_page_start
5856 || tcg_op_buf_full()
5857 || num_insns
>= max_insns
5859 || cs
->singlestep_enabled
5861 status
= EXIT_PC_STALE
;
5863 } while (status
== NO_EXIT
);
5865 if (tb
->cflags
& CF_LAST_IO
) {
5874 case EXIT_PC_STALE_NOCHAIN
:
5875 update_psw_addr(&dc
);
5877 case EXIT_PC_UPDATED
:
5878 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5879 cc op type is in env */
5882 case EXIT_PC_CC_UPDATED
:
5883 /* Exit the TB, either by raising a debug exception or by return. */
5885 gen_exception(EXCP_DEBUG
);
5886 } else if (use_exit_tb(&dc
) || status
== EXIT_PC_STALE_NOCHAIN
) {
5889 tcg_gen_lookup_and_goto_ptr(psw_addr
);
5893 g_assert_not_reached();
5896 gen_tb_end(tb
, num_insns
);
5898 tb
->size
= dc
.pc
- pc_start
;
5899 tb
->icount
= num_insns
;
5901 #if defined(S390X_DEBUG_DISAS)
5902 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
5903 && qemu_log_in_addr_range(pc_start
)) {
5905 if (unlikely(dc
.ex_value
)) {
5906 /* ??? Unfortunately log_target_disas can't use host memory. */
5907 qemu_log("IN: EXECUTE %016" PRIx64
"\n", dc
.ex_value
);
5909 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5910 log_target_disas(cs
, pc_start
, dc
.pc
- pc_start
, 1);
5918 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
5921 int cc_op
= data
[1];
5922 env
->psw
.addr
= data
[0];
5923 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {