4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
39 static TCGv_ptr cpu_env
;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext
;
50 typedef struct DisasInsn DisasInsn
;
51 typedef struct DisasFields DisasFields
;
54 struct TranslationBlock
*tb
;
55 const DisasInsn
*insn
;
59 bool singlestep_enabled
;
62 /* Information carried about a condition to be evaluated. */
69 struct { TCGv_i64 a
, b
; } s64
;
70 struct { TCGv_i32 a
, b
; } s32
;
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit
[CC_OP_MAX
];
78 static uint64_t inline_branch_miss
[CC_OP_MAX
];
81 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
83 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
84 if (s
->tb
->flags
& FLAG_MASK_32
) {
85 return pc
| 0x80000000;
91 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
94 S390CPU
*cpu
= S390_CPU(cs
);
95 CPUS390XState
*env
= &cpu
->env
;
99 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
100 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
102 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
103 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
106 for (i
= 0; i
< 16; i
++) {
107 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
109 cpu_fprintf(f
, "\n");
115 for (i
= 0; i
< 16; i
++) {
116 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, get_freg(env
, i
)->ll
);
118 cpu_fprintf(f
, "\n");
124 for (i
= 0; i
< 32; i
++) {
125 cpu_fprintf(f
, "V%02d=%016" PRIx64
"%016" PRIx64
, i
,
126 env
->vregs
[i
][0].ll
, env
->vregs
[i
][1].ll
);
127 cpu_fprintf(f
, (i
% 2) ? " " : "\n");
130 #ifndef CONFIG_USER_ONLY
131 for (i
= 0; i
< 16; i
++) {
132 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
134 cpu_fprintf(f
, "\n");
141 #ifdef DEBUG_INLINE_BRANCHES
142 for (i
= 0; i
< CC_OP_MAX
; i
++) {
143 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
144 inline_branch_miss
[i
], inline_branch_hit
[i
]);
148 cpu_fprintf(f
, "\n");
151 static TCGv_i64 psw_addr
;
152 static TCGv_i64 psw_mask
;
154 static TCGv_i32 cc_op
;
155 static TCGv_i64 cc_src
;
156 static TCGv_i64 cc_dst
;
157 static TCGv_i64 cc_vr
;
159 static char cpu_reg_names
[32][4];
160 static TCGv_i64 regs
[16];
161 static TCGv_i64 fregs
[16];
163 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
165 void s390x_translate_init(void)
169 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
170 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
171 offsetof(CPUS390XState
, psw
.addr
),
173 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
174 offsetof(CPUS390XState
, psw
.mask
),
177 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
179 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
181 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
183 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
186 for (i
= 0; i
< 16; i
++) {
187 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
188 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
189 offsetof(CPUS390XState
, regs
[i
]),
193 for (i
= 0; i
< 16; i
++) {
194 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
195 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
196 offsetof(CPUS390XState
, vregs
[i
][0].d
),
197 cpu_reg_names
[i
+ 16]);
201 static TCGv_i64
load_reg(int reg
)
203 TCGv_i64 r
= tcg_temp_new_i64();
204 tcg_gen_mov_i64(r
, regs
[reg
]);
208 static TCGv_i64
load_freg32_i64(int reg
)
210 TCGv_i64 r
= tcg_temp_new_i64();
211 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
215 static void store_reg(int reg
, TCGv_i64 v
)
217 tcg_gen_mov_i64(regs
[reg
], v
);
220 static void store_freg(int reg
, TCGv_i64 v
)
222 tcg_gen_mov_i64(fregs
[reg
], v
);
225 static void store_reg32_i64(int reg
, TCGv_i64 v
)
227 /* 32 bit register writes keep the upper half */
228 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
231 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
233 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
236 static void store_freg32_i64(int reg
, TCGv_i64 v
)
238 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
241 static void return_low128(TCGv_i64 dest
)
243 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
246 static void update_psw_addr(DisasContext
*s
)
249 tcg_gen_movi_i64(psw_addr
, s
->pc
);
252 static void update_cc_op(DisasContext
*s
)
254 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
255 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
259 static void potential_page_fault(DisasContext
*s
)
265 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
267 return (uint64_t)cpu_lduw_code(env
, pc
);
270 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
272 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
275 static int get_mem_index(DisasContext
*s
)
277 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
278 case PSW_ASC_PRIMARY
>> 32:
280 case PSW_ASC_SECONDARY
>> 32:
282 case PSW_ASC_HOME
>> 32:
290 static void gen_exception(int excp
)
292 TCGv_i32 tmp
= tcg_const_i32(excp
);
293 gen_helper_exception(cpu_env
, tmp
);
294 tcg_temp_free_i32(tmp
);
297 static void gen_program_exception(DisasContext
*s
, int code
)
301 /* Remember what pgm exeption this was. */
302 tmp
= tcg_const_i32(code
);
303 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
304 tcg_temp_free_i32(tmp
);
306 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
307 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
308 tcg_temp_free_i32(tmp
);
310 /* Advance past instruction. */
317 /* Trigger exception. */
318 gen_exception(EXCP_PGM
);
321 static inline void gen_illegal_opcode(DisasContext
*s
)
323 gen_program_exception(s
, PGM_OPERATION
);
326 static inline void gen_trap(DisasContext
*s
)
330 /* Set DXC to 0xff. */
331 t
= tcg_temp_new_i32();
332 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
333 tcg_gen_ori_i32(t
, t
, 0xff00);
334 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
335 tcg_temp_free_i32(t
);
337 gen_program_exception(s
, PGM_DATA
);
340 #ifndef CONFIG_USER_ONLY
341 static void check_privileged(DisasContext
*s
)
343 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
344 gen_program_exception(s
, PGM_PRIVILEGED
);
349 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
351 TCGv_i64 tmp
= tcg_temp_new_i64();
352 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
354 /* Note that d2 is limited to 20 bits, signed. If we crop negative
355        displacements early we create larger immediate addends. */
357 /* Note that addi optimizes the imm==0 case. */
359 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
360 tcg_gen_addi_i64(tmp
, tmp
, d2
);
362 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
364 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
370 tcg_gen_movi_i64(tmp
, d2
);
373 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
379 static inline bool live_cc_data(DisasContext
*s
)
381 return (s
->cc_op
!= CC_OP_DYNAMIC
382 && s
->cc_op
!= CC_OP_STATIC
386 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
388 if (live_cc_data(s
)) {
389 tcg_gen_discard_i64(cc_src
);
390 tcg_gen_discard_i64(cc_dst
);
391 tcg_gen_discard_i64(cc_vr
);
393 s
->cc_op
= CC_OP_CONST0
+ val
;
396 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
398 if (live_cc_data(s
)) {
399 tcg_gen_discard_i64(cc_src
);
400 tcg_gen_discard_i64(cc_vr
);
402 tcg_gen_mov_i64(cc_dst
, dst
);
406 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
409 if (live_cc_data(s
)) {
410 tcg_gen_discard_i64(cc_vr
);
412 tcg_gen_mov_i64(cc_src
, src
);
413 tcg_gen_mov_i64(cc_dst
, dst
);
417 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
418 TCGv_i64 dst
, TCGv_i64 vr
)
420 tcg_gen_mov_i64(cc_src
, src
);
421 tcg_gen_mov_i64(cc_dst
, dst
);
422 tcg_gen_mov_i64(cc_vr
, vr
);
426 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
428 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
431 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
433 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
436 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
438 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
441 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
443 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
446 /* CC value is in env->cc_op */
447 static void set_cc_static(DisasContext
*s
)
449 if (live_cc_data(s
)) {
450 tcg_gen_discard_i64(cc_src
);
451 tcg_gen_discard_i64(cc_dst
);
452 tcg_gen_discard_i64(cc_vr
);
454 s
->cc_op
= CC_OP_STATIC
;
457 /* calculates cc into cc_op */
458 static void gen_op_calc_cc(DisasContext
*s
)
460 TCGv_i32 local_cc_op
;
463 TCGV_UNUSED_I32(local_cc_op
);
464 TCGV_UNUSED_I64(dummy
);
467 dummy
= tcg_const_i64(0);
481 local_cc_op
= tcg_const_i32(s
->cc_op
);
497 /* s->cc_op is the cc value */
498 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
501 /* env->cc_op already is the cc value */
516 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
521 case CC_OP_LTUGTU_32
:
522 case CC_OP_LTUGTU_64
:
529 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
544 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
547 /* unknown operation - assume 3 arguments and cc_op in env */
548 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
554 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
555 tcg_temp_free_i32(local_cc_op
);
557 if (!TCGV_IS_UNUSED_I64(dummy
)) {
558 tcg_temp_free_i64(dummy
);
561 /* We now have cc in cc_op as constant */
565 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
567 /* NOTE: we handle the case where the TB spans two pages here */
568 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
569 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
570 && !s
->singlestep_enabled
571 && !(s
->tb
->cflags
& CF_LAST_IO
));
574 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
576 #ifdef DEBUG_INLINE_BRANCHES
577 inline_branch_miss
[cc_op
]++;
581 static void account_inline_branch(DisasContext
*s
, int cc_op
)
583 #ifdef DEBUG_INLINE_BRANCHES
584 inline_branch_hit
[cc_op
]++;
588 /* Table of mask values to comparison codes, given a comparison as input.
589 For such, CC=3 should not be possible. */
590 static const TCGCond ltgt_cond
[16] = {
591 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
592 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
593 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
594 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
595 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
596 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
597 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
598 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
601 /* Table of mask values to comparison codes, given a logic op as input.
602 For such, only CC=0 and CC=1 should be possible. */
603 static const TCGCond nz_cond
[16] = {
604 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
605 TCG_COND_NEVER
, TCG_COND_NEVER
,
606 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
607 TCG_COND_NE
, TCG_COND_NE
,
608 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
609 TCG_COND_EQ
, TCG_COND_EQ
,
610 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
611 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
614 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
615 details required to generate a TCG comparison. */
616 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
619 enum cc_op old_cc_op
= s
->cc_op
;
621 if (mask
== 15 || mask
== 0) {
622 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
625 c
->g1
= c
->g2
= true;
630 /* Find the TCG condition for the mask + cc op. */
636 cond
= ltgt_cond
[mask
];
637 if (cond
== TCG_COND_NEVER
) {
640 account_inline_branch(s
, old_cc_op
);
643 case CC_OP_LTUGTU_32
:
644 case CC_OP_LTUGTU_64
:
645 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
646 if (cond
== TCG_COND_NEVER
) {
649 account_inline_branch(s
, old_cc_op
);
653 cond
= nz_cond
[mask
];
654 if (cond
== TCG_COND_NEVER
) {
657 account_inline_branch(s
, old_cc_op
);
672 account_inline_branch(s
, old_cc_op
);
687 account_inline_branch(s
, old_cc_op
);
691 switch (mask
& 0xa) {
692 case 8: /* src == 0 -> no one bit found */
695 case 2: /* src != 0 -> one bit found */
701 account_inline_branch(s
, old_cc_op
);
707 case 8 | 2: /* vr == 0 */
710 case 4 | 1: /* vr != 0 */
713 case 8 | 4: /* no carry -> vr >= src */
716 case 2 | 1: /* carry -> vr < src */
722 account_inline_branch(s
, old_cc_op
);
727 /* Note that CC=0 is impossible; treat it as dont-care. */
729 case 2: /* zero -> op1 == op2 */
732 case 4 | 1: /* !zero -> op1 != op2 */
735 case 4: /* borrow (!carry) -> op1 < op2 */
738 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
744 account_inline_branch(s
, old_cc_op
);
749 /* Calculate cc value. */
754 /* Jump based on CC. We'll load up the real cond below;
755 the assignment here merely avoids a compiler warning. */
756 account_noninline_branch(s
, old_cc_op
);
757 old_cc_op
= CC_OP_STATIC
;
758 cond
= TCG_COND_NEVER
;
762 /* Load up the arguments of the comparison. */
764 c
->g1
= c
->g2
= false;
768 c
->u
.s32
.a
= tcg_temp_new_i32();
769 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
770 c
->u
.s32
.b
= tcg_const_i32(0);
773 case CC_OP_LTUGTU_32
:
776 c
->u
.s32
.a
= tcg_temp_new_i32();
777 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
778 c
->u
.s32
.b
= tcg_temp_new_i32();
779 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
786 c
->u
.s64
.b
= tcg_const_i64(0);
790 case CC_OP_LTUGTU_64
:
794 c
->g1
= c
->g2
= true;
800 c
->u
.s64
.a
= tcg_temp_new_i64();
801 c
->u
.s64
.b
= tcg_const_i64(0);
802 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
807 c
->u
.s32
.a
= tcg_temp_new_i32();
808 c
->u
.s32
.b
= tcg_temp_new_i32();
809 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_vr
);
810 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
811 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
813 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_src
);
820 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
821 c
->u
.s64
.b
= tcg_const_i64(0);
833 case 0x8 | 0x4 | 0x2: /* cc != 3 */
835 c
->u
.s32
.b
= tcg_const_i32(3);
837 case 0x8 | 0x4 | 0x1: /* cc != 2 */
839 c
->u
.s32
.b
= tcg_const_i32(2);
841 case 0x8 | 0x2 | 0x1: /* cc != 1 */
843 c
->u
.s32
.b
= tcg_const_i32(1);
845         case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
848 c
->u
.s32
.a
= tcg_temp_new_i32();
849 c
->u
.s32
.b
= tcg_const_i32(0);
850 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
852 case 0x8 | 0x4: /* cc < 2 */
854 c
->u
.s32
.b
= tcg_const_i32(2);
856 case 0x8: /* cc == 0 */
858 c
->u
.s32
.b
= tcg_const_i32(0);
860 case 0x4 | 0x2 | 0x1: /* cc != 0 */
862 c
->u
.s32
.b
= tcg_const_i32(0);
864         case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
867 c
->u
.s32
.a
= tcg_temp_new_i32();
868 c
->u
.s32
.b
= tcg_const_i32(0);
869 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
871 case 0x4: /* cc == 1 */
873 c
->u
.s32
.b
= tcg_const_i32(1);
875 case 0x2 | 0x1: /* cc > 1 */
877 c
->u
.s32
.b
= tcg_const_i32(1);
879 case 0x2: /* cc == 2 */
881 c
->u
.s32
.b
= tcg_const_i32(2);
883 case 0x1: /* cc == 3 */
885 c
->u
.s32
.b
= tcg_const_i32(3);
888 /* CC is masked by something else: (8 >> cc) & mask. */
891 c
->u
.s32
.a
= tcg_const_i32(8);
892 c
->u
.s32
.b
= tcg_const_i32(0);
893 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
894 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
905 static void free_compare(DisasCompare
*c
)
909 tcg_temp_free_i64(c
->u
.s64
.a
);
911 tcg_temp_free_i32(c
->u
.s32
.a
);
916 tcg_temp_free_i64(c
->u
.s64
.b
);
918 tcg_temp_free_i32(c
->u
.s32
.b
);
923 /* ====================================================================== */
924 /* Define the insn format enumeration. */
925 #define F0(N) FMT_##N,
926 #define F1(N, X1) F0(N)
927 #define F2(N, X1, X2) F0(N)
928 #define F3(N, X1, X2, X3) F0(N)
929 #define F4(N, X1, X2, X3, X4) F0(N)
930 #define F5(N, X1, X2, X3, X4, X5) F0(N)
933 #include "insn-format.def"
943 /* Define a structure to hold the decoded fields. We'll store each inside
944 an array indexed by an enum. In order to conserve memory, we'll arrange
945 for fields that do not exist at the same time to overlap, thus the "C"
946 for compact. For checking purposes there is an "O" for original index
947 as well that will be applied to availability bitmaps. */
949 enum DisasFieldIndexO
{
972 enum DisasFieldIndexC
{
1003 struct DisasFields
{
1006 unsigned presentC
:16;
1007 unsigned int presentO
;
1011 /* This is the way fields are to be accessed out of DisasFields. */
1012 #define have_field(S, F) have_field1((S), FLD_O_##F)
1013 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1015 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1017 return (f
->presentO
>> c
) & 1;
1020 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1021 enum DisasFieldIndexC c
)
1023 assert(have_field1(f
, o
));
1027 /* Describe the layout of each field in each format. */
1028 typedef struct DisasField
{
1030 unsigned int size
:8;
1031 unsigned int type
:2;
1032 unsigned int indexC
:6;
1033 enum DisasFieldIndexO indexO
:8;
1036 typedef struct DisasFormatInfo
{
1037 DisasField op
[NUM_C_FIELD
];
1040 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1041 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1042 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1043 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1044 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1045 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1046 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1047 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1048 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1049 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1050 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1051 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1052 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1053 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1055 #define F0(N) { { } },
1056 #define F1(N, X1) { { X1 } },
1057 #define F2(N, X1, X2) { { X1, X2 } },
1058 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1059 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1060 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1062 static const DisasFormatInfo format_info
[] = {
1063 #include "insn-format.def"
1081 /* Generally, we'll extract operands into these structures, operate upon
1082 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1083 of routines below for more details. */
1085 bool g_out
, g_out2
, g_in1
, g_in2
;
1086 TCGv_i64 out
, out2
, in1
, in2
;
1090 /* Instructions can place constraints on their operands, raising specification
1091 exceptions if they are violated. To make this easy to automate, each "in1",
1092 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1093 of the following, or 0. To make this easy to document, we'll put the
1094 SPEC_<name> defines next to <name>. */
1096 #define SPEC_r1_even 1
1097 #define SPEC_r2_even 2
1098 #define SPEC_r3_even 4
1099 #define SPEC_r1_f128 8
1100 #define SPEC_r2_f128 16
1102 /* Return values from translate_one, indicating the state of the TB. */
1104 /* Continue the TB. */
1106 /* We have emitted one or more goto_tb. No fixup required. */
1108 /* We are not using a goto_tb (for whatever reason), but have updated
1109 the PC (for whatever reason), so there's no need to do it again on
1112 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1113 updated the PC for the next instruction to be executed. */
1115 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1116 No following code will be executed. */
1120 typedef enum DisasFacility
{
1121 FAC_Z
, /* zarch (default) */
1122 FAC_CASS
, /* compare and swap and store */
1123 FAC_CASS2
, /* compare and swap and store 2 */
1124 FAC_DFP
, /* decimal floating point */
1125 FAC_DFPR
, /* decimal floating point rounding */
1126 FAC_DO
, /* distinct operands */
1127 FAC_EE
, /* execute extensions */
1128 FAC_EI
, /* extended immediate */
1129 FAC_FPE
, /* floating point extension */
1130 FAC_FPSSH
, /* floating point support sign handling */
1131 FAC_FPRGR
, /* FPR-GR transfer */
1132 FAC_GIE
, /* general instructions extension */
1133 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1134 FAC_HW
, /* high-word */
1135 FAC_IEEEE_SIM
, /* IEEE exception simulation */
1136 FAC_MIE
, /* miscellaneous-instruction-extensions */
1137 FAC_LAT
, /* load-and-trap */
1138 FAC_LOC
, /* load/store on condition */
1139 FAC_LD
, /* long displacement */
1140 FAC_PC
, /* population count */
1141 FAC_SCF
, /* store clock fast */
1142 FAC_SFLE
, /* store facility list extended */
1143 FAC_ILA
, /* interlocked access facility 1 */
1149 DisasFacility fac
:8;
1154 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1155 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1156 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1157 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1158 void (*help_cout
)(DisasContext
*, DisasOps
*);
1159 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1164 /* ====================================================================== */
1165 /* Miscellaneous helpers, used by several operations. */
1167 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1168 DisasOps
*o
, int mask
)
1170 int b2
= get_field(f
, b2
);
1171 int d2
= get_field(f
, d2
);
1174 o
->in2
= tcg_const_i64(d2
& mask
);
1176 o
->in2
= get_address(s
, 0, b2
, d2
);
1177 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1181 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1183 if (dest
== s
->next_pc
) {
1186 if (use_goto_tb(s
, dest
)) {
1189 tcg_gen_movi_i64(psw_addr
, dest
);
1190 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1191 return EXIT_GOTO_TB
;
1193 tcg_gen_movi_i64(psw_addr
, dest
);
1194 return EXIT_PC_UPDATED
;
1198 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1199 bool is_imm
, int imm
, TCGv_i64 cdest
)
1202 uint64_t dest
= s
->pc
+ 2 * imm
;
1205 /* Take care of the special cases first. */
1206 if (c
->cond
== TCG_COND_NEVER
) {
1211 if (dest
== s
->next_pc
) {
1212 /* Branch to next. */
1216 if (c
->cond
== TCG_COND_ALWAYS
) {
1217 ret
= help_goto_direct(s
, dest
);
1221 if (TCGV_IS_UNUSED_I64(cdest
)) {
1222 /* E.g. bcr %r0 -> no branch. */
1226 if (c
->cond
== TCG_COND_ALWAYS
) {
1227 tcg_gen_mov_i64(psw_addr
, cdest
);
1228 ret
= EXIT_PC_UPDATED
;
1233 if (use_goto_tb(s
, s
->next_pc
)) {
1234 if (is_imm
&& use_goto_tb(s
, dest
)) {
1235 /* Both exits can use goto_tb. */
1238 lab
= gen_new_label();
1240 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1242 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1245 /* Branch not taken. */
1247 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1248 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1253 tcg_gen_movi_i64(psw_addr
, dest
);
1254 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1258 /* Fallthru can use goto_tb, but taken branch cannot. */
1259 /* Store taken branch destination before the brcond. This
1260 avoids having to allocate a new local temp to hold it.
1261 We'll overwrite this in the not taken case anyway. */
1263 tcg_gen_mov_i64(psw_addr
, cdest
);
1266 lab
= gen_new_label();
1268 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1270 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1273 /* Branch not taken. */
1276 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1277 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1281 tcg_gen_movi_i64(psw_addr
, dest
);
1283 ret
= EXIT_PC_UPDATED
;
1286 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1287 Most commonly we're single-stepping or some other condition that
1288 disables all use of goto_tb. Just update the PC and exit. */
1290 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1292 cdest
= tcg_const_i64(dest
);
1296 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1299 TCGv_i32 t0
= tcg_temp_new_i32();
1300 TCGv_i64 t1
= tcg_temp_new_i64();
1301 TCGv_i64 z
= tcg_const_i64(0);
1302 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1303 tcg_gen_extu_i32_i64(t1
, t0
);
1304 tcg_temp_free_i32(t0
);
1305 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1306 tcg_temp_free_i64(t1
);
1307 tcg_temp_free_i64(z
);
1311 tcg_temp_free_i64(cdest
);
1313 tcg_temp_free_i64(next
);
1315 ret
= EXIT_PC_UPDATED
;
1323 /* ====================================================================== */
1324 /* The operations. These perform the bulk of the work for any insn,
1325 usually after the operands have been loaded and output initialized. */
1327 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1330 z
= tcg_const_i64(0);
1331 n
= tcg_temp_new_i64();
1332 tcg_gen_neg_i64(n
, o
->in2
);
1333 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1334 tcg_temp_free_i64(n
);
1335 tcg_temp_free_i64(z
);
1339 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1341 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1345 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1347 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1351 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1353 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1354 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1358 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1360 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1364 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1369 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1371 /* The carry flag is the msb of CC, therefore the branch mask that would
1372 create that comparison is 3. Feeding the generated comparison to
1373 setcond produces the carry flag that we desire. */
1374 disas_jcc(s
, &cmp
, 3);
1375 carry
= tcg_temp_new_i64();
1377 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1379 TCGv_i32 t
= tcg_temp_new_i32();
1380 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1381 tcg_gen_extu_i32_i64(carry
, t
);
1382 tcg_temp_free_i32(t
);
1386 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1387 tcg_temp_free_i64(carry
);
1391 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1393 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1397 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1399 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1403 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1405 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1406 return_low128(o
->out2
);
1410 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1412 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1416 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1418 int shift
= s
->insn
->data
& 0xff;
1419 int size
= s
->insn
->data
>> 8;
1420 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1423 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1424 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1425 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1427 /* Produce the CC from only the bits manipulated. */
1428 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1429 set_cc_nz_u64(s
, cc_dst
);
1433 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1435 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1436 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1437 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1438 return EXIT_PC_UPDATED
;
1444 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1446 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1447 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1450 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1452 int m1
= get_field(s
->fields
, m1
);
1453 bool is_imm
= have_field(s
->fields
, i2
);
1454 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1457 disas_jcc(s
, &c
, m1
);
1458 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1461 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1463 int r1
= get_field(s
->fields
, r1
);
1464 bool is_imm
= have_field(s
->fields
, i2
);
1465 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1469 c
.cond
= TCG_COND_NE
;
1474 t
= tcg_temp_new_i64();
1475 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1476 store_reg32_i64(r1
, t
);
1477 c
.u
.s32
.a
= tcg_temp_new_i32();
1478 c
.u
.s32
.b
= tcg_const_i32(0);
1479 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1480 tcg_temp_free_i64(t
);
1482 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1485 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1487 int r1
= get_field(s
->fields
, r1
);
1488 bool is_imm
= have_field(s
->fields
, i2
);
1489 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1492 c
.cond
= TCG_COND_NE
;
1497 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1498 c
.u
.s64
.a
= regs
[r1
];
1499 c
.u
.s64
.b
= tcg_const_i64(0);
1501 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1504 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1506 int r1
= get_field(s
->fields
, r1
);
1507 int r3
= get_field(s
->fields
, r3
);
1508 bool is_imm
= have_field(s
->fields
, i2
);
1509 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1513 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1518 t
= tcg_temp_new_i64();
1519 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1520 c
.u
.s32
.a
= tcg_temp_new_i32();
1521 c
.u
.s32
.b
= tcg_temp_new_i32();
1522 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1523 tcg_gen_trunc_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1524 store_reg32_i64(r1
, t
);
1525 tcg_temp_free_i64(t
);
1527 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1530 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1532 int r1
= get_field(s
->fields
, r1
);
1533 int r3
= get_field(s
->fields
, r3
);
1534 bool is_imm
= have_field(s
->fields
, i2
);
1535 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1538 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1541 if (r1
== (r3
| 1)) {
1542 c
.u
.s64
.b
= load_reg(r3
| 1);
1545 c
.u
.s64
.b
= regs
[r3
| 1];
1549 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1550 c
.u
.s64
.a
= regs
[r1
];
1553 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1556 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1558 int imm
, m3
= get_field(s
->fields
, m3
);
1562 c
.cond
= ltgt_cond
[m3
];
1563 if (s
->insn
->data
) {
1564 c
.cond
= tcg_unsigned_cond(c
.cond
);
1566 c
.is_64
= c
.g1
= c
.g2
= true;
1570 is_imm
= have_field(s
->fields
, i4
);
1572 imm
= get_field(s
->fields
, i4
);
1575 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1576 get_field(s
->fields
, d4
));
1579 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1582 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1584 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1589 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1591 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1596 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1598 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1603 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1605 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1606 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1607 tcg_temp_free_i32(m3
);
1608 gen_set_cc_nz_f32(s
, o
->in2
);
1612 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1614 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1615 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1616 tcg_temp_free_i32(m3
);
1617 gen_set_cc_nz_f64(s
, o
->in2
);
1621 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1623 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1624 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1625 tcg_temp_free_i32(m3
);
1626 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1630 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1632 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1633 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1634 tcg_temp_free_i32(m3
);
1635 gen_set_cc_nz_f32(s
, o
->in2
);
1639 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1641 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1642 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1643 tcg_temp_free_i32(m3
);
1644 gen_set_cc_nz_f64(s
, o
->in2
);
1648 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1650 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1651 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1652 tcg_temp_free_i32(m3
);
1653 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1657 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1659 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1660 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1661 tcg_temp_free_i32(m3
);
1662 gen_set_cc_nz_f32(s
, o
->in2
);
1666 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1668 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1669 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1670 tcg_temp_free_i32(m3
);
1671 gen_set_cc_nz_f64(s
, o
->in2
);
1675 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1677 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1678 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1679 tcg_temp_free_i32(m3
);
1680 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1684 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1686 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1687 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1688 tcg_temp_free_i32(m3
);
1689 gen_set_cc_nz_f32(s
, o
->in2
);
1693 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1695 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1696 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1697 tcg_temp_free_i32(m3
);
1698 gen_set_cc_nz_f64(s
, o
->in2
);
1702 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1704 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1705 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1706 tcg_temp_free_i32(m3
);
1707 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1711 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1713 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1714 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1715 tcg_temp_free_i32(m3
);
1719 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1721 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1722 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1723 tcg_temp_free_i32(m3
);
1727 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1729 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1730 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1731 tcg_temp_free_i32(m3
);
1732 return_low128(o
->out2
);
1736 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1738 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1739 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1740 tcg_temp_free_i32(m3
);
1744 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1746 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1747 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1748 tcg_temp_free_i32(m3
);
1752 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1754 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1755 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1756 tcg_temp_free_i32(m3
);
1757 return_low128(o
->out2
);
1761 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1763 int r2
= get_field(s
->fields
, r2
);
1764 TCGv_i64 len
= tcg_temp_new_i64();
1766 potential_page_fault(s
);
1767 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1769 return_low128(o
->out
);
1771 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1772 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1773 tcg_temp_free_i64(len
);
1778 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1780 int l
= get_field(s
->fields
, l1
);
1785 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1786 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1789 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1790 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1793 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1794 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1797 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1798 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1801 potential_page_fault(s
);
1802 vl
= tcg_const_i32(l
);
1803 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1804 tcg_temp_free_i32(vl
);
1808 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1812 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1814 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1815 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1816 potential_page_fault(s
);
1817 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1818 tcg_temp_free_i32(r1
);
1819 tcg_temp_free_i32(r3
);
1824 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1826 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1827 TCGv_i32 t1
= tcg_temp_new_i32();
1828 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
1829 potential_page_fault(s
);
1830 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1832 tcg_temp_free_i32(t1
);
1833 tcg_temp_free_i32(m3
);
1837 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1839 potential_page_fault(s
);
1840 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1842 return_low128(o
->in2
);
1846 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1848 TCGv_i64 t
= tcg_temp_new_i64();
1849 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1850 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1851 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1852 tcg_temp_free_i64(t
);
1856 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1858 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1859 int d2
= get_field(s
->fields
, d2
);
1860 int b2
= get_field(s
->fields
, b2
);
1861 int is_64
= s
->insn
->data
;
1862 TCGv_i64 addr
, mem
, cc
, z
;
1864 /* Note that in1 = R3 (new value) and
1865 in2 = (zero-extended) R1 (expected value). */
1867 /* Load the memory into the (temporary) output. While the PoO only talks
1868 about moving the memory to R1 on inequality, if we include equality it
1869 means that R1 is equal to the memory in all conditions. */
1870 addr
= get_address(s
, 0, b2
, d2
);
1872 tcg_gen_qemu_ld64(o
->out
, addr
, get_mem_index(s
));
1874 tcg_gen_qemu_ld32u(o
->out
, addr
, get_mem_index(s
));
1877 /* Are the memory and expected values (un)equal? Note that this setcond
1878 produces the output CC value, thus the NE sense of the test. */
1879 cc
= tcg_temp_new_i64();
1880 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1882 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1883 Recall that we are allowed to unconditionally issue the store (and
1884 thus any possible write trap), so (re-)store the original contents
1885 of MEM in case of inequality. */
1886 z
= tcg_const_i64(0);
1887 mem
= tcg_temp_new_i64();
1888 tcg_gen_movcond_i64(TCG_COND_EQ
, mem
, cc
, z
, o
->in1
, o
->out
);
1890 tcg_gen_qemu_st64(mem
, addr
, get_mem_index(s
));
1892 tcg_gen_qemu_st32(mem
, addr
, get_mem_index(s
));
1894 tcg_temp_free_i64(z
);
1895 tcg_temp_free_i64(mem
);
1896 tcg_temp_free_i64(addr
);
1898 /* Store CC back to cc_op. Wait until after the store so that any
1899 exception gets the old cc_op value. */
1900 tcg_gen_trunc_i64_i32(cc_op
, cc
);
1901 tcg_temp_free_i64(cc
);
1906 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1908 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1909 int r1
= get_field(s
->fields
, r1
);
1910 int r3
= get_field(s
->fields
, r3
);
1911 int d2
= get_field(s
->fields
, d2
);
1912 int b2
= get_field(s
->fields
, b2
);
1913 TCGv_i64 addrh
, addrl
, memh
, meml
, outh
, outl
, cc
, z
;
1915 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1917 addrh
= get_address(s
, 0, b2
, d2
);
1918 addrl
= get_address(s
, 0, b2
, d2
+ 8);
1919 outh
= tcg_temp_new_i64();
1920 outl
= tcg_temp_new_i64();
1922 tcg_gen_qemu_ld64(outh
, addrh
, get_mem_index(s
));
1923 tcg_gen_qemu_ld64(outl
, addrl
, get_mem_index(s
));
1925 /* Fold the double-word compare with arithmetic. */
1926 cc
= tcg_temp_new_i64();
1927 z
= tcg_temp_new_i64();
1928 tcg_gen_xor_i64(cc
, outh
, regs
[r1
]);
1929 tcg_gen_xor_i64(z
, outl
, regs
[r1
+ 1]);
1930 tcg_gen_or_i64(cc
, cc
, z
);
1931 tcg_gen_movi_i64(z
, 0);
1932 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, cc
, z
);
1934 memh
= tcg_temp_new_i64();
1935 meml
= tcg_temp_new_i64();
1936 tcg_gen_movcond_i64(TCG_COND_EQ
, memh
, cc
, z
, regs
[r3
], outh
);
1937 tcg_gen_movcond_i64(TCG_COND_EQ
, meml
, cc
, z
, regs
[r3
+ 1], outl
);
1938 tcg_temp_free_i64(z
);
1940 tcg_gen_qemu_st64(memh
, addrh
, get_mem_index(s
));
1941 tcg_gen_qemu_st64(meml
, addrl
, get_mem_index(s
));
1942 tcg_temp_free_i64(memh
);
1943 tcg_temp_free_i64(meml
);
1944 tcg_temp_free_i64(addrh
);
1945 tcg_temp_free_i64(addrl
);
1947 /* Save back state now that we've passed all exceptions. */
1948 tcg_gen_mov_i64(regs
[r1
], outh
);
1949 tcg_gen_mov_i64(regs
[r1
+ 1], outl
);
1950 tcg_gen_trunc_i64_i32(cc_op
, cc
);
1951 tcg_temp_free_i64(outh
);
1952 tcg_temp_free_i64(outl
);
1953 tcg_temp_free_i64(cc
);
1958 #ifndef CONFIG_USER_ONLY
1959 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
1961 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1962 check_privileged(s
);
1963 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
1964 tcg_temp_free_i32(r1
);
1970 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
1972 TCGv_i64 t1
= tcg_temp_new_i64();
1973 TCGv_i32 t2
= tcg_temp_new_i32();
1974 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
1975 gen_helper_cvd(t1
, t2
);
1976 tcg_temp_free_i32(t2
);
1977 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
1978 tcg_temp_free_i64(t1
);
1982 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
1984 int m3
= get_field(s
->fields
, m3
);
1985 TCGLabel
*lab
= gen_new_label();
1988 c
= tcg_invert_cond(ltgt_cond
[m3
]);
1989 if (s
->insn
->data
) {
1990 c
= tcg_unsigned_cond(c
);
1992 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2001 #ifndef CONFIG_USER_ONLY
2002 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2006 check_privileged(s
);
2007 potential_page_fault(s
);
2009 /* We pretend the format is RX_a so that D2 is the field we want. */
2010 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
2011 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
2012 tcg_temp_free_i32(tmp
);
2017 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2019 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2020 return_low128(o
->out
);
2024 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2026 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2027 return_low128(o
->out
);
2031 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2033 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2034 return_low128(o
->out
);
2038 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2040 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2041 return_low128(o
->out
);
2045 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2047 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2051 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2053 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2057 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2059 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2060 return_low128(o
->out2
);
2064 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2066 int r2
= get_field(s
->fields
, r2
);
2067 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2071 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2073 /* No cache information provided. */
2074 tcg_gen_movi_i64(o
->out
, -1);
2078 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2080 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2084 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2086 int r1
= get_field(s
->fields
, r1
);
2087 int r2
= get_field(s
->fields
, r2
);
2088 TCGv_i64 t
= tcg_temp_new_i64();
2090 /* Note the "subsequently" in the PoO, which implies a defined result
2091 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2092 tcg_gen_shri_i64(t
, psw_mask
, 32);
2093 store_reg32_i64(r1
, t
);
2095 store_reg32_i64(r2
, psw_mask
);
2098 tcg_temp_free_i64(t
);
2102 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2104 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2105 tb->flags, (ab)use the tb->cs_base field as the address of
2106 the template in memory, and grab 8 bits of tb->flags/cflags for
2107 the contents of the register. We would then recognize all this
2108 in gen_intermediate_code_internal, generating code for exactly
2109 one instruction. This new TB then gets executed normally.
2111 On the other hand, this seems to be mostly used for modifying
2112 MVC inside of memcpy, which needs a helper call anyway. So
2113 perhaps this doesn't bear thinking about any further. */
2120 tmp
= tcg_const_i64(s
->next_pc
);
2121 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2122 tcg_temp_free_i64(tmp
);
2127 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2129 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2130 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2131 tcg_temp_free_i32(m3
);
2135 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2137 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2138 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2139 tcg_temp_free_i32(m3
);
2143 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2145 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2146 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2147 return_low128(o
->out2
);
2148 tcg_temp_free_i32(m3
);
2152 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2154 /* We'll use the original input for cc computation, since we get to
2155 compare that against 0, which ought to be better than comparing
2156 the real output against 64. It also lets cc_dst be a convenient
2157 temporary during our computation. */
2158 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2160 /* R1 = IN ? CLZ(IN) : 64. */
2161 gen_helper_clz(o
->out
, o
->in2
);
2163 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2164 value by 64, which is undefined. But since the shift is 64 iff the
2165 input is zero, we still get the correct result after and'ing. */
2166 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2167 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2168 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2172 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2174 int m3
= get_field(s
->fields
, m3
);
2175 int pos
, len
, base
= s
->insn
->data
;
2176 TCGv_i64 tmp
= tcg_temp_new_i64();
2181 /* Effectively a 32-bit load. */
2182 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2189 /* Effectively a 16-bit load. */
2190 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2198 /* Effectively an 8-bit load. */
2199 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2204 pos
= base
+ ctz32(m3
) * 8;
2205 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2206 ccm
= ((1ull << len
) - 1) << pos
;
2210 /* This is going to be a sequence of loads and inserts. */
2211 pos
= base
+ 32 - 8;
2215 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2216 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2217 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2220 m3
= (m3
<< 1) & 0xf;
2226 tcg_gen_movi_i64(tmp
, ccm
);
2227 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2228 tcg_temp_free_i64(tmp
);
2232 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2234 int shift
= s
->insn
->data
& 0xff;
2235 int size
= s
->insn
->data
>> 8;
2236 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2240 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2245 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2247 t1
= tcg_temp_new_i64();
2248 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2249 tcg_gen_shri_i64(t1
, t1
, 36);
2250 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2252 tcg_gen_extu_i32_i64(t1
, cc_op
);
2253 tcg_gen_shli_i64(t1
, t1
, 28);
2254 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2255 tcg_temp_free_i64(t1
);
2259 #ifndef CONFIG_USER_ONLY
2260 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2262 check_privileged(s
);
2263 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2267 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2269 check_privileged(s
);
2270 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2275 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2277 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2281 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2283 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2287 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2289 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2293 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2295 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2299 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2301 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2302 return_low128(o
->out2
);
2306 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2308 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2309 return_low128(o
->out2
);
2313 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2315 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2319 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2321 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2325 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2327 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2331 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2333 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2337 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2339 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2343 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2345 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2349 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2351 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2355 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2357 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2361 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2363 TCGLabel
*lab
= gen_new_label();
2364 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2365 /* The value is stored even in case of trap. */
2366 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2372 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2374 TCGLabel
*lab
= gen_new_label();
2375 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2376 /* The value is stored even in case of trap. */
2377 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2383 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2385 TCGLabel
*lab
= gen_new_label();
2386 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2387 /* The value is stored even in case of trap. */
2388 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2394 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2396 TCGLabel
*lab
= gen_new_label();
2397 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2398 /* The value is stored even in case of trap. */
2399 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2405 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2407 TCGLabel
*lab
= gen_new_label();
2408 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2409 /* The value is stored even in case of trap. */
2410 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2416 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2420 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2423 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2427 TCGv_i32 t32
= tcg_temp_new_i32();
2430 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2433 t
= tcg_temp_new_i64();
2434 tcg_gen_extu_i32_i64(t
, t32
);
2435 tcg_temp_free_i32(t32
);
2437 z
= tcg_const_i64(0);
2438 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2439 tcg_temp_free_i64(t
);
2440 tcg_temp_free_i64(z
);
2446 #ifndef CONFIG_USER_ONLY
2447 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2449 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2450 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2451 check_privileged(s
);
2452 potential_page_fault(s
);
2453 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2454 tcg_temp_free_i32(r1
);
2455 tcg_temp_free_i32(r3
);
2459 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2461 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2462 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2463 check_privileged(s
);
2464 potential_page_fault(s
);
2465 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2466 tcg_temp_free_i32(r1
);
2467 tcg_temp_free_i32(r3
);
2470 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2472 check_privileged(s
);
2473 potential_page_fault(s
);
2474 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2479 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2483 check_privileged(s
);
2485 t1
= tcg_temp_new_i64();
2486 t2
= tcg_temp_new_i64();
2487 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2488 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2489 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2490 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2491 tcg_gen_shli_i64(t1
, t1
, 32);
2492 gen_helper_load_psw(cpu_env
, t1
, t2
);
2493 tcg_temp_free_i64(t1
);
2494 tcg_temp_free_i64(t2
);
2495 return EXIT_NORETURN
;
2498 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2502 check_privileged(s
);
2504 t1
= tcg_temp_new_i64();
2505 t2
= tcg_temp_new_i64();
2506 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2507 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2508 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2509 gen_helper_load_psw(cpu_env
, t1
, t2
);
2510 tcg_temp_free_i64(t1
);
2511 tcg_temp_free_i64(t2
);
2512 return EXIT_NORETURN
;
2516 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2518 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2519 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2520 potential_page_fault(s
);
2521 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2522 tcg_temp_free_i32(r1
);
2523 tcg_temp_free_i32(r3
);
2527 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2529 int r1
= get_field(s
->fields
, r1
);
2530 int r3
= get_field(s
->fields
, r3
);
2533 /* Only one register to read. */
2534 t1
= tcg_temp_new_i64();
2535 if (unlikely(r1
== r3
)) {
2536 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2537 store_reg32_i64(r1
, t1
);
2542 /* First load the values of the first and last registers to trigger
2543 possible page faults. */
2544 t2
= tcg_temp_new_i64();
2545 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2546 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2547 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2548 store_reg32_i64(r1
, t1
);
2549 store_reg32_i64(r3
, t2
);
2551 /* Only two registers to read. */
2552 if (((r1
+ 1) & 15) == r3
) {
2558 /* Then load the remaining registers. Page fault can't occur. */
2560 tcg_gen_movi_i64(t2
, 4);
2563 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2564 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2565 store_reg32_i64(r1
, t1
);
2573 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2575 int r1
= get_field(s
->fields
, r1
);
2576 int r3
= get_field(s
->fields
, r3
);
2579 /* Only one register to read. */
2580 t1
= tcg_temp_new_i64();
2581 if (unlikely(r1
== r3
)) {
2582 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2583 store_reg32h_i64(r1
, t1
);
2588 /* First load the values of the first and last registers to trigger
2589 possible page faults. */
2590 t2
= tcg_temp_new_i64();
2591 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2592 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2593 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2594 store_reg32h_i64(r1
, t1
);
2595 store_reg32h_i64(r3
, t2
);
2597 /* Only two registers to read. */
2598 if (((r1
+ 1) & 15) == r3
) {
2604 /* Then load the remaining registers. Page fault can't occur. */
2606 tcg_gen_movi_i64(t2
, 4);
2609 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2610 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2611 store_reg32h_i64(r1
, t1
);
2619 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2621 int r1
= get_field(s
->fields
, r1
);
2622 int r3
= get_field(s
->fields
, r3
);
2625 /* Only one register to read. */
2626 if (unlikely(r1
== r3
)) {
2627 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2631 /* First load the values of the first and last registers to trigger
2632 possible page faults. */
2633 t1
= tcg_temp_new_i64();
2634 t2
= tcg_temp_new_i64();
2635 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2636 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2637 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2638 tcg_gen_mov_i64(regs
[r1
], t1
);
2641 /* Only two registers to read. */
2642 if (((r1
+ 1) & 15) == r3
) {
2647 /* Then load the remaining registers. Page fault can't occur. */
2649 tcg_gen_movi_i64(t1
, 8);
2652 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2653 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2660 #ifndef CONFIG_USER_ONLY
2661 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2663 check_privileged(s
);
2664 potential_page_fault(s
);
2665 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2669 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2671 check_privileged(s
);
2672 potential_page_fault(s
);
2673 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2678 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2681 o
->g_out
= o
->g_in2
;
2682 TCGV_UNUSED_I64(o
->in2
);
2687 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
2689 int b2
= get_field(s
->fields
, b2
);
2690 TCGv ar1
= tcg_temp_new_i64();
2693 o
->g_out
= o
->g_in2
;
2694 TCGV_UNUSED_I64(o
->in2
);
2697 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
2698 case PSW_ASC_PRIMARY
>> 32:
2699 tcg_gen_movi_i64(ar1
, 0);
2701 case PSW_ASC_ACCREG
>> 32:
2702 tcg_gen_movi_i64(ar1
, 1);
2704 case PSW_ASC_SECONDARY
>> 32:
2706 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
2708 tcg_gen_movi_i64(ar1
, 0);
2711 case PSW_ASC_HOME
>> 32:
2712 tcg_gen_movi_i64(ar1
, 2);
2716 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
2717 tcg_temp_free_i64(ar1
);
2722 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2726 o
->g_out
= o
->g_in1
;
2727 o
->g_out2
= o
->g_in2
;
2728 TCGV_UNUSED_I64(o
->in1
);
2729 TCGV_UNUSED_I64(o
->in2
);
2730 o
->g_in1
= o
->g_in2
= false;
2734 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2736 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2737 potential_page_fault(s
);
2738 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2739 tcg_temp_free_i32(l
);
2743 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2745 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2746 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2747 potential_page_fault(s
);
2748 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2749 tcg_temp_free_i32(r1
);
2750 tcg_temp_free_i32(r2
);
2755 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2757 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2758 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2759 potential_page_fault(s
);
2760 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2761 tcg_temp_free_i32(r1
);
2762 tcg_temp_free_i32(r3
);
2767 #ifndef CONFIG_USER_ONLY
2768 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2770 int r1
= get_field(s
->fields
, l1
);
2771 check_privileged(s
);
2772 potential_page_fault(s
);
2773 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2778 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2780 int r1
= get_field(s
->fields
, l1
);
2781 check_privileged(s
);
2782 potential_page_fault(s
);
2783 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2789 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2791 potential_page_fault(s
);
2792 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2797 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2799 potential_page_fault(s
);
2800 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2802 return_low128(o
->in2
);
2806 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2808 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2812 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2814 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
2818 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2820 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2824 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2826 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2830 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2832 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2836 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2838 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2839 return_low128(o
->out2
);
2843 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2845 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2846 return_low128(o
->out2
);
2850 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2852 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2853 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2854 tcg_temp_free_i64(r3
);
2858 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2860 int r3
= get_field(s
->fields
, r3
);
2861 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2865 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2867 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2868 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2869 tcg_temp_free_i64(r3
);
2873 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2875 int r3
= get_field(s
->fields
, r3
);
2876 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2880 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2883 z
= tcg_const_i64(0);
2884 n
= tcg_temp_new_i64();
2885 tcg_gen_neg_i64(n
, o
->in2
);
2886 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
2887 tcg_temp_free_i64(n
);
2888 tcg_temp_free_i64(z
);
2892 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2894 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2898 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2900 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2904 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2906 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2907 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2911 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2913 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2914 potential_page_fault(s
);
2915 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2916 tcg_temp_free_i32(l
);
2921 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2923 tcg_gen_neg_i64(o
->out
, o
->in2
);
2927 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2929 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2933 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2935 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2939 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2941 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2942 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2946 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2948 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2949 potential_page_fault(s
);
2950 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2951 tcg_temp_free_i32(l
);
2956 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2958 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2962 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2964 int shift
= s
->insn
->data
& 0xff;
2965 int size
= s
->insn
->data
>> 8;
2966 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2969 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2970 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2972 /* Produce the CC from only the bits manipulated. */
2973 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2974 set_cc_nz_u64(s
, cc_dst
);
2978 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
2980 gen_helper_popcnt(o
->out
, o
->in2
);
2984 #ifndef CONFIG_USER_ONLY
2985 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
2987 check_privileged(s
);
2988 gen_helper_ptlb(cpu_env
);
2993 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
2995 int i3
= get_field(s
->fields
, i3
);
2996 int i4
= get_field(s
->fields
, i4
);
2997 int i5
= get_field(s
->fields
, i5
);
2998 int do_zero
= i4
& 0x80;
2999 uint64_t mask
, imask
, pmask
;
3002 /* Adjust the arguments for the specific insn. */
3003 switch (s
->fields
->op2
) {
3004 case 0x55: /* risbg */
3009 case 0x5d: /* risbhg */
3012 pmask
= 0xffffffff00000000ull
;
3014 case 0x51: /* risblg */
3017 pmask
= 0x00000000ffffffffull
;
3023 /* MASK is the set of bits to be inserted from R2.
3024 Take care for I3/I4 wraparound. */
3027 mask
^= pmask
>> i4
>> 1;
3029 mask
|= ~(pmask
>> i4
>> 1);
3033 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3034 insns, we need to keep the other half of the register. */
3035 imask
= ~mask
| ~pmask
;
3037 if (s
->fields
->op2
== 0x55) {
3044 /* In some cases we can implement this with deposit, which can be more
3045 efficient on some hosts. */
3046 if (~mask
== imask
&& i3
<= i4
) {
3047 if (s
->fields
->op2
== 0x5d) {
3050 /* Note that we rotate the bits to be inserted to the lsb, not to
3051 the position as described in the PoO. */
3054 rot
= (i5
- pos
) & 63;
3060 /* Rotate the input as necessary. */
3061 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3063 /* Insert the selected bits into the output. */
3065 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3066 } else if (imask
== 0) {
3067 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3069 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3070 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3071 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3076 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3078 int i3
= get_field(s
->fields
, i3
);
3079 int i4
= get_field(s
->fields
, i4
);
3080 int i5
= get_field(s
->fields
, i5
);
3083 /* If this is a test-only form, arrange to discard the result. */
3085 o
->out
= tcg_temp_new_i64();
3093 /* MASK is the set of bits to be operated on from R2.
3094 Take care for I3/I4 wraparound. */
3097 mask
^= ~0ull >> i4
>> 1;
3099 mask
|= ~(~0ull >> i4
>> 1);
3102 /* Rotate the input as necessary. */
3103 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3106 switch (s
->fields
->op2
) {
3107 case 0x55: /* AND */
3108 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3109 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3112 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3113 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3115 case 0x57: /* XOR */
3116 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3117 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3124 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3125 set_cc_nz_u64(s
, cc_dst
);
3129 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
3131 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3135 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
3137 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3141 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
3143 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3147 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3149 TCGv_i32 t1
= tcg_temp_new_i32();
3150 TCGv_i32 t2
= tcg_temp_new_i32();
3151 TCGv_i32 to
= tcg_temp_new_i32();
3152 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
3153 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
3154 tcg_gen_rotl_i32(to
, t1
, t2
);
3155 tcg_gen_extu_i32_i64(o
->out
, to
);
3156 tcg_temp_free_i32(t1
);
3157 tcg_temp_free_i32(t2
);
3158 tcg_temp_free_i32(to
);
3162 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3164 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3168 #ifndef CONFIG_USER_ONLY
3169 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3171 check_privileged(s
);
3172 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3177 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
3179 check_privileged(s
);
3180 gen_helper_sacf(cpu_env
, o
->in2
);
3181 /* Addressing mode has changed, so end the block. */
3182 return EXIT_PC_STALE
;
3186 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3188 int sam
= s
->insn
->data
;
3204 /* Bizarre but true, we check the address of the current insn for the
3205 specification exception, not the next to be executed. Thus the PoO
3206 documents that Bad Things Happen two bytes before the end. */
3207 if (s
->pc
& ~mask
) {
3208 gen_program_exception(s
, PGM_SPECIFICATION
);
3209 return EXIT_NORETURN
;
3213 tsam
= tcg_const_i64(sam
);
3214 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3215 tcg_temp_free_i64(tsam
);
3217 /* Always exit the TB, since we (may have) changed execution mode. */
3218 return EXIT_PC_STALE
;
3221 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3223 int r1
= get_field(s
->fields
, r1
);
3224 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3228 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3230 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3234 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3236 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3240 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3242 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3243 return_low128(o
->out2
);
3247 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3249 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3253 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3255 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3259 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3261 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3262 return_low128(o
->out2
);
3266 #ifndef CONFIG_USER_ONLY
3267 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3269 check_privileged(s
);
3270 potential_page_fault(s
);
3271 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3276 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3278 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3279 check_privileged(s
);
3280 potential_page_fault(s
);
3281 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3282 tcg_temp_free_i32(r1
);
3287 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3294 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3296 /* We want to store when the condition is fulfilled, so branch
3297 out when it's not */
3298 c
.cond
= tcg_invert_cond(c
.cond
);
3300 lab
= gen_new_label();
3302 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3304 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3308 r1
= get_field(s
->fields
, r1
);
3309 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3310 if (s
->insn
->data
) {
3311 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3313 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3315 tcg_temp_free_i64(a
);
3321 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3323 uint64_t sign
= 1ull << s
->insn
->data
;
3324 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3325 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3326 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3327 /* The arithmetic left shift is curious in that it does not affect
3328 the sign bit. Copy that over from the source unchanged. */
3329 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3330 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3331 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3335 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3337 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3341 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3343 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3347 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3349 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3353 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3355 gen_helper_sfpc(cpu_env
, o
->in2
);
3359 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3361 gen_helper_sfas(cpu_env
, o
->in2
);
3365 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3367 int b2
= get_field(s
->fields
, b2
);
3368 int d2
= get_field(s
->fields
, d2
);
3369 TCGv_i64 t1
= tcg_temp_new_i64();
3370 TCGv_i64 t2
= tcg_temp_new_i64();
3373 switch (s
->fields
->op2
) {
3374 case 0x99: /* SRNM */
3377 case 0xb8: /* SRNMB */
3380 case 0xb9: /* SRNMT */
3386 mask
= (1 << len
) - 1;
3388 /* Insert the value into the appropriate field of the FPC. */
3390 tcg_gen_movi_i64(t1
, d2
& mask
);
3392 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3393 tcg_gen_andi_i64(t1
, t1
, mask
);
3395 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3396 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3397 tcg_temp_free_i64(t1
);
3399 /* Then install the new FPC to set the rounding mode in fpu_status. */
3400 gen_helper_sfpc(cpu_env
, t2
);
3401 tcg_temp_free_i64(t2
);
3405 #ifndef CONFIG_USER_ONLY
3406 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3408 check_privileged(s
);
3409 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3410 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
3414 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3416 check_privileged(s
);
3417 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3421 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3423 check_privileged(s
);
3424 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3428 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3430 check_privileged(s
);
3431 /* ??? Surely cpu address != cpu number. In any case the previous
3432 version of this stored more than the required half-word, so it
3433 is unlikely this has ever been tested. */
3434 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3438 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3440 gen_helper_stck(o
->out
, cpu_env
);
3441 /* ??? We don't implement clock states. */
3442 gen_op_movi_cc(s
, 0);
3446 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3448 TCGv_i64 c1
= tcg_temp_new_i64();
3449 TCGv_i64 c2
= tcg_temp_new_i64();
3450 gen_helper_stck(c1
, cpu_env
);
3451 /* Shift the 64-bit value into its place as a zero-extended
3452 104-bit value. Note that "bit positions 64-103 are always
3453 non-zero so that they compare differently to STCK"; we set
3454 the least significant bit to 1. */
3455 tcg_gen_shli_i64(c2
, c1
, 56);
3456 tcg_gen_shri_i64(c1
, c1
, 8);
3457 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3458 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3459 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3460 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3461 tcg_temp_free_i64(c1
);
3462 tcg_temp_free_i64(c2
);
3463 /* ??? We don't implement clock states. */
3464 gen_op_movi_cc(s
, 0);
3468 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3470 check_privileged(s
);
3471 gen_helper_sckc(cpu_env
, o
->in2
);
3475 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3477 check_privileged(s
);
3478 gen_helper_stckc(o
->out
, cpu_env
);
3482 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3484 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3485 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3486 check_privileged(s
);
3487 potential_page_fault(s
);
3488 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3489 tcg_temp_free_i32(r1
);
3490 tcg_temp_free_i32(r3
);
3494 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3496 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3497 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3498 check_privileged(s
);
3499 potential_page_fault(s
);
3500 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3501 tcg_temp_free_i32(r1
);
3502 tcg_temp_free_i32(r3
);
3506 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3508 TCGv_i64 t1
= tcg_temp_new_i64();
3510 check_privileged(s
);
3511 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3512 tcg_gen_ld32u_i64(t1
, cpu_env
, offsetof(CPUS390XState
, machine_type
));
3513 tcg_gen_deposit_i64(o
->out
, o
->out
, t1
, 32, 32);
3514 tcg_temp_free_i64(t1
);
3519 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3521 check_privileged(s
);
3522 gen_helper_spt(cpu_env
, o
->in2
);
3526 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3529 /* We really ought to have more complete indication of facilities
3530 that we implement. Address this when STFLE is implemented. */
3531 check_privileged(s
);
3532 f
= tcg_const_i64(0xc0000000);
3533 a
= tcg_const_i64(200);
3534 tcg_gen_qemu_st32(f
, a
, get_mem_index(s
));
3535 tcg_temp_free_i64(f
);
3536 tcg_temp_free_i64(a
);
3540 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3542 check_privileged(s
);
3543 gen_helper_stpt(o
->out
, cpu_env
);
3547 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3549 check_privileged(s
);
3550 potential_page_fault(s
);
3551 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3556 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3558 check_privileged(s
);
3559 gen_helper_spx(cpu_env
, o
->in2
);
3563 static ExitStatus
op_subchannel(DisasContext
*s
, DisasOps
*o
)
3565 check_privileged(s
);
3566 /* Not operational. */
3567 gen_op_movi_cc(s
, 3);
3571 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
3573 check_privileged(s
);
3574 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
3575 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
3579 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3581 uint64_t i2
= get_field(s
->fields
, i2
);
3584 check_privileged(s
);
3586 /* It is important to do what the instruction name says: STORE THEN.
3587 If we let the output hook perform the store then if we fault and
3588 restart, we'll have the wrong SYSTEM MASK in place. */
3589 t
= tcg_temp_new_i64();
3590 tcg_gen_shri_i64(t
, psw_mask
, 56);
3591 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3592 tcg_temp_free_i64(t
);
3594 if (s
->fields
->op
== 0xac) {
3595 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3596 (i2
<< 56) | 0x00ffffffffffffffull
);
3598 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3603 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
3605 check_privileged(s
);
3606 potential_page_fault(s
);
3607 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
3611 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
3613 check_privileged(s
);
3614 potential_page_fault(s
);
3615 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
3620 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3622 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3626 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3628 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3632 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3634 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3638 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3640 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3644 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3646 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3647 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3648 potential_page_fault(s
);
3649 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3650 tcg_temp_free_i32(r1
);
3651 tcg_temp_free_i32(r3
);
3655 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3657 int m3
= get_field(s
->fields
, m3
);
3658 int pos
, base
= s
->insn
->data
;
3659 TCGv_i64 tmp
= tcg_temp_new_i64();
3661 pos
= base
+ ctz32(m3
) * 8;
3664 /* Effectively a 32-bit store. */
3665 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3666 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3672 /* Effectively a 16-bit store. */
3673 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3674 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3681 /* Effectively an 8-bit store. */
3682 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3683 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3687 /* This is going to be a sequence of shifts and stores. */
3688 pos
= base
+ 32 - 8;
3691 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3692 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3693 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3695 m3
= (m3
<< 1) & 0xf;
3700 tcg_temp_free_i64(tmp
);
3704 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3706 int r1
= get_field(s
->fields
, r1
);
3707 int r3
= get_field(s
->fields
, r3
);
3708 int size
= s
->insn
->data
;
3709 TCGv_i64 tsize
= tcg_const_i64(size
);
3713 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3715 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3720 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3724 tcg_temp_free_i64(tsize
);
3728 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3730 int r1
= get_field(s
->fields
, r1
);
3731 int r3
= get_field(s
->fields
, r3
);
3732 TCGv_i64 t
= tcg_temp_new_i64();
3733 TCGv_i64 t4
= tcg_const_i64(4);
3734 TCGv_i64 t32
= tcg_const_i64(32);
3737 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3738 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3742 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3746 tcg_temp_free_i64(t
);
3747 tcg_temp_free_i64(t4
);
3748 tcg_temp_free_i64(t32
);
3752 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3754 potential_page_fault(s
);
3755 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3757 return_low128(o
->in2
);
3761 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3763 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3767 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3772 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3774 /* The !borrow flag is the msb of CC. Since we want the inverse of
3775 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3776 disas_jcc(s
, &cmp
, 8 | 4);
3777 borrow
= tcg_temp_new_i64();
3779 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
3781 TCGv_i32 t
= tcg_temp_new_i32();
3782 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
3783 tcg_gen_extu_i32_i64(borrow
, t
);
3784 tcg_temp_free_i32(t
);
3788 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
3789 tcg_temp_free_i64(borrow
);
3793 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3800 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3801 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3802 tcg_temp_free_i32(t
);
3804 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3805 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3806 tcg_temp_free_i32(t
);
3808 gen_exception(EXCP_SVC
);
3809 return EXIT_NORETURN
;
3812 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3814 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3819 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3821 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3826 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3828 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
3833 #ifndef CONFIG_USER_ONLY
3834 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3836 potential_page_fault(s
);
3837 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3843 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3845 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3846 potential_page_fault(s
);
3847 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3848 tcg_temp_free_i32(l
);
3853 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
3855 potential_page_fault(s
);
3856 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3857 return_low128(o
->out2
);
3862 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
3864 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3865 potential_page_fault(s
);
3866 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3867 tcg_temp_free_i32(l
);
3872 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3874 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3875 potential_page_fault(s
);
3876 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3877 tcg_temp_free_i32(l
);
3881 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3883 int d1
= get_field(s
->fields
, d1
);
3884 int d2
= get_field(s
->fields
, d2
);
3885 int b1
= get_field(s
->fields
, b1
);
3886 int b2
= get_field(s
->fields
, b2
);
3887 int l
= get_field(s
->fields
, l1
);
3890 o
->addr1
= get_address(s
, 0, b1
, d1
);
3892 /* If the addresses are identical, this is a store/memset of zero. */
3893 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
3894 o
->in2
= tcg_const_i64(0);
3898 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
3901 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
3905 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
3908 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
3912 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
3915 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
3919 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
3921 gen_op_movi_cc(s
, 0);
3925 /* But in general we'll defer to a helper. */
3926 o
->in2
= get_address(s
, 0, b2
, d2
);
3927 t32
= tcg_const_i32(l
);
3928 potential_page_fault(s
);
3929 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
3930 tcg_temp_free_i32(t32
);
3935 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3937 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3941 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3943 int shift
= s
->insn
->data
& 0xff;
3944 int size
= s
->insn
->data
>> 8;
3945 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3948 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3949 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3951 /* Produce the CC from only the bits manipulated. */
3952 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3953 set_cc_nz_u64(s
, cc_dst
);
3957 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
3959 o
->out
= tcg_const_i64(0);
3963 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
3965 o
->out
= tcg_const_i64(0);
3971 /* ====================================================================== */
3972 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3973 the original inputs), update the various cc data structures in order to
3974 be able to compute the new condition code. */
3976 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3978 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3981 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3983 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3986 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3988 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3991 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3993 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3996 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3998 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4001 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4003 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4006 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4008 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4011 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4013 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
4016 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
4018 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
4021 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4023 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4026 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4028 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4031 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4033 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4036 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4038 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4041 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4043 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4046 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4048 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4051 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4053 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4056 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4058 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4061 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4063 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4066 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4068 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
4071 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
4073 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
4074 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
4077 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
4079 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
4082 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
4084 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
4087 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
4089 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
4092 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
4094 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
4097 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
4099 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
4102 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
4104 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
4107 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
4109 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
4112 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
4114 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
4117 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
4119 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
4122 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
4124 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
4127 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
4129 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
4132 /* ====================================================================== */
4133 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4134 with the TCG register to which we will write. Used in combination with
4135 the "wout" generators, in some cases we need a new temporary, and in
4136 some cases we can write to a TCG global. */
4138 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4140 o
->out
= tcg_temp_new_i64();
4142 #define SPEC_prep_new 0
4144 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4146 o
->out
= tcg_temp_new_i64();
4147 o
->out2
= tcg_temp_new_i64();
4149 #define SPEC_prep_new_P 0
4151 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4153 o
->out
= regs
[get_field(f
, r1
)];
4156 #define SPEC_prep_r1 0
4158 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4160 int r1
= get_field(f
, r1
);
4162 o
->out2
= regs
[r1
+ 1];
4163 o
->g_out
= o
->g_out2
= true;
4165 #define SPEC_prep_r1_P SPEC_r1_even
4167 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4169 o
->out
= fregs
[get_field(f
, r1
)];
4172 #define SPEC_prep_f1 0
4174 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4176 int r1
= get_field(f
, r1
);
4178 o
->out2
= fregs
[r1
+ 2];
4179 o
->g_out
= o
->g_out2
= true;
4181 #define SPEC_prep_x1 SPEC_r1_f128
4183 /* ====================================================================== */
4184 /* The "Write OUTput" generators. These generally perform some non-trivial
4185 copy of data to TCG globals, or to main memory. The trivial cases are
4186 generally handled by having a "prep" generator install the TCG global
4187 as the destination of the operation. */
4189 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4191 store_reg(get_field(f
, r1
), o
->out
);
4193 #define SPEC_wout_r1 0
4195 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4197 int r1
= get_field(f
, r1
);
4198 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
4200 #define SPEC_wout_r1_8 0
4202 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4204 int r1
= get_field(f
, r1
);
4205 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
4207 #define SPEC_wout_r1_16 0
4209 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4211 store_reg32_i64(get_field(f
, r1
), o
->out
);
4213 #define SPEC_wout_r1_32 0
4215 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4217 int r1
= get_field(f
, r1
);
4218 store_reg32_i64(r1
, o
->out
);
4219 store_reg32_i64(r1
+ 1, o
->out2
);
4221 #define SPEC_wout_r1_P32 SPEC_r1_even
4223 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4225 int r1
= get_field(f
, r1
);
4226 store_reg32_i64(r1
+ 1, o
->out
);
4227 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
4228 store_reg32_i64(r1
, o
->out
);
4230 #define SPEC_wout_r1_D32 SPEC_r1_even
4232 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4234 store_freg32_i64(get_field(f
, r1
), o
->out
);
4236 #define SPEC_wout_e1 0
4238 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4240 store_freg(get_field(f
, r1
), o
->out
);
4242 #define SPEC_wout_f1 0
4244 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4246 int f1
= get_field(s
->fields
, r1
);
4247 store_freg(f1
, o
->out
);
4248 store_freg(f1
+ 2, o
->out2
);
4250 #define SPEC_wout_x1 SPEC_r1_f128
4252 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4254 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4255 store_reg32_i64(get_field(f
, r1
), o
->out
);
4258 #define SPEC_wout_cond_r1r2_32 0
4260 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4262 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4263 store_freg32_i64(get_field(f
, r1
), o
->out
);
4266 #define SPEC_wout_cond_e1e2 0
4268 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4270 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4272 #define SPEC_wout_m1_8 0
4274 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4276 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4278 #define SPEC_wout_m1_16 0
4280 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4282 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4284 #define SPEC_wout_m1_32 0
4286 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4288 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4290 #define SPEC_wout_m1_64 0
4292 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4294 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4296 #define SPEC_wout_m2_32 0
4298 static void wout_m2_32_r1_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4300 /* XXX release reservation */
4301 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4302 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4304 #define SPEC_wout_m2_32_r1_atomic 0
4306 static void wout_m2_64_r1_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4308 /* XXX release reservation */
4309 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4310 store_reg(get_field(f
, r1
), o
->in2
);
4312 #define SPEC_wout_m2_64_r1_atomic 0
4314 /* ====================================================================== */
4315 /* The "INput 1" generators. These load the first operand to an insn. */
4317 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4319 o
->in1
= load_reg(get_field(f
, r1
));
4321 #define SPEC_in1_r1 0
4323 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4325 o
->in1
= regs
[get_field(f
, r1
)];
4328 #define SPEC_in1_r1_o 0
4330 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4332 o
->in1
= tcg_temp_new_i64();
4333 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4335 #define SPEC_in1_r1_32s 0
4337 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4339 o
->in1
= tcg_temp_new_i64();
4340 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4342 #define SPEC_in1_r1_32u 0
4344 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4346 o
->in1
= tcg_temp_new_i64();
4347 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
4349 #define SPEC_in1_r1_sr32 0
4351 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4353 o
->in1
= load_reg(get_field(f
, r1
) + 1);
4355 #define SPEC_in1_r1p1 SPEC_r1_even
4357 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4359 o
->in1
= tcg_temp_new_i64();
4360 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4362 #define SPEC_in1_r1p1_32s SPEC_r1_even
4364 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4366 o
->in1
= tcg_temp_new_i64();
4367 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4369 #define SPEC_in1_r1p1_32u SPEC_r1_even
4371 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4373 int r1
= get_field(f
, r1
);
4374 o
->in1
= tcg_temp_new_i64();
4375 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
4377 #define SPEC_in1_r1_D32 SPEC_r1_even
4379 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4381 o
->in1
= load_reg(get_field(f
, r2
));
4383 #define SPEC_in1_r2 0
4385 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4387 o
->in1
= load_reg(get_field(f
, r3
));
4389 #define SPEC_in1_r3 0
4391 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4393 o
->in1
= regs
[get_field(f
, r3
)];
4396 #define SPEC_in1_r3_o 0
4398 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4400 o
->in1
= tcg_temp_new_i64();
4401 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4403 #define SPEC_in1_r3_32s 0
4405 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4407 o
->in1
= tcg_temp_new_i64();
4408 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4410 #define SPEC_in1_r3_32u 0
4412 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4414 int r3
= get_field(f
, r3
);
4415 o
->in1
= tcg_temp_new_i64();
4416 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
4418 #define SPEC_in1_r3_D32 SPEC_r3_even
4420 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4422 o
->in1
= load_freg32_i64(get_field(f
, r1
));
4424 #define SPEC_in1_e1 0
4426 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4428 o
->in1
= fregs
[get_field(f
, r1
)];
4431 #define SPEC_in1_f1_o 0
4433 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4435 int r1
= get_field(f
, r1
);
4437 o
->out2
= fregs
[r1
+ 2];
4438 o
->g_out
= o
->g_out2
= true;
4440 #define SPEC_in1_x1_o SPEC_r1_f128
4442 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4444 o
->in1
= fregs
[get_field(f
, r3
)];
4447 #define SPEC_in1_f3_o 0
4449 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4451 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
4453 #define SPEC_in1_la1 0
4455 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4457 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4458 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4460 #define SPEC_in1_la2 0
4462 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4465 o
->in1
= tcg_temp_new_i64();
4466 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4468 #define SPEC_in1_m1_8u 0
4470 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4473 o
->in1
= tcg_temp_new_i64();
4474 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4476 #define SPEC_in1_m1_16s 0
4478 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4481 o
->in1
= tcg_temp_new_i64();
4482 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4484 #define SPEC_in1_m1_16u 0
4486 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4489 o
->in1
= tcg_temp_new_i64();
4490 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4492 #define SPEC_in1_m1_32s 0
4494 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4497 o
->in1
= tcg_temp_new_i64();
4498 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4500 #define SPEC_in1_m1_32u 0
4502 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4505 o
->in1
= tcg_temp_new_i64();
4506 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
4508 #define SPEC_in1_m1_64 0
4510 /* ====================================================================== */
4511 /* The "INput 2" generators. These load the second operand to an insn. */
4513 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4515 o
->in2
= regs
[get_field(f
, r1
)];
4518 #define SPEC_in2_r1_o 0
4520 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4522 o
->in2
= tcg_temp_new_i64();
4523 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4525 #define SPEC_in2_r1_16u 0
4527 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4529 o
->in2
= tcg_temp_new_i64();
4530 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4532 #define SPEC_in2_r1_32u 0
4534 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4536 int r1
= get_field(f
, r1
);
4537 o
->in2
= tcg_temp_new_i64();
4538 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
4540 #define SPEC_in2_r1_D32 SPEC_r1_even
4542 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4544 o
->in2
= load_reg(get_field(f
, r2
));
4546 #define SPEC_in2_r2 0
4548 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4550 o
->in2
= regs
[get_field(f
, r2
)];
4553 #define SPEC_in2_r2_o 0
4555 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4557 int r2
= get_field(f
, r2
);
4559 o
->in2
= load_reg(r2
);
4562 #define SPEC_in2_r2_nz 0
4564 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4566 o
->in2
= tcg_temp_new_i64();
4567 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4569 #define SPEC_in2_r2_8s 0
4571 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4573 o
->in2
= tcg_temp_new_i64();
4574 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4576 #define SPEC_in2_r2_8u 0
4578 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4580 o
->in2
= tcg_temp_new_i64();
4581 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4583 #define SPEC_in2_r2_16s 0
4585 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4587 o
->in2
= tcg_temp_new_i64();
4588 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4590 #define SPEC_in2_r2_16u 0
4592 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4594 o
->in2
= load_reg(get_field(f
, r3
));
4596 #define SPEC_in2_r3 0
4598 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4600 o
->in2
= tcg_temp_new_i64();
4601 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4603 #define SPEC_in2_r2_32s 0
4605 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4607 o
->in2
= tcg_temp_new_i64();
4608 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4610 #define SPEC_in2_r2_32u 0
4612 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4614 o
->in2
= load_freg32_i64(get_field(f
, r2
));
4616 #define SPEC_in2_e2 0
4618 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4620 o
->in2
= fregs
[get_field(f
, r2
)];
4623 #define SPEC_in2_f2_o 0
4625 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4627 int r2
= get_field(f
, r2
);
4629 o
->in2
= fregs
[r2
+ 2];
4630 o
->g_in1
= o
->g_in2
= true;
4632 #define SPEC_in2_x2_o SPEC_r2_f128
4634 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4636 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
4638 #define SPEC_in2_ra2 0
4640 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4642 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4643 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4645 #define SPEC_in2_a2 0
4647 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4649 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
4651 #define SPEC_in2_ri2 0
4653 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4655 help_l2_shift(s
, f
, o
, 31);
4657 #define SPEC_in2_sh32 0
4659 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4661 help_l2_shift(s
, f
, o
, 63);
4663 #define SPEC_in2_sh64 0
4665 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4668 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
4670 #define SPEC_in2_m2_8u 0
4672 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4675 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4677 #define SPEC_in2_m2_16s 0
4679 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4682 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4684 #define SPEC_in2_m2_16u 0
4686 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4689 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4691 #define SPEC_in2_m2_32s 0
4693 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4696 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4698 #define SPEC_in2_m2_32u 0
4700 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4703 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4705 #define SPEC_in2_m2_64 0
4707 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4710 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4712 #define SPEC_in2_mri2_16u 0
4714 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4717 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4719 #define SPEC_in2_mri2_32s 0
4721 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4724 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4726 #define SPEC_in2_mri2_32u 0
4728 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4731 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4733 #define SPEC_in2_mri2_64 0
4735 static void in2_m2_32s_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4737 /* XXX should reserve the address */
4739 o
->in2
= tcg_temp_new_i64();
4740 tcg_gen_qemu_ld32s(o
->in2
, o
->addr1
, get_mem_index(s
));
4742 #define SPEC_in2_m2_32s_atomic 0
4744 static void in2_m2_64_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4746 /* XXX should reserve the address */
4748 o
->in2
= tcg_temp_new_i64();
4749 tcg_gen_qemu_ld64(o
->in2
, o
->addr1
, get_mem_index(s
));
4751 #define SPEC_in2_m2_64_atomic 0
4753 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4755 o
->in2
= tcg_const_i64(get_field(f
, i2
));
4757 #define SPEC_in2_i2 0
4759 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4761 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
4763 #define SPEC_in2_i2_8u 0
4765 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4767 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
4769 #define SPEC_in2_i2_16u 0
4771 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4773 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
4775 #define SPEC_in2_i2_32u 0
4777 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4779 uint64_t i2
= (uint16_t)get_field(f
, i2
);
4780 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4782 #define SPEC_in2_i2_16u_shl 0
4784 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4786 uint64_t i2
= (uint32_t)get_field(f
, i2
);
4787 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4789 #define SPEC_in2_i2_32u_shl 0
4791 /* ====================================================================== */
4793 /* Find opc within the table of insns. This is formulated as a switch
4794 statement so that (1) we get compile-time notice of cut-paste errors
4795 for duplicated opcodes, and (2) the compiler generates the binary
4796 search tree, rather than us having to post-process the table. */
4798 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4799 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4801 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4803 enum DisasInsnEnum
{
4804 #include "insn-data.def"
4808 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4812 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4814 .help_in1 = in1_##I1, \
4815 .help_in2 = in2_##I2, \
4816 .help_prep = prep_##P, \
4817 .help_wout = wout_##W, \
4818 .help_cout = cout_##CC, \
4819 .help_op = op_##OP, \
4823 /* Allow 0 to be used for NULL in the table below. */
4831 #define SPEC_in1_0 0
4832 #define SPEC_in2_0 0
4833 #define SPEC_prep_0 0
4834 #define SPEC_wout_0 0
4836 static const DisasInsn insn_info
[] = {
4837 #include "insn-data.def"
4841 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4842 case OPC: return &insn_info[insn_ ## NM];
4844 static const DisasInsn
*lookup_opc(uint16_t opc
)
4847 #include "insn-data.def"
4856 /* Extract a field from the insn. The INSN should be left-aligned in
4857 the uint64_t so that we can more easily utilize the big-bit-endian
4858 definitions we extract from the Principals of Operation. */
4860 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4868 /* Zero extract the field from the insn. */
4869 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4871 /* Sign-extend, or un-swap the field as necessary. */
4873 case 0: /* unsigned */
4875 case 1: /* signed */
4876 assert(f
->size
<= 32);
4877 m
= 1u << (f
->size
- 1);
4880 case 2: /* dl+dh split, signed 20 bit. */
4881 r
= ((int8_t)r
<< 12) | (r
>> 8);
4887 /* Validate that the "compressed" encoding we selected above is valid.
4888 I.e. we havn't make two different original fields overlap. */
4889 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4890 o
->presentC
|= 1 << f
->indexC
;
4891 o
->presentO
|= 1 << f
->indexO
;
4893 o
->c
[f
->indexC
] = r
;
4896 /* Lookup the insn at the current PC, extracting the operands into O and
4897 returning the info struct for the insn. Returns NULL for invalid insn. */
4899 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4902 uint64_t insn
, pc
= s
->pc
;
4904 const DisasInsn
*info
;
4906 insn
= ld_code2(env
, pc
);
4907 op
= (insn
>> 8) & 0xff;
4908 ilen
= get_ilen(op
);
4909 s
->next_pc
= s
->pc
+ ilen
;
4916 insn
= ld_code4(env
, pc
) << 32;
4919 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4925 /* We can't actually determine the insn format until we've looked up
4926 the full insn opcode. Which we can't do without locating the
4927 secondary opcode. Assume by default that OP2 is at bit 40; for
4928 those smaller insns that don't actually have a secondary opcode
4929 this will correctly result in OP2 = 0. */
4935 case 0xb2: /* S, RRF, RRE */
4936 case 0xb3: /* RRE, RRD, RRF */
4937 case 0xb9: /* RRE, RRF */
4938 case 0xe5: /* SSE, SIL */
4939 op2
= (insn
<< 8) >> 56;
4943 case 0xc0: /* RIL */
4944 case 0xc2: /* RIL */
4945 case 0xc4: /* RIL */
4946 case 0xc6: /* RIL */
4947 case 0xc8: /* SSF */
4948 case 0xcc: /* RIL */
4949 op2
= (insn
<< 12) >> 60;
4951 case 0xd0 ... 0xdf: /* SS */
4957 case 0xee ... 0xf3: /* SS */
4958 case 0xf8 ... 0xfd: /* SS */
4962 op2
= (insn
<< 40) >> 56;
4966 memset(f
, 0, sizeof(*f
));
4970 /* Lookup the instruction. */
4971 info
= lookup_opc(op
<< 8 | op2
);
4973 /* If we found it, extract the operands. */
4975 DisasFormat fmt
= info
->fmt
;
4978 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4979 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
4985 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4987 const DisasInsn
*insn
;
4988 ExitStatus ret
= NO_EXIT
;
4992 /* Search for the insn in the table. */
4993 insn
= extract_insn(env
, s
, &f
);
4995 /* Not found means unimplemented/illegal opcode. */
4997 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
4999 gen_illegal_opcode(s
);
5000 return EXIT_NORETURN
;
5003 /* Check for insn specification exceptions. */
5005 int spec
= insn
->spec
, excp
= 0, r
;
5007 if (spec
& SPEC_r1_even
) {
5008 r
= get_field(&f
, r1
);
5010 excp
= PGM_SPECIFICATION
;
5013 if (spec
& SPEC_r2_even
) {
5014 r
= get_field(&f
, r2
);
5016 excp
= PGM_SPECIFICATION
;
5019 if (spec
& SPEC_r3_even
) {
5020 r
= get_field(&f
, r3
);
5022 excp
= PGM_SPECIFICATION
;
5025 if (spec
& SPEC_r1_f128
) {
5026 r
= get_field(&f
, r1
);
5028 excp
= PGM_SPECIFICATION
;
5031 if (spec
& SPEC_r2_f128
) {
5032 r
= get_field(&f
, r2
);
5034 excp
= PGM_SPECIFICATION
;
5038 gen_program_exception(s
, excp
);
5039 return EXIT_NORETURN
;
5043 /* Set up the strutures we use to communicate with the helpers. */
5046 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5047 TCGV_UNUSED_I64(o
.out
);
5048 TCGV_UNUSED_I64(o
.out2
);
5049 TCGV_UNUSED_I64(o
.in1
);
5050 TCGV_UNUSED_I64(o
.in2
);
5051 TCGV_UNUSED_I64(o
.addr1
);
5053 /* Implement the instruction. */
5054 if (insn
->help_in1
) {
5055 insn
->help_in1(s
, &f
, &o
);
5057 if (insn
->help_in2
) {
5058 insn
->help_in2(s
, &f
, &o
);
5060 if (insn
->help_prep
) {
5061 insn
->help_prep(s
, &f
, &o
);
5063 if (insn
->help_op
) {
5064 ret
= insn
->help_op(s
, &o
);
5066 if (insn
->help_wout
) {
5067 insn
->help_wout(s
, &f
, &o
);
5069 if (insn
->help_cout
) {
5070 insn
->help_cout(s
, &o
);
5073 /* Free any temporaries created by the helpers. */
5074 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5075 tcg_temp_free_i64(o
.out
);
5077 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5078 tcg_temp_free_i64(o
.out2
);
5080 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5081 tcg_temp_free_i64(o
.in1
);
5083 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5084 tcg_temp_free_i64(o
.in2
);
5086 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5087 tcg_temp_free_i64(o
.addr1
);
5090 /* Advance to the next instruction. */
5095 static inline void gen_intermediate_code_internal(S390CPU
*cpu
,
5096 TranslationBlock
*tb
,
5099 CPUState
*cs
= CPU(cpu
);
5100 CPUS390XState
*env
= &cpu
->env
;
5102 target_ulong pc_start
;
5103 uint64_t next_page_start
;
5105 int num_insns
, max_insns
;
5113 if (!(tb
->flags
& FLAG_MASK_64
)) {
5114 pc_start
&= 0x7fffffff;
5119 dc
.cc_op
= CC_OP_DYNAMIC
;
5120 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
5122 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
5125 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5126 if (max_insns
== 0) {
5127 max_insns
= CF_COUNT_MASK
;
5134 j
= tcg_op_buf_count();
5138 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
5141 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
5142 gen_opc_cc_op
[lj
] = dc
.cc_op
;
5143 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
5144 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
5146 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
5150 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
5151 tcg_gen_debug_insn_start(dc
.pc
);
5155 if (unlikely(!QTAILQ_EMPTY(&cs
->breakpoints
))) {
5156 QTAILQ_FOREACH(bp
, &cs
->breakpoints
, entry
) {
5157 if (bp
->pc
== dc
.pc
) {
5158 status
= EXIT_PC_STALE
;
5164 if (status
== NO_EXIT
) {
5165 status
= translate_one(env
, &dc
);
5168 /* If we reach a page boundary, are single stepping,
5169 or exhaust instruction count, stop generation. */
5170 if (status
== NO_EXIT
5171 && (dc
.pc
>= next_page_start
5172 || tcg_op_buf_full()
5173 || num_insns
>= max_insns
5175 || cs
->singlestep_enabled
)) {
5176 status
= EXIT_PC_STALE
;
5178 } while (status
== NO_EXIT
);
5180 if (tb
->cflags
& CF_LAST_IO
) {
5189 update_psw_addr(&dc
);
5191 case EXIT_PC_UPDATED
:
5192 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5193 cc op type is in env */
5195 /* Exit the TB, either by raising a debug exception or by return. */
5197 gen_exception(EXCP_DEBUG
);
5206 gen_tb_end(tb
, num_insns
);
5209 j
= tcg_op_buf_count();
5212 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
5215 tb
->size
= dc
.pc
- pc_start
;
5216 tb
->icount
= num_insns
;
5219 #if defined(S390X_DEBUG_DISAS)
5220 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
5221 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5222 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
5228 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
5230 gen_intermediate_code_internal(s390_env_get_cpu(env
), tb
, false);
5233 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
5235 gen_intermediate_code_internal(s390_env_get_cpu(env
), tb
, true);
5238 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
5241 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
5242 cc_op
= gen_opc_cc_op
[pc_pos
];
5243 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {