4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
39 static TCGv_ptr cpu_env
;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext
;
50 typedef struct DisasInsn DisasInsn
;
51 typedef struct DisasFields DisasFields
;
54 struct TranslationBlock
*tb
;
55 const DisasInsn
*insn
;
59 bool singlestep_enabled
;
62 /* Information carried about a condition to be evaluated. */
69 struct { TCGv_i64 a
, b
; } s64
;
70 struct { TCGv_i32 a
, b
; } s32
;
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit
[CC_OP_MAX
];
78 static uint64_t inline_branch_miss
[CC_OP_MAX
];
81 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
83 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
84 if (s
->tb
->flags
& FLAG_MASK_32
) {
85 return pc
| 0x80000000;
91 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
94 S390CPU
*cpu
= S390_CPU(cs
);
95 CPUS390XState
*env
= &cpu
->env
;
99 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
100 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
102 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
103 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
106 for (i
= 0; i
< 16; i
++) {
107 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
109 cpu_fprintf(f
, "\n");
115 for (i
= 0; i
< 16; i
++) {
116 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, get_freg(env
, i
)->ll
);
118 cpu_fprintf(f
, "\n");
124 for (i
= 0; i
< 32; i
++) {
125 cpu_fprintf(f
, "V%02d=%016" PRIx64
"%016" PRIx64
, i
,
126 env
->vregs
[i
][0].ll
, env
->vregs
[i
][1].ll
);
127 cpu_fprintf(f
, (i
% 2) ? " " : "\n");
130 #ifndef CONFIG_USER_ONLY
131 for (i
= 0; i
< 16; i
++) {
132 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
134 cpu_fprintf(f
, "\n");
141 #ifdef DEBUG_INLINE_BRANCHES
142 for (i
= 0; i
< CC_OP_MAX
; i
++) {
143 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
144 inline_branch_miss
[i
], inline_branch_hit
[i
]);
148 cpu_fprintf(f
, "\n");
151 static TCGv_i64 psw_addr
;
152 static TCGv_i64 psw_mask
;
154 static TCGv_i32 cc_op
;
155 static TCGv_i64 cc_src
;
156 static TCGv_i64 cc_dst
;
157 static TCGv_i64 cc_vr
;
159 static char cpu_reg_names
[32][4];
160 static TCGv_i64 regs
[16];
161 static TCGv_i64 fregs
[16];
163 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
165 void s390x_translate_init(void)
169 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
170 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
171 offsetof(CPUS390XState
, psw
.addr
),
173 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
174 offsetof(CPUS390XState
, psw
.mask
),
177 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
179 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
181 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
183 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
186 for (i
= 0; i
< 16; i
++) {
187 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
188 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
189 offsetof(CPUS390XState
, regs
[i
]),
193 for (i
= 0; i
< 16; i
++) {
194 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
195 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
196 offsetof(CPUS390XState
, vregs
[i
][0].d
),
197 cpu_reg_names
[i
+ 16]);
201 static TCGv_i64
load_reg(int reg
)
203 TCGv_i64 r
= tcg_temp_new_i64();
204 tcg_gen_mov_i64(r
, regs
[reg
]);
208 static TCGv_i64
load_freg32_i64(int reg
)
210 TCGv_i64 r
= tcg_temp_new_i64();
211 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
215 static void store_reg(int reg
, TCGv_i64 v
)
217 tcg_gen_mov_i64(regs
[reg
], v
);
220 static void store_freg(int reg
, TCGv_i64 v
)
222 tcg_gen_mov_i64(fregs
[reg
], v
);
225 static void store_reg32_i64(int reg
, TCGv_i64 v
)
227 /* 32 bit register writes keep the upper half */
228 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
231 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
233 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
236 static void store_freg32_i64(int reg
, TCGv_i64 v
)
238 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
241 static void return_low128(TCGv_i64 dest
)
243 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
246 static void update_psw_addr(DisasContext
*s
)
249 tcg_gen_movi_i64(psw_addr
, s
->pc
);
252 static void update_cc_op(DisasContext
*s
)
254 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
255 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
259 static void potential_page_fault(DisasContext
*s
)
265 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
267 return (uint64_t)cpu_lduw_code(env
, pc
);
270 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
272 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
275 static int get_mem_index(DisasContext
*s
)
277 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
278 case PSW_ASC_PRIMARY
>> 32:
280 case PSW_ASC_SECONDARY
>> 32:
282 case PSW_ASC_HOME
>> 32:
290 static void gen_exception(int excp
)
292 TCGv_i32 tmp
= tcg_const_i32(excp
);
293 gen_helper_exception(cpu_env
, tmp
);
294 tcg_temp_free_i32(tmp
);
297 static void gen_program_exception(DisasContext
*s
, int code
)
301 /* Remember what pgm exception this was. */
302 tmp
= tcg_const_i32(code
);
303 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
304 tcg_temp_free_i32(tmp
);
306 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
307 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
308 tcg_temp_free_i32(tmp
);
310 /* Advance past instruction. */
317 /* Trigger exception. */
318 gen_exception(EXCP_PGM
);
321 static inline void gen_illegal_opcode(DisasContext
*s
)
323 gen_program_exception(s
, PGM_OPERATION
);
326 static inline void gen_trap(DisasContext
*s
)
330 /* Set DXC to 0xff. */
331 t
= tcg_temp_new_i32();
332 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
333 tcg_gen_ori_i32(t
, t
, 0xff00);
334 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
335 tcg_temp_free_i32(t
);
337 gen_program_exception(s
, PGM_DATA
);
340 #ifndef CONFIG_USER_ONLY
341 static void check_privileged(DisasContext
*s
)
343 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
344 gen_program_exception(s
, PGM_PRIVILEGED
);
349 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
351 TCGv_i64 tmp
= tcg_temp_new_i64();
352 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
354 /* Note that d2 is limited to 20 bits, signed. If we crop negative
355 displacements early we create larger immediate addends. */
357 /* Note that addi optimizes the imm==0 case. */
359 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
360 tcg_gen_addi_i64(tmp
, tmp
, d2
);
362 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
364 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
370 tcg_gen_movi_i64(tmp
, d2
);
373 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
379 static inline bool live_cc_data(DisasContext
*s
)
381 return (s
->cc_op
!= CC_OP_DYNAMIC
382 && s
->cc_op
!= CC_OP_STATIC
386 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
388 if (live_cc_data(s
)) {
389 tcg_gen_discard_i64(cc_src
);
390 tcg_gen_discard_i64(cc_dst
);
391 tcg_gen_discard_i64(cc_vr
);
393 s
->cc_op
= CC_OP_CONST0
+ val
;
396 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
398 if (live_cc_data(s
)) {
399 tcg_gen_discard_i64(cc_src
);
400 tcg_gen_discard_i64(cc_vr
);
402 tcg_gen_mov_i64(cc_dst
, dst
);
406 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
409 if (live_cc_data(s
)) {
410 tcg_gen_discard_i64(cc_vr
);
412 tcg_gen_mov_i64(cc_src
, src
);
413 tcg_gen_mov_i64(cc_dst
, dst
);
417 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
418 TCGv_i64 dst
, TCGv_i64 vr
)
420 tcg_gen_mov_i64(cc_src
, src
);
421 tcg_gen_mov_i64(cc_dst
, dst
);
422 tcg_gen_mov_i64(cc_vr
, vr
);
426 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
428 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
431 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
433 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
436 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
438 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
441 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
443 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
446 /* CC value is in env->cc_op */
447 static void set_cc_static(DisasContext
*s
)
449 if (live_cc_data(s
)) {
450 tcg_gen_discard_i64(cc_src
);
451 tcg_gen_discard_i64(cc_dst
);
452 tcg_gen_discard_i64(cc_vr
);
454 s
->cc_op
= CC_OP_STATIC
;
457 /* calculates cc into cc_op */
458 static void gen_op_calc_cc(DisasContext
*s
)
460 TCGv_i32 local_cc_op
;
463 TCGV_UNUSED_I32(local_cc_op
);
464 TCGV_UNUSED_I64(dummy
);
467 dummy
= tcg_const_i64(0);
481 local_cc_op
= tcg_const_i32(s
->cc_op
);
497 /* s->cc_op is the cc value */
498 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
501 /* env->cc_op already is the cc value */
516 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
521 case CC_OP_LTUGTU_32
:
522 case CC_OP_LTUGTU_64
:
529 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
544 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
547 /* unknown operation - assume 3 arguments and cc_op in env */
548 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
554 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
555 tcg_temp_free_i32(local_cc_op
);
557 if (!TCGV_IS_UNUSED_I64(dummy
)) {
558 tcg_temp_free_i64(dummy
);
561 /* We now have cc in cc_op as constant */
565 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
567 /* NOTE: we handle the case where the TB spans two pages here */
568 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
569 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
570 && !s
->singlestep_enabled
571 && !(s
->tb
->cflags
& CF_LAST_IO
));
574 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
576 #ifdef DEBUG_INLINE_BRANCHES
577 inline_branch_miss
[cc_op
]++;
581 static void account_inline_branch(DisasContext
*s
, int cc_op
)
583 #ifdef DEBUG_INLINE_BRANCHES
584 inline_branch_hit
[cc_op
]++;
588 /* Table of mask values to comparison codes, given a comparison as input.
589 For such, CC=3 should not be possible. */
590 static const TCGCond ltgt_cond
[16] = {
591 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
592 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
593 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
594 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
595 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
596 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
597 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
598 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
601 /* Table of mask values to comparison codes, given a logic op as input.
602 For such, only CC=0 and CC=1 should be possible. */
603 static const TCGCond nz_cond
[16] = {
604 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
605 TCG_COND_NEVER
, TCG_COND_NEVER
,
606 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
607 TCG_COND_NE
, TCG_COND_NE
,
608 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
609 TCG_COND_EQ
, TCG_COND_EQ
,
610 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
611 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
614 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
615 details required to generate a TCG comparison. */
616 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
619 enum cc_op old_cc_op
= s
->cc_op
;
621 if (mask
== 15 || mask
== 0) {
622 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
625 c
->g1
= c
->g2
= true;
630 /* Find the TCG condition for the mask + cc op. */
636 cond
= ltgt_cond
[mask
];
637 if (cond
== TCG_COND_NEVER
) {
640 account_inline_branch(s
, old_cc_op
);
643 case CC_OP_LTUGTU_32
:
644 case CC_OP_LTUGTU_64
:
645 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
646 if (cond
== TCG_COND_NEVER
) {
649 account_inline_branch(s
, old_cc_op
);
653 cond
= nz_cond
[mask
];
654 if (cond
== TCG_COND_NEVER
) {
657 account_inline_branch(s
, old_cc_op
);
672 account_inline_branch(s
, old_cc_op
);
687 account_inline_branch(s
, old_cc_op
);
691 switch (mask
& 0xa) {
692 case 8: /* src == 0 -> no one bit found */
695 case 2: /* src != 0 -> one bit found */
701 account_inline_branch(s
, old_cc_op
);
707 case 8 | 2: /* vr == 0 */
710 case 4 | 1: /* vr != 0 */
713 case 8 | 4: /* no carry -> vr >= src */
716 case 2 | 1: /* carry -> vr < src */
722 account_inline_branch(s
, old_cc_op
);
727 /* Note that CC=0 is impossible; treat it as dont-care. */
729 case 2: /* zero -> op1 == op2 */
732 case 4 | 1: /* !zero -> op1 != op2 */
735 case 4: /* borrow (!carry) -> op1 < op2 */
738 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
744 account_inline_branch(s
, old_cc_op
);
749 /* Calculate cc value. */
754 /* Jump based on CC. We'll load up the real cond below;
755 the assignment here merely avoids a compiler warning. */
756 account_noninline_branch(s
, old_cc_op
);
757 old_cc_op
= CC_OP_STATIC
;
758 cond
= TCG_COND_NEVER
;
762 /* Load up the arguments of the comparison. */
764 c
->g1
= c
->g2
= false;
768 c
->u
.s32
.a
= tcg_temp_new_i32();
769 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
770 c
->u
.s32
.b
= tcg_const_i32(0);
773 case CC_OP_LTUGTU_32
:
776 c
->u
.s32
.a
= tcg_temp_new_i32();
777 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
778 c
->u
.s32
.b
= tcg_temp_new_i32();
779 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
786 c
->u
.s64
.b
= tcg_const_i64(0);
790 case CC_OP_LTUGTU_64
:
794 c
->g1
= c
->g2
= true;
800 c
->u
.s64
.a
= tcg_temp_new_i64();
801 c
->u
.s64
.b
= tcg_const_i64(0);
802 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
807 c
->u
.s32
.a
= tcg_temp_new_i32();
808 c
->u
.s32
.b
= tcg_temp_new_i32();
809 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_vr
);
810 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
811 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
813 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_src
);
820 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
821 c
->u
.s64
.b
= tcg_const_i64(0);
833 case 0x8 | 0x4 | 0x2: /* cc != 3 */
835 c
->u
.s32
.b
= tcg_const_i32(3);
837 case 0x8 | 0x4 | 0x1: /* cc != 2 */
839 c
->u
.s32
.b
= tcg_const_i32(2);
841 case 0x8 | 0x2 | 0x1: /* cc != 1 */
843 c
->u
.s32
.b
= tcg_const_i32(1);
845 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
848 c
->u
.s32
.a
= tcg_temp_new_i32();
849 c
->u
.s32
.b
= tcg_const_i32(0);
850 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
852 case 0x8 | 0x4: /* cc < 2 */
854 c
->u
.s32
.b
= tcg_const_i32(2);
856 case 0x8: /* cc == 0 */
858 c
->u
.s32
.b
= tcg_const_i32(0);
860 case 0x4 | 0x2 | 0x1: /* cc != 0 */
862 c
->u
.s32
.b
= tcg_const_i32(0);
864 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
867 c
->u
.s32
.a
= tcg_temp_new_i32();
868 c
->u
.s32
.b
= tcg_const_i32(0);
869 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
871 case 0x4: /* cc == 1 */
873 c
->u
.s32
.b
= tcg_const_i32(1);
875 case 0x2 | 0x1: /* cc > 1 */
877 c
->u
.s32
.b
= tcg_const_i32(1);
879 case 0x2: /* cc == 2 */
881 c
->u
.s32
.b
= tcg_const_i32(2);
883 case 0x1: /* cc == 3 */
885 c
->u
.s32
.b
= tcg_const_i32(3);
888 /* CC is masked by something else: (8 >> cc) & mask. */
891 c
->u
.s32
.a
= tcg_const_i32(8);
892 c
->u
.s32
.b
= tcg_const_i32(0);
893 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
894 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
905 static void free_compare(DisasCompare
*c
)
909 tcg_temp_free_i64(c
->u
.s64
.a
);
911 tcg_temp_free_i32(c
->u
.s32
.a
);
916 tcg_temp_free_i64(c
->u
.s64
.b
);
918 tcg_temp_free_i32(c
->u
.s32
.b
);
923 /* ====================================================================== */
924 /* Define the insn format enumeration. */
925 #define F0(N) FMT_##N,
926 #define F1(N, X1) F0(N)
927 #define F2(N, X1, X2) F0(N)
928 #define F3(N, X1, X2, X3) F0(N)
929 #define F4(N, X1, X2, X3, X4) F0(N)
930 #define F5(N, X1, X2, X3, X4, X5) F0(N)
933 #include "insn-format.def"
943 /* Define a structure to hold the decoded fields. We'll store each inside
944 an array indexed by an enum. In order to conserve memory, we'll arrange
945 for fields that do not exist at the same time to overlap, thus the "C"
946 for compact. For checking purposes there is an "O" for original index
947 as well that will be applied to availability bitmaps. */
949 enum DisasFieldIndexO
{
972 enum DisasFieldIndexC
{
1003 struct DisasFields
{
1006 unsigned presentC
:16;
1007 unsigned int presentO
;
1011 /* This is the way fields are to be accessed out of DisasFields. */
1012 #define have_field(S, F) have_field1((S), FLD_O_##F)
1013 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1015 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1017 return (f
->presentO
>> c
) & 1;
1020 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1021 enum DisasFieldIndexC c
)
1023 assert(have_field1(f
, o
));
1027 /* Describe the layout of each field in each format. */
1028 typedef struct DisasField
{
1030 unsigned int size
:8;
1031 unsigned int type
:2;
1032 unsigned int indexC
:6;
1033 enum DisasFieldIndexO indexO
:8;
1036 typedef struct DisasFormatInfo
{
1037 DisasField op
[NUM_C_FIELD
];
1040 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1041 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1042 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1043 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1044 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1045 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1046 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1047 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1048 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1049 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1050 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1051 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1052 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1053 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1055 #define F0(N) { { } },
1056 #define F1(N, X1) { { X1 } },
1057 #define F2(N, X1, X2) { { X1, X2 } },
1058 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1059 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1060 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1062 static const DisasFormatInfo format_info
[] = {
1063 #include "insn-format.def"
1081 /* Generally, we'll extract operands into this structures, operate upon
1082 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1083 of routines below for more details. */
1085 bool g_out
, g_out2
, g_in1
, g_in2
;
1086 TCGv_i64 out
, out2
, in1
, in2
;
1090 /* Instructions can place constraints on their operands, raising specification
1091 exceptions if they are violated. To make this easy to automate, each "in1",
1092 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1093 of the following, or 0. To make this easy to document, we'll put the
1094 SPEC_<name> defines next to <name>. */
1096 #define SPEC_r1_even 1
1097 #define SPEC_r2_even 2
1098 #define SPEC_r3_even 4
1099 #define SPEC_r1_f128 8
1100 #define SPEC_r2_f128 16
1102 /* Return values from translate_one, indicating the state of the TB. */
1104 /* Continue the TB. */
1106 /* We have emitted one or more goto_tb. No fixup required. */
1108 /* We are not using a goto_tb (for whatever reason), but have updated
1109 the PC (for whatever reason), so there's no need to do it again on
1112 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1113 updated the PC for the next instruction to be executed. */
1115 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1116 No following code will be executed. */
1120 typedef enum DisasFacility
{
1121 FAC_Z
, /* zarch (default) */
1122 FAC_CASS
, /* compare and swap and store */
1123 FAC_CASS2
, /* compare and swap and store 2*/
1124 FAC_DFP
, /* decimal floating point */
1125 FAC_DFPR
, /* decimal floating point rounding */
1126 FAC_DO
, /* distinct operands */
1127 FAC_EE
, /* execute extensions */
1128 FAC_EI
, /* extended immediate */
1129 FAC_FPE
, /* floating point extension */
1130 FAC_FPSSH
, /* floating point support sign handling */
1131 FAC_FPRGR
, /* FPR-GR transfer */
1132 FAC_GIE
, /* general instructions extension */
1133 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1134 FAC_HW
, /* high-word */
1135 FAC_IEEEE_SIM
, /* IEEE exception simulation */
1136 FAC_MIE
, /* miscellaneous-instruction-extensions */
1137 FAC_LAT
, /* load-and-trap */
1138 FAC_LOC
, /* load/store on condition */
1139 FAC_LD
, /* long displacement */
1140 FAC_PC
, /* population count */
1141 FAC_SCF
, /* store clock fast */
1142 FAC_SFLE
, /* store facility list extended */
1143 FAC_ILA
, /* interlocked access facility 1 */
1149 DisasFacility fac
:8;
1154 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1155 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1156 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1157 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1158 void (*help_cout
)(DisasContext
*, DisasOps
*);
1159 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1164 /* ====================================================================== */
1165 /* Miscellaneous helpers, used by several operations. */
1167 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1168 DisasOps
*o
, int mask
)
1170 int b2
= get_field(f
, b2
);
1171 int d2
= get_field(f
, d2
);
1174 o
->in2
= tcg_const_i64(d2
& mask
);
1176 o
->in2
= get_address(s
, 0, b2
, d2
);
1177 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1181 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1183 if (dest
== s
->next_pc
) {
1186 if (use_goto_tb(s
, dest
)) {
1189 tcg_gen_movi_i64(psw_addr
, dest
);
1190 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1191 return EXIT_GOTO_TB
;
1193 tcg_gen_movi_i64(psw_addr
, dest
);
1194 return EXIT_PC_UPDATED
;
1198 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1199 bool is_imm
, int imm
, TCGv_i64 cdest
)
1202 uint64_t dest
= s
->pc
+ 2 * imm
;
1205 /* Take care of the special cases first. */
1206 if (c
->cond
== TCG_COND_NEVER
) {
1211 if (dest
== s
->next_pc
) {
1212 /* Branch to next. */
1216 if (c
->cond
== TCG_COND_ALWAYS
) {
1217 ret
= help_goto_direct(s
, dest
);
1221 if (TCGV_IS_UNUSED_I64(cdest
)) {
1222 /* E.g. bcr %r0 -> no branch. */
1226 if (c
->cond
== TCG_COND_ALWAYS
) {
1227 tcg_gen_mov_i64(psw_addr
, cdest
);
1228 ret
= EXIT_PC_UPDATED
;
1233 if (use_goto_tb(s
, s
->next_pc
)) {
1234 if (is_imm
&& use_goto_tb(s
, dest
)) {
1235 /* Both exits can use goto_tb. */
1238 lab
= gen_new_label();
1240 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1242 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1245 /* Branch not taken. */
1247 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1248 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1253 tcg_gen_movi_i64(psw_addr
, dest
);
1254 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1258 /* Fallthru can use goto_tb, but taken branch cannot. */
1259 /* Store taken branch destination before the brcond. This
1260 avoids having to allocate a new local temp to hold it.
1261 We'll overwrite this in the not taken case anyway. */
1263 tcg_gen_mov_i64(psw_addr
, cdest
);
1266 lab
= gen_new_label();
1268 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1270 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1273 /* Branch not taken. */
1276 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1277 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1281 tcg_gen_movi_i64(psw_addr
, dest
);
1283 ret
= EXIT_PC_UPDATED
;
1286 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1287 Most commonly we're single-stepping or some other condition that
1288 disables all use of goto_tb. Just update the PC and exit. */
1290 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1292 cdest
= tcg_const_i64(dest
);
1296 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1299 TCGv_i32 t0
= tcg_temp_new_i32();
1300 TCGv_i64 t1
= tcg_temp_new_i64();
1301 TCGv_i64 z
= tcg_const_i64(0);
1302 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1303 tcg_gen_extu_i32_i64(t1
, t0
);
1304 tcg_temp_free_i32(t0
);
1305 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1306 tcg_temp_free_i64(t1
);
1307 tcg_temp_free_i64(z
);
1311 tcg_temp_free_i64(cdest
);
1313 tcg_temp_free_i64(next
);
1315 ret
= EXIT_PC_UPDATED
;
1323 /* ====================================================================== */
1324 /* The operations. These perform the bulk of the work for any insn,
1325 usually after the operands have been loaded and output initialized. */
1327 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1330 z
= tcg_const_i64(0);
1331 n
= tcg_temp_new_i64();
1332 tcg_gen_neg_i64(n
, o
->in2
);
1333 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1334 tcg_temp_free_i64(n
);
1335 tcg_temp_free_i64(z
);
1339 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1341 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1345 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1347 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1351 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1353 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1354 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1358 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1360 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1364 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1369 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1371 /* The carry flag is the msb of CC, therefore the branch mask that would
1372 create that comparison is 3. Feeding the generated comparison to
1373 setcond produces the carry flag that we desire. */
1374 disas_jcc(s
, &cmp
, 3);
1375 carry
= tcg_temp_new_i64();
1377 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1379 TCGv_i32 t
= tcg_temp_new_i32();
1380 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1381 tcg_gen_extu_i32_i64(carry
, t
);
1382 tcg_temp_free_i32(t
);
1386 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1387 tcg_temp_free_i64(carry
);
1391 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1393 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1397 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1399 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1403 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1405 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1406 return_low128(o
->out2
);
1410 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1412 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1416 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1418 int shift
= s
->insn
->data
& 0xff;
1419 int size
= s
->insn
->data
>> 8;
1420 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1423 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1424 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1425 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1427 /* Produce the CC from only the bits manipulated. */
1428 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1429 set_cc_nz_u64(s
, cc_dst
);
1433 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1435 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1436 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1437 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1438 return EXIT_PC_UPDATED
;
1444 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1446 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1447 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1450 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1452 int m1
= get_field(s
->fields
, m1
);
1453 bool is_imm
= have_field(s
->fields
, i2
);
1454 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1457 disas_jcc(s
, &c
, m1
);
1458 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1461 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1463 int r1
= get_field(s
->fields
, r1
);
1464 bool is_imm
= have_field(s
->fields
, i2
);
1465 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1469 c
.cond
= TCG_COND_NE
;
1474 t
= tcg_temp_new_i64();
1475 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1476 store_reg32_i64(r1
, t
);
1477 c
.u
.s32
.a
= tcg_temp_new_i32();
1478 c
.u
.s32
.b
= tcg_const_i32(0);
1479 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1480 tcg_temp_free_i64(t
);
1482 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1485 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1487 int r1
= get_field(s
->fields
, r1
);
1488 int imm
= get_field(s
->fields
, i2
);
1492 c
.cond
= TCG_COND_NE
;
1497 t
= tcg_temp_new_i64();
1498 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1499 tcg_gen_subi_i64(t
, t
, 1);
1500 store_reg32h_i64(r1
, t
);
1501 c
.u
.s32
.a
= tcg_temp_new_i32();
1502 c
.u
.s32
.b
= tcg_const_i32(0);
1503 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1504 tcg_temp_free_i64(t
);
1506 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1509 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1511 int r1
= get_field(s
->fields
, r1
);
1512 bool is_imm
= have_field(s
->fields
, i2
);
1513 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1516 c
.cond
= TCG_COND_NE
;
1521 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1522 c
.u
.s64
.a
= regs
[r1
];
1523 c
.u
.s64
.b
= tcg_const_i64(0);
1525 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1528 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1530 int r1
= get_field(s
->fields
, r1
);
1531 int r3
= get_field(s
->fields
, r3
);
1532 bool is_imm
= have_field(s
->fields
, i2
);
1533 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1537 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1542 t
= tcg_temp_new_i64();
1543 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1544 c
.u
.s32
.a
= tcg_temp_new_i32();
1545 c
.u
.s32
.b
= tcg_temp_new_i32();
1546 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1547 tcg_gen_trunc_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1548 store_reg32_i64(r1
, t
);
1549 tcg_temp_free_i64(t
);
1551 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG and relative forms): as op_bx32
   but on full 64-bit registers.  insn->data nonzero -> TCG_COND_LE
   (branch on low-or-equal), zero -> TCG_COND_GT (branch on high).
   NOTE(review): braces, DisasCompare declaration and the else-arm
   structure around the two c.u.s64.b assignments were elided by
   extraction; upstream snapshots the comparand with load_reg() only
   when r1 aliases r3|1 (so the pre-add value is compared), and uses
   the live register otherwise. */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    if (r1 == (r3 | 1)) {
        /* R1 is also the comparand: copy it before it is clobbered. */
        c.u.s64.b = load_reg(r3 | 1);
        /* (else arm) comparand register is distinct; use it directly. */
        c.u.s64.b = regs[r3 | 1];
    /* index += increment, in place. */
    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    return help_branch(s, &c, is_imm, imm, o->in2);
/* COMPARE AND BRANCH / COMPARE AND TRAP family (CRJ/CGRJ/CIJ/... ):
   compare in1 with in2 per the M3 condition mask and branch on success.
   insn->data nonzero selects the logical (unsigned) flavor.
   NOTE(review): braces, the DisasCompare declaration, and the
   branches choosing between relative-immediate (i4) and base+disp (b4/d4)
   targets were elided by extraction -- restore from upstream. */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
    int imm, m3 = get_field(s->fields, m3);
    /* Map the mask to a TCG condition; ltgt_cond is the table for
       the <, =, > mask bits. */
    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    /* Both operands are globals; help_branch must not free them. */
    c.is_64 = c.g1 = c.g2 = true;
    is_imm = have_field(s->fields, i4);
    imm = get_field(s->fields, i4);
    /* Non-immediate form: compute the branch target address. */
    o->out = get_address(s, 0, get_field(s->fields, b4),
                         get_field(s->fields, d4));
    return help_branch(s, &c, is_imm, imm, o->out);
/* COMPARE (short BFP, CEB/CEBR): the helper compares in1 with in2 and
   returns the condition code, which lands in cc_op.
   NOTE(review): braces, set_cc_static(s) and return NO_EXIT elided by
   extraction -- restore from upstream. */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
/* COMPARE (long BFP, CDB/CDBR): helper computes the CC into cc_op.
   NOTE(review): braces, set_cc_static(s) and return NO_EXIT elided by
   extraction -- restore from upstream. */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
/* COMPARE (extended BFP, CXB/CXBR): the 128-bit first operand is passed
   as the out/out2 pair, the second as in1/in2; CC lands in cc_op.
   NOTE(review): braces, set_cc_static(s) and return NO_EXIT elided by
   extraction -- restore from upstream. */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
/* CONVERT TO FIXED (short BFP -> 32-bit int, CFEB): M3 is the rounding
   mode, passed to the helper; CC is set from the (nonzero) source value.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
1636 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1638 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1639 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1640 tcg_temp_free_i32(m3
);
1641 gen_set_cc_nz_f64(s
, o
->in2
);
1645 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1647 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1648 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1649 tcg_temp_free_i32(m3
);
1650 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1654 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1656 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1657 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1658 tcg_temp_free_i32(m3
);
1659 gen_set_cc_nz_f32(s
, o
->in2
);
1663 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1665 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1666 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1667 tcg_temp_free_i32(m3
);
1668 gen_set_cc_nz_f64(s
, o
->in2
);
1672 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1674 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1675 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1676 tcg_temp_free_i32(m3
);
1677 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1681 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1683 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1684 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1685 tcg_temp_free_i32(m3
);
1686 gen_set_cc_nz_f32(s
, o
->in2
);
1690 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1692 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1693 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1694 tcg_temp_free_i32(m3
);
1695 gen_set_cc_nz_f64(s
, o
->in2
);
1699 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1701 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1702 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1703 tcg_temp_free_i32(m3
);
1704 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1708 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1710 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1711 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1712 tcg_temp_free_i32(m3
);
1713 gen_set_cc_nz_f32(s
, o
->in2
);
1717 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1719 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1720 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1721 tcg_temp_free_i32(m3
);
1722 gen_set_cc_nz_f64(s
, o
->in2
);
1726 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1728 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1729 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1730 tcg_temp_free_i32(m3
);
1731 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1735 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1737 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1738 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1739 tcg_temp_free_i32(m3
);
1743 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1745 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1746 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1747 tcg_temp_free_i32(m3
);
1751 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1753 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1754 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1755 tcg_temp_free_i32(m3
);
1756 return_low128(o
->out2
);
1760 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1762 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1763 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1764 tcg_temp_free_i32(m3
);
1768 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1770 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1771 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1772 tcg_temp_free_i32(m3
);
1776 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1778 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1779 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1780 tcg_temp_free_i32(m3
);
1781 return_low128(o
->out2
);
/* CHECKSUM (CKSM): helper computes the checksum over the R2/R2+1
   address/length pair; the processed byte count comes back in 'len'
   and is folded into the register pair afterwards.
   NOTE(review): braces and return statement elided by extraction. */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();
    /* The helper may fault on the operand accesses. */
    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    return_low128(o->out);
    /* Advance the address and shrink the remaining length by the
       number of bytes the helper consumed. */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);
/* COMPARE LOGICAL (CLC): unsigned memory-memory compare of l+1 bytes.
   For power-of-two lengths 1/2/4/8 the compare is inlined as two loads
   plus an unsigned CC computation; all other lengths go through the
   helper, which sets cc_op directly.
   NOTE(review): the switch(l) skeleton, braces, set_cc_static and the
   return statements were elided by extraction; the load pairs below are
   the case bodies for l == 0, 1, 3 and 7 respectively. */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
    int l = get_field(s->fields, l1);
    /* case 0: single byte. */
    tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
    tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
    /* case 1: halfword. */
    tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
    tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
    /* case 3: word. */
    tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
    tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
    /* case 7: doubleword. */
    tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
    tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
    /* default: arbitrary length via helper. */
    potential_page_fault(s);
    vl = tcg_const_i32(l);
    gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
    tcg_temp_free_i32(vl);
    /* Inlined paths: unsigned compare of the two loaded values. */
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1836 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1838 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1839 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1840 potential_page_fault(s
);
1841 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1842 tcg_temp_free_i32(r1
);
1843 tcg_temp_free_i32(r3
);
1848 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1850 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1851 TCGv_i32 t1
= tcg_temp_new_i32();
1852 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
1853 potential_page_fault(s
);
1854 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1856 tcg_temp_free_i32(t1
);
1857 tcg_temp_free_i32(m3
);
1861 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1863 potential_page_fault(s
);
1864 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1866 return_low128(o
->in2
);
/* COPY SIGN (CPSDR): out = sign bit of in1 combined with the magnitude
   of in2, operating on the raw 64-bit FP register images.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_cps(DisasContext *s, DisasOps *o)
    TCGv_i64 t = tcg_temp_new_i64();
    /* Isolate the sign bit of in1... */
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    /* ...the magnitude of in2... */
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    /* ...and merge. */
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
/* COMPARE AND SWAP (CS/CSG/CSY): non-atomic translation of the
   compare-and-swap primitive.  The memory word is loaded, compared with
   the expected value in R1, and either the new value (R3) or the old
   memory contents are unconditionally stored back; CC = 0 on match,
   1 on mismatch.
   NOTE(review): braces, the if(is_64) selections between the 64- and
   32-bit load/store pairs, and the return statement were elided by
   extraction -- restore from upstream. */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;
    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value). */
    /* Load the memory into the (temporary) output.  While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions. */
    addr = get_address(s, 0, b2, d2);
    tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test. */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality. */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);
    /* Store CC back to cc_op.  Wait until after the store so that any
       exception gets the old cc_op value. */
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
/* COMPARE DOUBLE AND SWAP (CDSG): 128-bit compare-and-swap built from
   two 64-bit accesses.  R1:R1+1 is the expected value, R3:R3+1 the
   replacement; the old memory contents are returned in R1:R1+1 and
   CC = 0/1 reports match/mismatch.
   NOTE(review): braces and the trailing return were elided by
   extraction -- restore from upstream. */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
    /* Fold the double-word compare with arithmetic. */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    /* cc = (high-xor | low-xor) != 0, i.e. 1 on mismatch. */
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
    /* Select between the new value and the original memory contents,
       so the stores can be issued unconditionally (see op_cs). */
    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);
    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);
    /* Save back state now that we've passed all exceptions. */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
1982 #ifndef CONFIG_USER_ONLY
1983 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
1985 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1986 check_privileged(s
);
1987 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
1988 tcg_temp_free_i32(r1
);
1994 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
1996 TCGv_i64 t1
= tcg_temp_new_i64();
1997 TCGv_i32 t2
= tcg_temp_new_i32();
1998 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
1999 gen_helper_cvd(t1
, t2
);
2000 tcg_temp_free_i32(t2
);
2001 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2002 tcg_temp_free_i64(t1
);
2006 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2008 int m3
= get_field(s
->fields
, m3
);
2009 TCGLabel
*lab
= gen_new_label();
2012 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2013 if (s
->insn
->data
) {
2014 c
= tcg_unsigned_cond(c
);
2016 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2025 #ifndef CONFIG_USER_ONLY
2026 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2030 check_privileged(s
);
2031 potential_page_fault(s
);
2033 /* We pretend the format is RX_a so that D2 is the field we want. */
2034 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
2035 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
2036 tcg_temp_free_i32(tmp
);
2041 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2043 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2044 return_low128(o
->out
);
2048 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2050 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2051 return_low128(o
->out
);
2055 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2057 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2058 return_low128(o
->out
);
2062 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2064 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2065 return_low128(o
->out
);
/* DIVIDE (short BFP, DEB/DEBR): out = in1 / in2 via helper, which also
   handles IEEE exception flags.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
/* DIVIDE (long BFP, DDB/DDBR): out = in1 / in2 via helper.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
/* DIVIDE (extended BFP, DXB/DXBR): 128-bit dividend in out/out2,
   128-bit divisor in in1/in2; the low half of the result comes back
   through return_low128.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
/* EXTRACT ACCESS REGISTER (EAR): load access register R2 (32-bit,
   zero-extended) into the output.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
/* EXTRACT CPU ATTRIBUTE (ECAG): QEMU models no cache topology, so
   report -1 ("no information").
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
    /* No cache information provided. */
    tcg_gen_movi_i64(o->out, -1);
/* EXTRACT FPC (EFPC): read the 32-bit floating-point control register
   from CPU state into the output (zero-extended).
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2108 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2110 int r1
= get_field(s
->fields
, r1
);
2111 int r2
= get_field(s
->fields
, r2
);
2112 TCGv_i64 t
= tcg_temp_new_i64();
2114 /* Note the "subsequently" in the PoO, which implies a defined result
2115 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2116 tcg_gen_shri_i64(t
, psw_mask
, 32);
2117 store_reg32_i64(r1
, t
);
2119 store_reg32_i64(r2
, psw_mask
);
2122 tcg_temp_free_i64(t
);
2126 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2128 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2129 tb->flags, (ab)use the tb->cs_base field as the address of
2130 the template in memory, and grab 8 bits of tb->flags/cflags for
2131 the contents of the register. We would then recognize all this
2132 in gen_intermediate_code_internal, generating code for exactly
2133 one instruction. This new TB then gets executed normally.
2135 On the other hand, this seems to be mostly used for modifying
2136 MVC inside of memcpy, which needs a helper call anyway. So
2137 perhaps this doesn't bear thinking about any further. */
2144 tmp
= tcg_const_i64(s
->next_pc
);
2145 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2146 tcg_temp_free_i64(tmp
);
2151 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2153 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2154 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2155 tcg_temp_free_i32(m3
);
2159 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2161 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2162 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2163 tcg_temp_free_i32(m3
);
2167 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2169 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2170 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2171 return_low128(o
->out2
);
2172 tcg_temp_free_i32(m3
);
/* FIND LEFTMOST ONE (FLOGR): R1 = count of leading zeros (64 if the
   input is zero), R1+1 = input with the found bit cleared.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation. */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
    /* R1 = IN ? CLZ(IN) : 64. */
    gen_helper_clz(o->out, o->in2);
    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing. */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    /* cc_dst still holds the original input (set up by the CC update). */
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2196 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2198 int m3
= get_field(s
->fields
, m3
);
2199 int pos
, len
, base
= s
->insn
->data
;
2200 TCGv_i64 tmp
= tcg_temp_new_i64();
2205 /* Effectively a 32-bit load. */
2206 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2213 /* Effectively a 16-bit load. */
2214 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2222 /* Effectively an 8-bit load. */
2223 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2228 pos
= base
+ ctz32(m3
) * 8;
2229 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2230 ccm
= ((1ull << len
) - 1) << pos
;
2234 /* This is going to be a sequence of loads and inserts. */
2235 pos
= base
+ 32 - 8;
2239 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2240 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2241 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2244 m3
= (m3
<< 1) & 0xf;
2250 tcg_gen_movi_i64(tmp
, ccm
);
2251 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2252 tcg_temp_free_i64(tmp
);
2256 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2258 int shift
= s
->insn
->data
& 0xff;
2259 int size
= s
->insn
->data
>> 8;
2260 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2264 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2269 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2271 t1
= tcg_temp_new_i64();
2272 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2273 tcg_gen_shri_i64(t1
, t1
, 36);
2274 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2276 tcg_gen_extu_i32_i64(t1
, cc_op
);
2277 tcg_gen_shli_i64(t1
, t1
, 28);
2278 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2279 tcg_temp_free_i64(t1
);
2283 #ifndef CONFIG_USER_ONLY
2284 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2286 check_privileged(s
);
2287 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2291 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2293 check_privileged(s
);
2294 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2299 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2301 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2305 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2307 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2311 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2313 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2317 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2319 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2323 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2325 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2326 return_low128(o
->out2
);
2330 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2332 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2333 return_low128(o
->out2
);
/* LOAD LOGICAL THIRTY ONE BITS (LLGT*): keep only bits 33-63 of the
   source, i.e. mask with 0x7fffffff.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
/* Load a sign-extended byte from the address in in2.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
/* Load a zero-extended byte from the address in in2.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
/* Load a sign-extended halfword from the address in in2.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
/* Load a zero-extended halfword from the address in in2.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
/* Load a sign-extended word from the address in in2.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
/* Load a zero-extended word from the address in in2.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
/* Load a doubleword from the address in in2.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2385 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2387 TCGLabel
*lab
= gen_new_label();
2388 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2389 /* The value is stored even in case of trap. */
2390 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2396 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2398 TCGLabel
*lab
= gen_new_label();
2399 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2400 /* The value is stored even in case of trap. */
2401 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2407 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2409 TCGLabel
*lab
= gen_new_label();
2410 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2411 /* The value is stored even in case of trap. */
2412 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2418 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2420 TCGLabel
*lab
= gen_new_label();
2421 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2422 /* The value is stored even in case of trap. */
2423 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2429 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2431 TCGLabel
*lab
= gen_new_label();
2432 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2433 /* The value is stored even in case of trap. */
2434 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2440 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2444 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2447 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2451 TCGv_i32 t32
= tcg_temp_new_i32();
2454 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2457 t
= tcg_temp_new_i64();
2458 tcg_gen_extu_i32_i64(t
, t32
);
2459 tcg_temp_free_i32(t32
);
2461 z
= tcg_const_i64(0);
2462 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2463 tcg_temp_free_i64(t
);
2464 tcg_temp_free_i64(z
);
2470 #ifndef CONFIG_USER_ONLY
2471 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2473 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2474 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2475 check_privileged(s
);
2476 potential_page_fault(s
);
2477 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2478 tcg_temp_free_i32(r1
);
2479 tcg_temp_free_i32(r3
);
2483 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2485 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2486 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2487 check_privileged(s
);
2488 potential_page_fault(s
);
2489 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2490 tcg_temp_free_i32(r1
);
2491 tcg_temp_free_i32(r3
);
2494 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2496 check_privileged(s
);
2497 potential_page_fault(s
);
2498 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2503 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2507 check_privileged(s
);
2509 t1
= tcg_temp_new_i64();
2510 t2
= tcg_temp_new_i64();
2511 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2512 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2513 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2514 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2515 tcg_gen_shli_i64(t1
, t1
, 32);
2516 gen_helper_load_psw(cpu_env
, t1
, t2
);
2517 tcg_temp_free_i64(t1
);
2518 tcg_temp_free_i64(t2
);
2519 return EXIT_NORETURN
;
2522 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2526 check_privileged(s
);
2528 t1
= tcg_temp_new_i64();
2529 t2
= tcg_temp_new_i64();
2530 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2531 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2532 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2533 gen_helper_load_psw(cpu_env
, t1
, t2
);
2534 tcg_temp_free_i64(t1
);
2535 tcg_temp_free_i64(t2
);
2536 return EXIT_NORETURN
;
2540 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2542 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2543 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2544 potential_page_fault(s
);
2545 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2546 tcg_temp_free_i32(r1
);
2547 tcg_temp_free_i32(r3
);
2551 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2553 int r1
= get_field(s
->fields
, r1
);
2554 int r3
= get_field(s
->fields
, r3
);
2557 /* Only one register to read. */
2558 t1
= tcg_temp_new_i64();
2559 if (unlikely(r1
== r3
)) {
2560 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2561 store_reg32_i64(r1
, t1
);
2566 /* First load the values of the first and last registers to trigger
2567 possible page faults. */
2568 t2
= tcg_temp_new_i64();
2569 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2570 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2571 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2572 store_reg32_i64(r1
, t1
);
2573 store_reg32_i64(r3
, t2
);
2575 /* Only two registers to read. */
2576 if (((r1
+ 1) & 15) == r3
) {
2582 /* Then load the remaining registers. Page fault can't occur. */
2584 tcg_gen_movi_i64(t2
, 4);
2587 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2588 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2589 store_reg32_i64(r1
, t1
);
2597 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2599 int r1
= get_field(s
->fields
, r1
);
2600 int r3
= get_field(s
->fields
, r3
);
2603 /* Only one register to read. */
2604 t1
= tcg_temp_new_i64();
2605 if (unlikely(r1
== r3
)) {
2606 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2607 store_reg32h_i64(r1
, t1
);
2612 /* First load the values of the first and last registers to trigger
2613 possible page faults. */
2614 t2
= tcg_temp_new_i64();
2615 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2616 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2617 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2618 store_reg32h_i64(r1
, t1
);
2619 store_reg32h_i64(r3
, t2
);
2621 /* Only two registers to read. */
2622 if (((r1
+ 1) & 15) == r3
) {
2628 /* Then load the remaining registers. Page fault can't occur. */
2630 tcg_gen_movi_i64(t2
, 4);
2633 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2634 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2635 store_reg32h_i64(r1
, t1
);
2643 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2645 int r1
= get_field(s
->fields
, r1
);
2646 int r3
= get_field(s
->fields
, r3
);
2649 /* Only one register to read. */
2650 if (unlikely(r1
== r3
)) {
2651 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2655 /* First load the values of the first and last registers to trigger
2656 possible page faults. */
2657 t1
= tcg_temp_new_i64();
2658 t2
= tcg_temp_new_i64();
2659 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2660 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2661 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2662 tcg_gen_mov_i64(regs
[r1
], t1
);
2665 /* Only two registers to read. */
2666 if (((r1
+ 1) & 15) == r3
) {
2671 /* Then load the remaining registers. Page fault can't occur. */
2673 tcg_gen_movi_i64(t1
, 8);
2676 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2677 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2684 #ifndef CONFIG_USER_ONLY
2685 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2687 check_privileged(s
);
2688 potential_page_fault(s
);
2689 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2693 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2695 check_privileged(s
);
2696 potential_page_fault(s
);
2697 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2702 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2705 o
->g_out
= o
->g_in2
;
2706 TCGV_UNUSED_I64(o
->in2
);
2711 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
2713 int b2
= get_field(s
->fields
, b2
);
2714 TCGv ar1
= tcg_temp_new_i64();
2717 o
->g_out
= o
->g_in2
;
2718 TCGV_UNUSED_I64(o
->in2
);
2721 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
2722 case PSW_ASC_PRIMARY
>> 32:
2723 tcg_gen_movi_i64(ar1
, 0);
2725 case PSW_ASC_ACCREG
>> 32:
2726 tcg_gen_movi_i64(ar1
, 1);
2728 case PSW_ASC_SECONDARY
>> 32:
2730 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
2732 tcg_gen_movi_i64(ar1
, 0);
2735 case PSW_ASC_HOME
>> 32:
2736 tcg_gen_movi_i64(ar1
, 2);
2740 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
2741 tcg_temp_free_i64(ar1
);
2746 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2750 o
->g_out
= o
->g_in1
;
2751 o
->g_out2
= o
->g_in2
;
2752 TCGV_UNUSED_I64(o
->in1
);
2753 TCGV_UNUSED_I64(o
->in2
);
2754 o
->g_in1
= o
->g_in2
= false;
2758 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2760 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2761 potential_page_fault(s
);
2762 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2763 tcg_temp_free_i32(l
);
2767 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2769 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2770 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2771 potential_page_fault(s
);
2772 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2773 tcg_temp_free_i32(r1
);
2774 tcg_temp_free_i32(r2
);
2779 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2781 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2782 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2783 potential_page_fault(s
);
2784 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2785 tcg_temp_free_i32(r1
);
2786 tcg_temp_free_i32(r3
);
2791 #ifndef CONFIG_USER_ONLY
2792 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2794 int r1
= get_field(s
->fields
, l1
);
2795 check_privileged(s
);
2796 potential_page_fault(s
);
2797 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2802 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2804 int r1
= get_field(s
->fields
, l1
);
2805 check_privileged(s
);
2806 potential_page_fault(s
);
2807 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2813 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2815 potential_page_fault(s
);
2816 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2821 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2823 potential_page_fault(s
);
2824 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2826 return_low128(o
->in2
);
/* MULTIPLY (single-width integer forms): out = in1 * in2, low 64 bits.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
/* MULTIPLY LOGICAL (128-bit result, MLG/MLGR): unsigned 64x64->128
   multiply; high half in out, low half in out2.
   NOTE(review): braces and return NO_EXIT elided by extraction. */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
2842 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2844 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2848 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2850 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2854 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2856 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2860 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2862 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2863 return_low128(o
->out2
);
2867 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2869 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2870 return_low128(o
->out2
);
2874 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2876 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2877 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2878 tcg_temp_free_i64(r3
);
2882 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2884 int r3
= get_field(s
->fields
, r3
);
2885 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2889 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2891 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2892 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2893 tcg_temp_free_i64(r3
);
2897 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2899 int r3
= get_field(s
->fields
, r3
);
2900 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2904 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2907 z
= tcg_const_i64(0);
2908 n
= tcg_temp_new_i64();
2909 tcg_gen_neg_i64(n
, o
->in2
);
2910 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
2911 tcg_temp_free_i64(n
);
2912 tcg_temp_free_i64(z
);
2916 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2918 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2922 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2924 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2928 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2930 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2931 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2935 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2937 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2938 potential_page_fault(s
);
2939 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2940 tcg_temp_free_i32(l
);
2945 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2947 tcg_gen_neg_i64(o
->out
, o
->in2
);
2951 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2953 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2957 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2959 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2963 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2965 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2966 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2970 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2972 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2973 potential_page_fault(s
);
2974 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2975 tcg_temp_free_i32(l
);
2980 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2982 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2986 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2988 int shift
= s
->insn
->data
& 0xff;
2989 int size
= s
->insn
->data
>> 8;
2990 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2993 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2994 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2996 /* Produce the CC from only the bits manipulated. */
2997 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2998 set_cc_nz_u64(s
, cc_dst
);
3002 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3004 gen_helper_popcnt(o
->out
, o
->in2
);
3008 #ifndef CONFIG_USER_ONLY
3009 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3011 check_privileged(s
);
3012 gen_helper_ptlb(cpu_env
);
3017 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3019 int i3
= get_field(s
->fields
, i3
);
3020 int i4
= get_field(s
->fields
, i4
);
3021 int i5
= get_field(s
->fields
, i5
);
3022 int do_zero
= i4
& 0x80;
3023 uint64_t mask
, imask
, pmask
;
3026 /* Adjust the arguments for the specific insn. */
3027 switch (s
->fields
->op2
) {
3028 case 0x55: /* risbg */
3033 case 0x5d: /* risbhg */
3036 pmask
= 0xffffffff00000000ull
;
3038 case 0x51: /* risblg */
3041 pmask
= 0x00000000ffffffffull
;
3047 /* MASK is the set of bits to be inserted from R2.
3048 Take care for I3/I4 wraparound. */
3051 mask
^= pmask
>> i4
>> 1;
3053 mask
|= ~(pmask
>> i4
>> 1);
3057 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3058 insns, we need to keep the other half of the register. */
3059 imask
= ~mask
| ~pmask
;
3061 if (s
->fields
->op2
== 0x55) {
3068 /* In some cases we can implement this with deposit, which can be more
3069 efficient on some hosts. */
3070 if (~mask
== imask
&& i3
<= i4
) {
3071 if (s
->fields
->op2
== 0x5d) {
3074 /* Note that we rotate the bits to be inserted to the lsb, not to
3075 the position as described in the PoO. */
3078 rot
= (i5
- pos
) & 63;
3084 /* Rotate the input as necessary. */
3085 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3087 /* Insert the selected bits into the output. */
3089 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3090 } else if (imask
== 0) {
3091 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3093 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3094 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3095 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3100 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3102 int i3
= get_field(s
->fields
, i3
);
3103 int i4
= get_field(s
->fields
, i4
);
3104 int i5
= get_field(s
->fields
, i5
);
3107 /* If this is a test-only form, arrange to discard the result. */
3109 o
->out
= tcg_temp_new_i64();
3117 /* MASK is the set of bits to be operated on from R2.
3118 Take care for I3/I4 wraparound. */
3121 mask
^= ~0ull >> i4
>> 1;
3123 mask
|= ~(~0ull >> i4
>> 1);
3126 /* Rotate the input as necessary. */
3127 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3130 switch (s
->fields
->op2
) {
3131 case 0x55: /* AND */
3132 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3133 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3136 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3137 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3139 case 0x57: /* XOR */
3140 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3141 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3148 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3149 set_cc_nz_u64(s
, cc_dst
);
3153 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
3155 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3159 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
3161 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3165 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
3167 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3171 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3173 TCGv_i32 t1
= tcg_temp_new_i32();
3174 TCGv_i32 t2
= tcg_temp_new_i32();
3175 TCGv_i32 to
= tcg_temp_new_i32();
3176 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
3177 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
3178 tcg_gen_rotl_i32(to
, t1
, t2
);
3179 tcg_gen_extu_i32_i64(o
->out
, to
);
3180 tcg_temp_free_i32(t1
);
3181 tcg_temp_free_i32(t2
);
3182 tcg_temp_free_i32(to
);
3186 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3188 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3192 #ifndef CONFIG_USER_ONLY
3193 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3195 check_privileged(s
);
3196 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3201 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
3203 check_privileged(s
);
3204 gen_helper_sacf(cpu_env
, o
->in2
);
3205 /* Addressing mode has changed, so end the block. */
3206 return EXIT_PC_STALE
;
3210 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3212 int sam
= s
->insn
->data
;
3228 /* Bizarre but true, we check the address of the current insn for the
3229 specification exception, not the next to be executed. Thus the PoO
3230 documents that Bad Things Happen two bytes before the end. */
3231 if (s
->pc
& ~mask
) {
3232 gen_program_exception(s
, PGM_SPECIFICATION
);
3233 return EXIT_NORETURN
;
3237 tsam
= tcg_const_i64(sam
);
3238 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3239 tcg_temp_free_i64(tsam
);
3241 /* Always exit the TB, since we (may have) changed execution mode. */
3242 return EXIT_PC_STALE
;
3245 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3247 int r1
= get_field(s
->fields
, r1
);
3248 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3252 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3254 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3258 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3260 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3264 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3266 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3267 return_low128(o
->out2
);
3271 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3273 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3277 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3279 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3283 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3285 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3286 return_low128(o
->out2
);
3290 #ifndef CONFIG_USER_ONLY
3291 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3293 check_privileged(s
);
3294 potential_page_fault(s
);
3295 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3300 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3302 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3303 check_privileged(s
);
3304 potential_page_fault(s
);
3305 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3306 tcg_temp_free_i32(r1
);
3311 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3318 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3320 /* We want to store when the condition is fulfilled, so branch
3321 out when it's not */
3322 c
.cond
= tcg_invert_cond(c
.cond
);
3324 lab
= gen_new_label();
3326 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3328 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3332 r1
= get_field(s
->fields
, r1
);
3333 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3334 if (s
->insn
->data
) {
3335 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3337 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3339 tcg_temp_free_i64(a
);
3345 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3347 uint64_t sign
= 1ull << s
->insn
->data
;
3348 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3349 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3350 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3351 /* The arithmetic left shift is curious in that it does not affect
3352 the sign bit. Copy that over from the source unchanged. */
3353 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3354 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3355 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3359 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3361 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3365 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3367 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3371 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3373 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3377 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3379 gen_helper_sfpc(cpu_env
, o
->in2
);
3383 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3385 gen_helper_sfas(cpu_env
, o
->in2
);
3389 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3391 int b2
= get_field(s
->fields
, b2
);
3392 int d2
= get_field(s
->fields
, d2
);
3393 TCGv_i64 t1
= tcg_temp_new_i64();
3394 TCGv_i64 t2
= tcg_temp_new_i64();
3397 switch (s
->fields
->op2
) {
3398 case 0x99: /* SRNM */
3401 case 0xb8: /* SRNMB */
3404 case 0xb9: /* SRNMT */
3410 mask
= (1 << len
) - 1;
3412 /* Insert the value into the appropriate field of the FPC. */
3414 tcg_gen_movi_i64(t1
, d2
& mask
);
3416 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3417 tcg_gen_andi_i64(t1
, t1
, mask
);
3419 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3420 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3421 tcg_temp_free_i64(t1
);
3423 /* Then install the new FPC to set the rounding mode in fpu_status. */
3424 gen_helper_sfpc(cpu_env
, t2
);
3425 tcg_temp_free_i64(t2
);
3429 #ifndef CONFIG_USER_ONLY
3430 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3432 check_privileged(s
);
3433 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3434 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
3438 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3440 check_privileged(s
);
3441 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3445 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3447 check_privileged(s
);
3448 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3452 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3454 check_privileged(s
);
3455 /* ??? Surely cpu address != cpu number. In any case the previous
3456 version of this stored more than the required half-word, so it
3457 is unlikely this has ever been tested. */
3458 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3462 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3464 gen_helper_stck(o
->out
, cpu_env
);
3465 /* ??? We don't implement clock states. */
3466 gen_op_movi_cc(s
, 0);
3470 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3472 TCGv_i64 c1
= tcg_temp_new_i64();
3473 TCGv_i64 c2
= tcg_temp_new_i64();
3474 gen_helper_stck(c1
, cpu_env
);
3475 /* Shift the 64-bit value into its place as a zero-extended
3476 104-bit value. Note that "bit positions 64-103 are always
3477 non-zero so that they compare differently to STCK"; we set
3478 the least significant bit to 1. */
3479 tcg_gen_shli_i64(c2
, c1
, 56);
3480 tcg_gen_shri_i64(c1
, c1
, 8);
3481 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3482 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3483 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3484 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3485 tcg_temp_free_i64(c1
);
3486 tcg_temp_free_i64(c2
);
3487 /* ??? We don't implement clock states. */
3488 gen_op_movi_cc(s
, 0);
3492 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3494 check_privileged(s
);
3495 gen_helper_sckc(cpu_env
, o
->in2
);
3499 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3501 check_privileged(s
);
3502 gen_helper_stckc(o
->out
, cpu_env
);
3506 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3508 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3509 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3510 check_privileged(s
);
3511 potential_page_fault(s
);
3512 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3513 tcg_temp_free_i32(r1
);
3514 tcg_temp_free_i32(r3
);
3518 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3520 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3521 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3522 check_privileged(s
);
3523 potential_page_fault(s
);
3524 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3525 tcg_temp_free_i32(r1
);
3526 tcg_temp_free_i32(r3
);
3530 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3532 TCGv_i64 t1
= tcg_temp_new_i64();
3534 check_privileged(s
);
3535 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3536 tcg_gen_ld32u_i64(t1
, cpu_env
, offsetof(CPUS390XState
, machine_type
));
3537 tcg_gen_deposit_i64(o
->out
, o
->out
, t1
, 32, 32);
3538 tcg_temp_free_i64(t1
);
3543 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3545 check_privileged(s
);
3546 gen_helper_spt(cpu_env
, o
->in2
);
3550 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3553 /* We really ought to have more complete indication of facilities
3554 that we implement. Address this when STFLE is implemented. */
3555 check_privileged(s
);
3556 f
= tcg_const_i64(0xc0000000);
3557 a
= tcg_const_i64(200);
3558 tcg_gen_qemu_st32(f
, a
, get_mem_index(s
));
3559 tcg_temp_free_i64(f
);
3560 tcg_temp_free_i64(a
);
3564 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3566 check_privileged(s
);
3567 gen_helper_stpt(o
->out
, cpu_env
);
3571 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3573 check_privileged(s
);
3574 potential_page_fault(s
);
3575 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3580 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3582 check_privileged(s
);
3583 gen_helper_spx(cpu_env
, o
->in2
);
3587 static ExitStatus
op_subchannel(DisasContext
*s
, DisasOps
*o
)
3589 check_privileged(s
);
3590 /* Not operational. */
3591 gen_op_movi_cc(s
, 3);
3595 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
3597 check_privileged(s
);
3598 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
3599 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
3603 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3605 uint64_t i2
= get_field(s
->fields
, i2
);
3608 check_privileged(s
);
3610 /* It is important to do what the instruction name says: STORE THEN.
3611 If we let the output hook perform the store then if we fault and
3612 restart, we'll have the wrong SYSTEM MASK in place. */
3613 t
= tcg_temp_new_i64();
3614 tcg_gen_shri_i64(t
, psw_mask
, 56);
3615 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3616 tcg_temp_free_i64(t
);
3618 if (s
->fields
->op
== 0xac) {
3619 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3620 (i2
<< 56) | 0x00ffffffffffffffull
);
3622 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3627 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
3629 check_privileged(s
);
3630 potential_page_fault(s
);
3631 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
3635 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
3637 check_privileged(s
);
3638 potential_page_fault(s
);
3639 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
3644 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3646 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3650 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3652 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3656 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3658 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3662 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3664 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3668 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3670 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3671 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3672 potential_page_fault(s
);
3673 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3674 tcg_temp_free_i32(r1
);
3675 tcg_temp_free_i32(r3
);
3679 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3681 int m3
= get_field(s
->fields
, m3
);
3682 int pos
, base
= s
->insn
->data
;
3683 TCGv_i64 tmp
= tcg_temp_new_i64();
3685 pos
= base
+ ctz32(m3
) * 8;
3688 /* Effectively a 32-bit store. */
3689 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3690 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3696 /* Effectively a 16-bit store. */
3697 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3698 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3705 /* Effectively an 8-bit store. */
3706 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3707 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3711 /* This is going to be a sequence of shifts and stores. */
3712 pos
= base
+ 32 - 8;
3715 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3716 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3717 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3719 m3
= (m3
<< 1) & 0xf;
3724 tcg_temp_free_i64(tmp
);
3728 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3730 int r1
= get_field(s
->fields
, r1
);
3731 int r3
= get_field(s
->fields
, r3
);
3732 int size
= s
->insn
->data
;
3733 TCGv_i64 tsize
= tcg_const_i64(size
);
3737 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3739 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3744 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3748 tcg_temp_free_i64(tsize
);
3752 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3754 int r1
= get_field(s
->fields
, r1
);
3755 int r3
= get_field(s
->fields
, r3
);
3756 TCGv_i64 t
= tcg_temp_new_i64();
3757 TCGv_i64 t4
= tcg_const_i64(4);
3758 TCGv_i64 t32
= tcg_const_i64(32);
3761 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3762 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3766 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3770 tcg_temp_free_i64(t
);
3771 tcg_temp_free_i64(t4
);
3772 tcg_temp_free_i64(t32
);
3776 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3778 potential_page_fault(s
);
3779 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3781 return_low128(o
->in2
);
3785 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3787 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3791 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3796 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3798 /* The !borrow flag is the msb of CC. Since we want the inverse of
3799 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3800 disas_jcc(s
, &cmp
, 8 | 4);
3801 borrow
= tcg_temp_new_i64();
3803 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
3805 TCGv_i32 t
= tcg_temp_new_i32();
3806 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
3807 tcg_gen_extu_i32_i64(borrow
, t
);
3808 tcg_temp_free_i32(t
);
3812 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
3813 tcg_temp_free_i64(borrow
);
3817 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3824 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3825 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3826 tcg_temp_free_i32(t
);
3828 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3829 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3830 tcg_temp_free_i32(t
);
3832 gen_exception(EXCP_SVC
);
3833 return EXIT_NORETURN
;
3836 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3838 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3843 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3845 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3850 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3852 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
3857 #ifndef CONFIG_USER_ONLY
3858 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3860 potential_page_fault(s
);
3861 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3867 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3869 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3870 potential_page_fault(s
);
3871 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3872 tcg_temp_free_i32(l
);
3877 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
3879 potential_page_fault(s
);
3880 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3881 return_low128(o
->out2
);
3886 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
3888 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3889 potential_page_fault(s
);
3890 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3891 tcg_temp_free_i32(l
);
3896 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3898 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3899 potential_page_fault(s
);
3900 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3901 tcg_temp_free_i32(l
);
3905 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3907 int d1
= get_field(s
->fields
, d1
);
3908 int d2
= get_field(s
->fields
, d2
);
3909 int b1
= get_field(s
->fields
, b1
);
3910 int b2
= get_field(s
->fields
, b2
);
3911 int l
= get_field(s
->fields
, l1
);
3914 o
->addr1
= get_address(s
, 0, b1
, d1
);
3916 /* If the addresses are identical, this is a store/memset of zero. */
3917 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
3918 o
->in2
= tcg_const_i64(0);
3922 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
3925 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
3929 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
3932 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
3936 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
3939 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
3943 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
3945 gen_op_movi_cc(s
, 0);
3949 /* But in general we'll defer to a helper. */
3950 o
->in2
= get_address(s
, 0, b2
, d2
);
3951 t32
= tcg_const_i32(l
);
3952 potential_page_fault(s
);
3953 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
3954 tcg_temp_free_i32(t32
);
3959 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3961 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3965 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3967 int shift
= s
->insn
->data
& 0xff;
3968 int size
= s
->insn
->data
>> 8;
3969 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3972 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3973 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3975 /* Produce the CC from only the bits manipulated. */
3976 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3977 set_cc_nz_u64(s
, cc_dst
);
3981 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
3983 o
->out
= tcg_const_i64(0);
3987 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
3989 o
->out
= tcg_const_i64(0);
3995 /* ====================================================================== */
3996 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3997 the original inputs), update the various cc data structures in order to
3998 be able to compute the new condition code. */
4000 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4002 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4005 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4007 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
4010 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
4012 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
4015 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4017 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4020 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4022 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4025 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4027 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4030 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4032 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4035 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4037 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
4040 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
4042 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
4045 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4047 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4050 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4052 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4055 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4057 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4060 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4062 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4065 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4067 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4070 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4072 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4075 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4077 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4080 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4082 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4085 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4087 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4090 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4092 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
4095 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
4097 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
4098 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
4101 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
4103 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
4106 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
4108 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
4111 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
4113 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
4116 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
4118 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
4121 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
4123 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
4126 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
4128 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
4131 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
4133 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
4136 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
4138 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
4141 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
4143 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
4146 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
4148 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
4151 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
4153 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
4156 /* ====================================================================== */
4157 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4158 with the TCG register to which we will write. Used in combination with
4159 the "wout" generators, in some cases we need a new temporary, and in
4160 some cases we can write to a TCG global. */
4162 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4164 o
->out
= tcg_temp_new_i64();
4166 #define SPEC_prep_new 0
4168 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4170 o
->out
= tcg_temp_new_i64();
4171 o
->out2
= tcg_temp_new_i64();
4173 #define SPEC_prep_new_P 0
4175 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4177 o
->out
= regs
[get_field(f
, r1
)];
4180 #define SPEC_prep_r1 0
4182 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4184 int r1
= get_field(f
, r1
);
4186 o
->out2
= regs
[r1
+ 1];
4187 o
->g_out
= o
->g_out2
= true;
4189 #define SPEC_prep_r1_P SPEC_r1_even
4191 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4193 o
->out
= fregs
[get_field(f
, r1
)];
4196 #define SPEC_prep_f1 0
4198 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4200 int r1
= get_field(f
, r1
);
4202 o
->out2
= fregs
[r1
+ 2];
4203 o
->g_out
= o
->g_out2
= true;
4205 #define SPEC_prep_x1 SPEC_r1_f128
4207 /* ====================================================================== */
4208 /* The "Write OUTput" generators. These generally perform some non-trivial
4209 copy of data to TCG globals, or to main memory. The trivial cases are
4210 generally handled by having a "prep" generator install the TCG global
4211 as the destination of the operation. */
4213 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4215 store_reg(get_field(f
, r1
), o
->out
);
4217 #define SPEC_wout_r1 0
4219 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4221 int r1
= get_field(f
, r1
);
4222 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
4224 #define SPEC_wout_r1_8 0
4226 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4228 int r1
= get_field(f
, r1
);
4229 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
4231 #define SPEC_wout_r1_16 0
4233 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4235 store_reg32_i64(get_field(f
, r1
), o
->out
);
4237 #define SPEC_wout_r1_32 0
4239 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4241 store_reg32h_i64(get_field(f
, r1
), o
->out
);
4243 #define SPEC_wout_r1_32h 0
4245 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4247 int r1
= get_field(f
, r1
);
4248 store_reg32_i64(r1
, o
->out
);
4249 store_reg32_i64(r1
+ 1, o
->out2
);
4251 #define SPEC_wout_r1_P32 SPEC_r1_even
4253 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4255 int r1
= get_field(f
, r1
);
4256 store_reg32_i64(r1
+ 1, o
->out
);
4257 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
4258 store_reg32_i64(r1
, o
->out
);
4260 #define SPEC_wout_r1_D32 SPEC_r1_even
4262 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4264 store_freg32_i64(get_field(f
, r1
), o
->out
);
4266 #define SPEC_wout_e1 0
4268 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4270 store_freg(get_field(f
, r1
), o
->out
);
4272 #define SPEC_wout_f1 0
4274 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4276 int f1
= get_field(s
->fields
, r1
);
4277 store_freg(f1
, o
->out
);
4278 store_freg(f1
+ 2, o
->out2
);
4280 #define SPEC_wout_x1 SPEC_r1_f128
4282 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4284 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4285 store_reg32_i64(get_field(f
, r1
), o
->out
);
4288 #define SPEC_wout_cond_r1r2_32 0
4290 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4292 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4293 store_freg32_i64(get_field(f
, r1
), o
->out
);
4296 #define SPEC_wout_cond_e1e2 0
4298 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4300 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4302 #define SPEC_wout_m1_8 0
4304 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4306 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4308 #define SPEC_wout_m1_16 0
4310 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4312 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4314 #define SPEC_wout_m1_32 0
4316 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4318 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4320 #define SPEC_wout_m1_64 0
4322 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4324 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4326 #define SPEC_wout_m2_32 0
4328 static void wout_m2_32_r1_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4330 /* XXX release reservation */
4331 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4332 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4334 #define SPEC_wout_m2_32_r1_atomic 0
4336 static void wout_m2_64_r1_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4338 /* XXX release reservation */
4339 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4340 store_reg(get_field(f
, r1
), o
->in2
);
4342 #define SPEC_wout_m2_64_r1_atomic 0
4344 /* ====================================================================== */
4345 /* The "INput 1" generators. These load the first operand to an insn. */
4347 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4349 o
->in1
= load_reg(get_field(f
, r1
));
4351 #define SPEC_in1_r1 0
4353 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4355 o
->in1
= regs
[get_field(f
, r1
)];
4358 #define SPEC_in1_r1_o 0
4360 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4362 o
->in1
= tcg_temp_new_i64();
4363 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4365 #define SPEC_in1_r1_32s 0
4367 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4369 o
->in1
= tcg_temp_new_i64();
4370 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4372 #define SPEC_in1_r1_32u 0
4374 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4376 o
->in1
= tcg_temp_new_i64();
4377 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
4379 #define SPEC_in1_r1_sr32 0
4381 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4383 o
->in1
= load_reg(get_field(f
, r1
) + 1);
4385 #define SPEC_in1_r1p1 SPEC_r1_even
4387 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4389 o
->in1
= tcg_temp_new_i64();
4390 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4392 #define SPEC_in1_r1p1_32s SPEC_r1_even
4394 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4396 o
->in1
= tcg_temp_new_i64();
4397 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4399 #define SPEC_in1_r1p1_32u SPEC_r1_even
4401 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4403 int r1
= get_field(f
, r1
);
4404 o
->in1
= tcg_temp_new_i64();
4405 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
4407 #define SPEC_in1_r1_D32 SPEC_r1_even
4409 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4411 o
->in1
= load_reg(get_field(f
, r2
));
4413 #define SPEC_in1_r2 0
4415 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4417 o
->in1
= tcg_temp_new_i64();
4418 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
4420 #define SPEC_in1_r2_sr32 0
4422 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4424 o
->in1
= load_reg(get_field(f
, r3
));
4426 #define SPEC_in1_r3 0
4428 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4430 o
->in1
= regs
[get_field(f
, r3
)];
4433 #define SPEC_in1_r3_o 0
4435 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4437 o
->in1
= tcg_temp_new_i64();
4438 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4440 #define SPEC_in1_r3_32s 0
4442 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4444 o
->in1
= tcg_temp_new_i64();
4445 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4447 #define SPEC_in1_r3_32u 0
4449 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4451 int r3
= get_field(f
, r3
);
4452 o
->in1
= tcg_temp_new_i64();
4453 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
4455 #define SPEC_in1_r3_D32 SPEC_r3_even
4457 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4459 o
->in1
= load_freg32_i64(get_field(f
, r1
));
4461 #define SPEC_in1_e1 0
4463 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4465 o
->in1
= fregs
[get_field(f
, r1
)];
4468 #define SPEC_in1_f1_o 0
4470 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4472 int r1
= get_field(f
, r1
);
4474 o
->out2
= fregs
[r1
+ 2];
4475 o
->g_out
= o
->g_out2
= true;
4477 #define SPEC_in1_x1_o SPEC_r1_f128
4479 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4481 o
->in1
= fregs
[get_field(f
, r3
)];
4484 #define SPEC_in1_f3_o 0
4486 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4488 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
4490 #define SPEC_in1_la1 0
4492 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4494 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4495 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4497 #define SPEC_in1_la2 0
4499 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4502 o
->in1
= tcg_temp_new_i64();
4503 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4505 #define SPEC_in1_m1_8u 0
4507 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4510 o
->in1
= tcg_temp_new_i64();
4511 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4513 #define SPEC_in1_m1_16s 0
4515 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4518 o
->in1
= tcg_temp_new_i64();
4519 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4521 #define SPEC_in1_m1_16u 0
4523 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4526 o
->in1
= tcg_temp_new_i64();
4527 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4529 #define SPEC_in1_m1_32s 0
4531 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4534 o
->in1
= tcg_temp_new_i64();
4535 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4537 #define SPEC_in1_m1_32u 0
4539 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4542 o
->in1
= tcg_temp_new_i64();
4543 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
4545 #define SPEC_in1_m1_64 0
4547 /* ====================================================================== */
4548 /* The "INput 2" generators. These load the second operand to an insn. */
4550 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4552 o
->in2
= regs
[get_field(f
, r1
)];
4555 #define SPEC_in2_r1_o 0
4557 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4559 o
->in2
= tcg_temp_new_i64();
4560 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4562 #define SPEC_in2_r1_16u 0
4564 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4566 o
->in2
= tcg_temp_new_i64();
4567 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4569 #define SPEC_in2_r1_32u 0
4571 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4573 int r1
= get_field(f
, r1
);
4574 o
->in2
= tcg_temp_new_i64();
4575 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
4577 #define SPEC_in2_r1_D32 SPEC_r1_even
4579 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4581 o
->in2
= load_reg(get_field(f
, r2
));
4583 #define SPEC_in2_r2 0
4585 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4587 o
->in2
= regs
[get_field(f
, r2
)];
4590 #define SPEC_in2_r2_o 0
4592 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4594 int r2
= get_field(f
, r2
);
4596 o
->in2
= load_reg(r2
);
4599 #define SPEC_in2_r2_nz 0
4601 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4603 o
->in2
= tcg_temp_new_i64();
4604 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4606 #define SPEC_in2_r2_8s 0
4608 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4610 o
->in2
= tcg_temp_new_i64();
4611 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4613 #define SPEC_in2_r2_8u 0
4615 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4617 o
->in2
= tcg_temp_new_i64();
4618 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4620 #define SPEC_in2_r2_16s 0
4622 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4624 o
->in2
= tcg_temp_new_i64();
4625 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4627 #define SPEC_in2_r2_16u 0
4629 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4631 o
->in2
= load_reg(get_field(f
, r3
));
4633 #define SPEC_in2_r3 0
4635 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4637 o
->in2
= tcg_temp_new_i64();
4638 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
4640 #define SPEC_in2_r3_sr32 0
4642 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4644 o
->in2
= tcg_temp_new_i64();
4645 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4647 #define SPEC_in2_r2_32s 0
4649 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4651 o
->in2
= tcg_temp_new_i64();
4652 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4654 #define SPEC_in2_r2_32u 0
4656 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4658 o
->in2
= tcg_temp_new_i64();
4659 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
4661 #define SPEC_in2_r2_sr32 0
4663 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4665 o
->in2
= load_freg32_i64(get_field(f
, r2
));
4667 #define SPEC_in2_e2 0
4669 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4671 o
->in2
= fregs
[get_field(f
, r2
)];
4674 #define SPEC_in2_f2_o 0
4676 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4678 int r2
= get_field(f
, r2
);
4680 o
->in2
= fregs
[r2
+ 2];
4681 o
->g_in1
= o
->g_in2
= true;
4683 #define SPEC_in2_x2_o SPEC_r2_f128
4685 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4687 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
4689 #define SPEC_in2_ra2 0
4691 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4693 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4694 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4696 #define SPEC_in2_a2 0
4698 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4700 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
4702 #define SPEC_in2_ri2 0
4704 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4706 help_l2_shift(s
, f
, o
, 31);
4708 #define SPEC_in2_sh32 0
4710 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4712 help_l2_shift(s
, f
, o
, 63);
4714 #define SPEC_in2_sh64 0
4716 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4719 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
4721 #define SPEC_in2_m2_8u 0
4723 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4726 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4728 #define SPEC_in2_m2_16s 0
4730 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4733 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4735 #define SPEC_in2_m2_16u 0
4737 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4740 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4742 #define SPEC_in2_m2_32s 0
4744 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4747 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4749 #define SPEC_in2_m2_32u 0
4751 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4754 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4756 #define SPEC_in2_m2_64 0
4758 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4761 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4763 #define SPEC_in2_mri2_16u 0
4765 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4768 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4770 #define SPEC_in2_mri2_32s 0
4772 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4775 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4777 #define SPEC_in2_mri2_32u 0
4779 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4782 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4784 #define SPEC_in2_mri2_64 0
4786 static void in2_m2_32s_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4788 /* XXX should reserve the address */
4790 o
->in2
= tcg_temp_new_i64();
4791 tcg_gen_qemu_ld32s(o
->in2
, o
->addr1
, get_mem_index(s
));
4793 #define SPEC_in2_m2_32s_atomic 0
4795 static void in2_m2_64_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4797 /* XXX should reserve the address */
4799 o
->in2
= tcg_temp_new_i64();
4800 tcg_gen_qemu_ld64(o
->in2
, o
->addr1
, get_mem_index(s
));
4802 #define SPEC_in2_m2_64_atomic 0
4804 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4806 o
->in2
= tcg_const_i64(get_field(f
, i2
));
4808 #define SPEC_in2_i2 0
4810 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4812 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
4814 #define SPEC_in2_i2_8u 0
4816 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4818 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
4820 #define SPEC_in2_i2_16u 0
4822 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4824 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
4826 #define SPEC_in2_i2_32u 0
4828 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4830 uint64_t i2
= (uint16_t)get_field(f
, i2
);
4831 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4833 #define SPEC_in2_i2_16u_shl 0
4835 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4837 uint64_t i2
= (uint32_t)get_field(f
, i2
);
4838 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4840 #define SPEC_in2_i2_32u_shl 0
/*
 * NOTE(review): from here to the end of this chunk the file copy has lost
 * lines (standalone braces and several numbered source lines are absent),
 * so the surviving tokens are preserved byte-for-byte rather than rewritten.
 * Table-driven decode: insn-data.def is expanded three times -- once to
 * build the insn_* enum, once to build the DisasInsn info table, and once
 * (inside lookup_opc) to build a switch mapping opcode -> table entry.
 */
4842 /* ====================================================================== */
4844 /* Find opc within the table of insns. This is formulated as a switch
4845 statement so that (1) we get compile-time notice of cut-paste errors
4846 for duplicated opcodes, and (2) the compiler generates the binary
4847 search tree, rather than us having to post-process the table. */
4849 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4850 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4852 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
/* First expansion: an enum index per instruction.  */
4854 enum DisasInsnEnum
{
4855 #include "insn-data.def"
/* Second expansion: one DisasInsn initializer per instruction; .spec ORs
   together the per-operand specification-exception checks.  */
4859 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4863 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4865 .help_in1 = in1_##I1, \
4866 .help_in2 = in2_##I2, \
4867 .help_prep = prep_##P, \
4868 .help_wout = wout_##W, \
4869 .help_cout = cout_##CC, \
4870 .help_op = op_##OP, \
4874 /* Allow 0 to be used for NULL in the table below. */
4882 #define SPEC_in1_0 0
4883 #define SPEC_in2_0 0
4884 #define SPEC_prep_0 0
4885 #define SPEC_wout_0 0
4887 static const DisasInsn insn_info
[] = {
4888 #include "insn-data.def"
/* Third expansion: switch cases returning the matching table entry.  */
4892 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4893 case OPC: return &insn_info[insn_ ## NM];
4895 static const DisasInsn
*lookup_opc(uint16_t opc
)
4898 #include "insn-data.def"
4907 /* Extract a field from the insn. The INSN should be left-aligned in
4908 the uint64_t so that we can more easily utilize the big-bit-endian
4909 definitions we extract from the Principles of Operation. */
/* NOTE(review): the declarations of r and m, the switch header, break
   statements, an abort for unknown field types, and braces appear to be
   missing from this copy; tokens below preserved byte-for-byte.  */
4911 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4919 /* Zero extract the field from the insn. */
4920 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4922 /* Sign-extend, or un-swap the field as necessary. */
4924 case 0: /* unsigned */
4926 case 1: /* signed */
4927 assert(f
->size
<= 32);
4928 m
= 1u << (f
->size
- 1);
/* (m is the sign bit; presumably used as r = (r ^ m) - m -- confirm.) */
4931 case 2: /* dl+dh split, signed 20 bit. */
4932 r
= ((int8_t)r
<< 12) | (r
>> 8);
4938 /* Validate that the "compressed" encoding we selected above is valid.
4939 I.e. we haven't made two different original fields overlap. */
4940 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4941 o
->presentC
|= 1 << f
->indexC
;
4942 o
->presentO
|= 1 << f
->indexO
;
4944 o
->c
[f
->indexC
] = r
;
4947 /* Lookup the insn at the current PC, extracting the operands into O and
4948 returning the info struct for the insn. Returns NULL for invalid insn. */
/* NOTE(review): declarations (op, op2, ilen, i), the ilen-based switch that
   reads 2/4/6 bytes, the op2-selection switch header, and braces appear to
   be missing from this copy; tokens below preserved byte-for-byte.  */
4950 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4953 uint64_t insn
, pc
= s
->pc
;
4955 const DisasInsn
*info
;
4957 insn
= ld_code2(env
, pc
);
4958 op
= (insn
>> 8) & 0xff;
4959 ilen
= get_ilen(op
);
4960 s
->next_pc
= s
->pc
+ ilen
;
4967 insn
= ld_code4(env
, pc
) << 32;
4970 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4976 /* We can't actually determine the insn format until we've looked up
4977 the full insn opcode. Which we can't do without locating the
4978 secondary opcode. Assume by default that OP2 is at bit 40; for
4979 those smaller insns that don't actually have a secondary opcode
4980 this will correctly result in OP2 = 0. */
4986 case 0xb2: /* S, RRF, RRE */
4987 case 0xb3: /* RRE, RRD, RRF */
4988 case 0xb9: /* RRE, RRF */
4989 case 0xe5: /* SSE, SIL */
4990 op2
= (insn
<< 8) >> 56;
4994 case 0xc0: /* RIL */
4995 case 0xc2: /* RIL */
4996 case 0xc4: /* RIL */
4997 case 0xc6: /* RIL */
4998 case 0xc8: /* SSF */
4999 case 0xcc: /* RIL */
5000 op2
= (insn
<< 12) >> 60;
5002 case 0xd0 ... 0xdf: /* SS */
5008 case 0xee ... 0xf3: /* SS */
5009 case 0xf8 ... 0xfd: /* SS */
5013 op2
= (insn
<< 40) >> 56;
/* Clear the operand-field struct before extraction.  */
5017 memset(f
, 0, sizeof(*f
));
5021 /* Lookup the instruction. */
5022 info
= lookup_opc(op
<< 8 | op2
);
5024 /* If we found it, extract the operands. */
5026 DisasFormat fmt
= info
->fmt
;
5029 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5030 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
/* Translate one guest instruction: decode it, run its in/prep/op/wout/cout
   helper pipeline, then free any non-global temporaries.
   NOTE(review): the DisasFields/DisasOps local declarations, the even/f128
   register checks inside the spec tests, closing braces, and several other
   lines are missing from this copy; tokens below preserved byte-for-byte.  */
5036 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5038 const DisasInsn
*insn
;
5039 ExitStatus ret
= NO_EXIT
;
5043 /* Search for the insn in the table. */
5044 insn
= extract_insn(env
, s
, &f
);
5046 /* Not found means unimplemented/illegal opcode. */
5048 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
5050 gen_illegal_opcode(s
);
5051 return EXIT_NORETURN
;
5054 /* Check for insn specification exceptions. */
5056 int spec
= insn
->spec
, excp
= 0, r
;
5058 if (spec
& SPEC_r1_even
) {
5059 r
= get_field(&f
, r1
);
5061 excp
= PGM_SPECIFICATION
;
5064 if (spec
& SPEC_r2_even
) {
5065 r
= get_field(&f
, r2
);
5067 excp
= PGM_SPECIFICATION
;
5070 if (spec
& SPEC_r3_even
) {
5071 r
= get_field(&f
, r3
);
5073 excp
= PGM_SPECIFICATION
;
5076 if (spec
& SPEC_r1_f128
) {
5077 r
= get_field(&f
, r1
);
5079 excp
= PGM_SPECIFICATION
;
5082 if (spec
& SPEC_r2_f128
) {
5083 r
= get_field(&f
, r2
);
5085 excp
= PGM_SPECIFICATION
;
5089 gen_program_exception(s
, excp
);
5090 return EXIT_NORETURN
;
5094 /* Set up the structures we use to communicate with the helpers. */
5097 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5098 TCGV_UNUSED_I64(o
.out
);
5099 TCGV_UNUSED_I64(o
.out2
);
5100 TCGV_UNUSED_I64(o
.in1
);
5101 TCGV_UNUSED_I64(o
.in2
);
5102 TCGV_UNUSED_I64(o
.addr1
);
5104 /* Implement the instruction. */
5105 if (insn
->help_in1
) {
5106 insn
->help_in1(s
, &f
, &o
);
5108 if (insn
->help_in2
) {
5109 insn
->help_in2(s
, &f
, &o
);
5111 if (insn
->help_prep
) {
5112 insn
->help_prep(s
, &f
, &o
);
5114 if (insn
->help_op
) {
5115 ret
= insn
->help_op(s
, &o
);
5117 if (insn
->help_wout
) {
5118 insn
->help_wout(s
, &f
, &o
);
5120 if (insn
->help_cout
) {
5121 insn
->help_cout(s
, &o
);
/* Temps flagged g_out/g_in* are TCG globals and must not be freed.  */
5124 /* Free any temporaries created by the helpers. */
5125 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5126 tcg_temp_free_i64(o
.out
);
5128 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5129 tcg_temp_free_i64(o
.out2
);
5131 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5132 tcg_temp_free_i64(o
.in1
);
5134 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5135 tcg_temp_free_i64(o
.in2
);
5137 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5138 tcg_temp_free_i64(o
.addr1
);
5141 /* Advance to the next instruction. */
/* Main translation loop: repeatedly call translate_one until an exit
   condition (page boundary, op buffer full, insn budget, single-step),
   then finish the TB and optionally dump disassembly.
   NOTE(review): the DisasContext declaration, the do { } loop header, the
   final status switch, and many braces are missing from this copy; tokens
   below preserved byte-for-byte.  */
5146 static inline void gen_intermediate_code_internal(S390CPU
*cpu
,
5147 TranslationBlock
*tb
,
5150 CPUState
*cs
= CPU(cpu
);
5151 CPUS390XState
*env
= &cpu
->env
;
5153 target_ulong pc_start
;
5154 uint64_t next_page_start
;
5156 int num_insns
, max_insns
;
/* 31-bit mode: addresses wrap at bit 31.  */
5164 if (!(tb
->flags
& FLAG_MASK_64
)) {
5165 pc_start
&= 0x7fffffff;
5170 dc
.cc_op
= CC_OP_DYNAMIC
;
5171 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
5173 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
5176 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5177 if (max_insns
== 0) {
5178 max_insns
= CF_COUNT_MASK
;
5185 j
= tcg_op_buf_count();
5189 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
5192 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
5193 gen_opc_cc_op
[lj
] = dc
.cc_op
;
5194 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
5195 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
5197 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
5201 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
5202 tcg_gen_debug_insn_start(dc
.pc
);
/* Stop at breakpoints without translating the insn at that PC.  */
5206 if (unlikely(!QTAILQ_EMPTY(&cs
->breakpoints
))) {
5207 QTAILQ_FOREACH(bp
, &cs
->breakpoints
, entry
) {
5208 if (bp
->pc
== dc
.pc
) {
5209 status
= EXIT_PC_STALE
;
5215 if (status
== NO_EXIT
) {
5216 status
= translate_one(env
, &dc
);
5219 /* If we reach a page boundary, are single stepping,
5220 or exhaust instruction count, stop generation. */
5221 if (status
== NO_EXIT
5222 && (dc
.pc
>= next_page_start
5223 || tcg_op_buf_full()
5224 || num_insns
>= max_insns
5226 || cs
->singlestep_enabled
)) {
5227 status
= EXIT_PC_STALE
;
5229 } while (status
== NO_EXIT
);
5231 if (tb
->cflags
& CF_LAST_IO
) {
5240 update_psw_addr(&dc
);
5242 case EXIT_PC_UPDATED
:
5243 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5244 cc op type is in env */
5246 /* Exit the TB, either by raising a debug exception or by return. */
5248 gen_exception(EXCP_DEBUG
);
5257 gen_tb_end(tb
, num_insns
);
5260 j
= tcg_op_buf_count();
5263 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
5266 tb
->size
= dc
.pc
- pc_start
;
5267 tb
->icount
= num_insns
;
5270 #if defined(S390X_DEBUG_DISAS)
5271 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
5272 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5273 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
5279 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
5281 gen_intermediate_code_internal(s390_env_get_cpu(env
), tb
, false);
5284 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
5286 gen_intermediate_code_internal(s390_env_get_cpu(env
), tb
, true);
5289 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
5292 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
5293 cc_op
= gen_opc_cc_op
[pc_pos
];
5294 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {