4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
61 /* Information carried about a condition to be evaluated. */
68 struct { TCGv_i64 a
, b
; } s64
;
69 struct { TCGv_i32 a
, b
; } s32
;
75 static void gen_op_calc_cc(DisasContext
*s
);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit
[CC_OP_MAX
];
79 static uint64_t inline_branch_miss
[CC_OP_MAX
];
/* Log the raw instruction word when verbose disassembly logging is
   enabled (LOG_DISAS is a no-op otherwise). */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
87 static inline uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
89 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
90 if (s
->tb
->flags
& FLAG_MASK_32
) {
91 return pc
| 0x80000000;
97 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
102 if (env
->cc_op
> 3) {
103 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
104 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
106 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
107 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
110 for (i
= 0; i
< 16; i
++) {
111 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
113 cpu_fprintf(f
, "\n");
119 for (i
= 0; i
< 16; i
++) {
120 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
122 cpu_fprintf(f
, "\n");
128 #ifndef CONFIG_USER_ONLY
129 for (i
= 0; i
< 16; i
++) {
130 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
132 cpu_fprintf(f
, "\n");
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i
= 0; i
< CC_OP_MAX
; i
++) {
141 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
142 inline_branch_miss
[i
], inline_branch_hit
[i
]);
146 cpu_fprintf(f
, "\n");
149 static TCGv_i64 psw_addr
;
150 static TCGv_i64 psw_mask
;
152 static TCGv_i32 cc_op
;
153 static TCGv_i64 cc_src
;
154 static TCGv_i64 cc_dst
;
155 static TCGv_i64 cc_vr
;
157 static char cpu_reg_names
[32][4];
158 static TCGv_i64 regs
[16];
159 static TCGv_i64 fregs
[16];
161 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
163 void s390x_translate_init(void)
167 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
168 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
169 offsetof(CPUS390XState
, psw
.addr
),
171 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
172 offsetof(CPUS390XState
, psw
.mask
),
175 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
177 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
179 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
181 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
184 for (i
= 0; i
< 16; i
++) {
185 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
186 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
187 offsetof(CPUS390XState
, regs
[i
]),
191 for (i
= 0; i
< 16; i
++) {
192 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
193 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
194 offsetof(CPUS390XState
, fregs
[i
].d
),
195 cpu_reg_names
[i
+ 16]);
198 /* register helpers */
203 static inline TCGv_i64
load_reg(int reg
)
205 TCGv_i64 r
= tcg_temp_new_i64();
206 tcg_gen_mov_i64(r
, regs
[reg
]);
210 static inline TCGv_i64
load_freg(int reg
)
212 TCGv_i64 r
= tcg_temp_new_i64();
213 tcg_gen_mov_i64(r
, fregs
[reg
]);
217 static inline TCGv_i32
load_freg32(int reg
)
219 TCGv_i32 r
= tcg_temp_new_i32();
220 #if HOST_LONG_BITS == 32
221 tcg_gen_mov_i32(r
, TCGV_HIGH(fregs
[reg
]));
223 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r
)), fregs
[reg
], 32);
228 static inline TCGv_i64
load_freg32_i64(int reg
)
230 TCGv_i64 r
= tcg_temp_new_i64();
231 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
235 static inline TCGv_i32
load_reg32(int reg
)
237 TCGv_i32 r
= tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r
, regs
[reg
]);
242 static inline TCGv_i64
load_reg32_i64(int reg
)
244 TCGv_i64 r
= tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r
, regs
[reg
]);
249 static inline void store_reg(int reg
, TCGv_i64 v
)
251 tcg_gen_mov_i64(regs
[reg
], v
);
254 static inline void store_freg(int reg
, TCGv_i64 v
)
256 tcg_gen_mov_i64(fregs
[reg
], v
);
259 static inline void store_reg32(int reg
, TCGv_i32 v
)
261 /* 32 bit register writes keep the upper half */
262 #if HOST_LONG_BITS == 32
263 tcg_gen_mov_i32(TCGV_LOW(regs
[reg
]), v
);
265 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
266 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 32);
270 static inline void store_reg32_i64(int reg
, TCGv_i64 v
)
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
276 static inline void store_reg16(int reg
, TCGv_i32 v
)
278 /* 16 bit register writes keep the upper bytes */
279 #if HOST_LONG_BITS == 32
280 tcg_gen_deposit_i32(TCGV_LOW(regs
[reg
]), TCGV_LOW(regs
[reg
]), v
, 0, 16);
282 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
283 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 16);
287 static inline void store_freg32(int reg
, TCGv_i32 v
)
289 /* 32 bit register writes keep the lower half */
290 #if HOST_LONG_BITS == 32
291 tcg_gen_mov_i32(TCGV_HIGH(fregs
[reg
]), v
);
293 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
],
294 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 32, 32);
298 static inline void store_freg32_i64(int reg
, TCGv_i64 v
)
300 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
303 static inline void return_low128(TCGv_i64 dest
)
305 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
308 static inline void update_psw_addr(DisasContext
*s
)
311 tcg_gen_movi_i64(psw_addr
, s
->pc
);
314 static inline void potential_page_fault(DisasContext
*s
)
316 #ifndef CONFIG_USER_ONLY
322 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
324 return (uint64_t)cpu_lduw_code(env
, pc
);
327 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
329 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
332 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
334 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
337 static inline int get_mem_index(DisasContext
*s
)
339 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
340 case PSW_ASC_PRIMARY
>> 32:
342 case PSW_ASC_SECONDARY
>> 32:
344 case PSW_ASC_HOME
>> 32:
352 static void gen_exception(int excp
)
354 TCGv_i32 tmp
= tcg_const_i32(excp
);
355 gen_helper_exception(cpu_env
, tmp
);
356 tcg_temp_free_i32(tmp
);
359 static void gen_program_exception(DisasContext
*s
, int code
)
363 /* Remember what pgm exeption this was. */
364 tmp
= tcg_const_i32(code
);
365 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
366 tcg_temp_free_i32(tmp
);
368 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
369 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
370 tcg_temp_free_i32(tmp
);
372 /* Advance past instruction. */
379 /* Trigger exception. */
380 gen_exception(EXCP_PGM
);
383 s
->is_jmp
= DISAS_EXCP
;
386 static inline void gen_illegal_opcode(DisasContext
*s
)
388 gen_program_exception(s
, PGM_SPECIFICATION
);
391 static inline void check_privileged(DisasContext
*s
)
393 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
394 gen_program_exception(s
, PGM_PRIVILEGED
);
398 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
402 /* 31-bitify the immediate part; register contents are dealt with below */
403 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
409 tmp
= tcg_const_i64(d2
);
410 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
415 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
419 tmp
= tcg_const_i64(d2
);
420 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
425 tmp
= tcg_const_i64(d2
);
428 /* 31-bit mode mask if there are values loaded from registers */
429 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
430 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
436 static void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
438 s
->cc_op
= CC_OP_CONST0
+ val
;
441 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
443 tcg_gen_discard_i64(cc_src
);
444 tcg_gen_mov_i64(cc_dst
, dst
);
445 tcg_gen_discard_i64(cc_vr
);
449 static void gen_op_update1_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 dst
)
451 tcg_gen_discard_i64(cc_src
);
452 tcg_gen_extu_i32_i64(cc_dst
, dst
);
453 tcg_gen_discard_i64(cc_vr
);
457 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
460 tcg_gen_mov_i64(cc_src
, src
);
461 tcg_gen_mov_i64(cc_dst
, dst
);
462 tcg_gen_discard_i64(cc_vr
);
466 static void gen_op_update2_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 src
,
469 tcg_gen_extu_i32_i64(cc_src
, src
);
470 tcg_gen_extu_i32_i64(cc_dst
, dst
);
471 tcg_gen_discard_i64(cc_vr
);
475 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
476 TCGv_i64 dst
, TCGv_i64 vr
)
478 tcg_gen_mov_i64(cc_src
, src
);
479 tcg_gen_mov_i64(cc_dst
, dst
);
480 tcg_gen_mov_i64(cc_vr
, vr
);
484 static inline void set_cc_nz_u32(DisasContext
*s
, TCGv_i32 val
)
486 gen_op_update1_cc_i32(s
, CC_OP_NZ
, val
);
489 static inline void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
491 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
494 static inline void cmp_32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
,
497 gen_op_update2_cc_i32(s
, cond
, v1
, v2
);
500 static inline void cmp_64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
,
503 gen_op_update2_cc_i64(s
, cond
, v1
, v2
);
506 static inline void cmp_s32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
508 cmp_32(s
, v1
, v2
, CC_OP_LTGT_32
);
511 static inline void cmp_u32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
513 cmp_32(s
, v1
, v2
, CC_OP_LTUGTU_32
);
516 static inline void cmp_s32c(DisasContext
*s
, TCGv_i32 v1
, int32_t v2
)
518 /* XXX optimize for the constant? put it in s? */
519 TCGv_i32 tmp
= tcg_const_i32(v2
);
520 cmp_32(s
, v1
, tmp
, CC_OP_LTGT_32
);
521 tcg_temp_free_i32(tmp
);
524 static inline void cmp_u32c(DisasContext
*s
, TCGv_i32 v1
, uint32_t v2
)
526 TCGv_i32 tmp
= tcg_const_i32(v2
);
527 cmp_32(s
, v1
, tmp
, CC_OP_LTUGTU_32
);
528 tcg_temp_free_i32(tmp
);
531 static inline void cmp_s64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
533 cmp_64(s
, v1
, v2
, CC_OP_LTGT_64
);
536 static inline void cmp_u64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
538 cmp_64(s
, v1
, v2
, CC_OP_LTUGTU_64
);
541 static inline void cmp_s64c(DisasContext
*s
, TCGv_i64 v1
, int64_t v2
)
543 TCGv_i64 tmp
= tcg_const_i64(v2
);
545 tcg_temp_free_i64(tmp
);
548 static inline void cmp_u64c(DisasContext
*s
, TCGv_i64 v1
, uint64_t v2
)
550 TCGv_i64 tmp
= tcg_const_i64(v2
);
552 tcg_temp_free_i64(tmp
);
555 static inline void set_cc_s32(DisasContext
*s
, TCGv_i32 val
)
557 gen_op_update1_cc_i32(s
, CC_OP_LTGT0_32
, val
);
560 static inline void set_cc_s64(DisasContext
*s
, TCGv_i64 val
)
562 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, val
);
565 static void set_cc_cmp_f32_i64(DisasContext
*s
, TCGv_i32 v1
, TCGv_i64 v2
)
567 tcg_gen_extu_i32_i64(cc_src
, v1
);
568 tcg_gen_mov_i64(cc_dst
, v2
);
569 tcg_gen_discard_i64(cc_vr
);
570 s
->cc_op
= CC_OP_LTGT_F32
;
573 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i32 v1
)
575 gen_op_update1_cc_i32(s
, CC_OP_NZ_F32
, v1
);
578 /* CC value is in env->cc_op */
579 static inline void set_cc_static(DisasContext
*s
)
581 tcg_gen_discard_i64(cc_src
);
582 tcg_gen_discard_i64(cc_dst
);
583 tcg_gen_discard_i64(cc_vr
);
584 s
->cc_op
= CC_OP_STATIC
;
587 static inline void gen_op_set_cc_op(DisasContext
*s
)
589 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
590 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
594 static inline void gen_update_cc_op(DisasContext
*s
)
599 /* calculates cc into cc_op */
600 static void gen_op_calc_cc(DisasContext
*s
)
602 TCGv_i32 local_cc_op
= tcg_const_i32(s
->cc_op
);
603 TCGv_i64 dummy
= tcg_const_i64(0);
610 /* s->cc_op is the cc value */
611 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
614 /* env->cc_op already is the cc value */
628 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
633 case CC_OP_LTUGTU_32
:
634 case CC_OP_LTUGTU_64
:
641 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
656 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
659 /* unknown operation - assume 3 arguments and cc_op in env */
660 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
666 tcg_temp_free_i32(local_cc_op
);
667 tcg_temp_free_i64(dummy
);
669 /* We now have cc in cc_op as constant */
673 static inline void decode_rr(DisasContext
*s
, uint64_t insn
, int *r1
, int *r2
)
677 *r1
= (insn
>> 4) & 0xf;
681 static inline TCGv_i64
decode_rx(DisasContext
*s
, uint64_t insn
, int *r1
,
682 int *x2
, int *b2
, int *d2
)
686 *r1
= (insn
>> 20) & 0xf;
687 *x2
= (insn
>> 16) & 0xf;
688 *b2
= (insn
>> 12) & 0xf;
691 return get_address(s
, *x2
, *b2
, *d2
);
694 static inline void decode_rs(DisasContext
*s
, uint64_t insn
, int *r1
, int *r3
,
699 *r1
= (insn
>> 20) & 0xf;
701 *r3
= (insn
>> 16) & 0xf;
702 *b2
= (insn
>> 12) & 0xf;
706 static inline TCGv_i64
decode_si(DisasContext
*s
, uint64_t insn
, int *i2
,
711 *i2
= (insn
>> 16) & 0xff;
712 *b1
= (insn
>> 12) & 0xf;
715 return get_address(s
, 0, *b1
, *d1
);
718 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
720 /* NOTE: we handle the case where the TB spans two pages here */
721 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
722 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
723 && !s
->singlestep_enabled
724 && !(s
->tb
->cflags
& CF_LAST_IO
));
727 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong pc
)
731 if (use_goto_tb(s
, pc
)) {
732 tcg_gen_goto_tb(tb_num
);
733 tcg_gen_movi_i64(psw_addr
, pc
);
734 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ tb_num
);
736 /* jump to another page: currently not optimized */
737 tcg_gen_movi_i64(psw_addr
, pc
);
742 static inline void account_noninline_branch(DisasContext
*s
, int cc_op
)
744 #ifdef DEBUG_INLINE_BRANCHES
745 inline_branch_miss
[cc_op
]++;
749 static inline void account_inline_branch(DisasContext
*s
, int cc_op
)
751 #ifdef DEBUG_INLINE_BRANCHES
752 inline_branch_hit
[cc_op
]++;
756 /* Table of mask values to comparison codes, given a comparison as input.
757 For a true comparison CC=3 will never be set, but we treat this
758 conservatively for possible use when CC=3 indicates overflow. */
759 static const TCGCond ltgt_cond
[16] = {
760 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
761 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
762 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
763 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
764 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
765 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
766 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
767 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
770 /* Table of mask values to comparison codes, given a logic op as input.
771 For such, only CC=0 and CC=1 should be possible. */
772 static const TCGCond nz_cond
[16] = {
774 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
776 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
778 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
779 /* EQ | NE | x | x */
780 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
783 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
784 details required to generate a TCG comparison. */
785 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
788 enum cc_op old_cc_op
= s
->cc_op
;
790 if (mask
== 15 || mask
== 0) {
791 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
794 c
->g1
= c
->g2
= true;
799 /* Find the TCG condition for the mask + cc op. */
805 cond
= ltgt_cond
[mask
];
806 if (cond
== TCG_COND_NEVER
) {
809 account_inline_branch(s
, old_cc_op
);
812 case CC_OP_LTUGTU_32
:
813 case CC_OP_LTUGTU_64
:
814 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
815 if (cond
== TCG_COND_NEVER
) {
818 account_inline_branch(s
, old_cc_op
);
822 cond
= nz_cond
[mask
];
823 if (cond
== TCG_COND_NEVER
) {
826 account_inline_branch(s
, old_cc_op
);
841 account_inline_branch(s
, old_cc_op
);
856 account_inline_branch(s
, old_cc_op
);
861 /* Calculate cc value. */
866 /* Jump based on CC. We'll load up the real cond below;
867 the assignment here merely avoids a compiler warning. */
868 account_noninline_branch(s
, old_cc_op
);
869 old_cc_op
= CC_OP_STATIC
;
870 cond
= TCG_COND_NEVER
;
874 /* Load up the arguments of the comparison. */
876 c
->g1
= c
->g2
= false;
880 c
->u
.s32
.a
= tcg_temp_new_i32();
881 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
882 c
->u
.s32
.b
= tcg_const_i32(0);
885 case CC_OP_LTUGTU_32
:
887 c
->u
.s32
.a
= tcg_temp_new_i32();
888 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
889 c
->u
.s32
.b
= tcg_temp_new_i32();
890 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
896 c
->u
.s64
.b
= tcg_const_i64(0);
900 case CC_OP_LTUGTU_64
:
903 c
->g1
= c
->g2
= true;
909 c
->u
.s64
.a
= tcg_temp_new_i64();
910 c
->u
.s64
.b
= tcg_const_i64(0);
911 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
919 case 0x8 | 0x4 | 0x2: /* cc != 3 */
921 c
->u
.s32
.b
= tcg_const_i32(3);
923 case 0x8 | 0x4 | 0x1: /* cc != 2 */
925 c
->u
.s32
.b
= tcg_const_i32(2);
927 case 0x8 | 0x2 | 0x1: /* cc != 1 */
929 c
->u
.s32
.b
= tcg_const_i32(1);
931 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
934 c
->u
.s32
.a
= tcg_temp_new_i32();
935 c
->u
.s32
.b
= tcg_const_i32(0);
936 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
938 case 0x8 | 0x4: /* cc < 2 */
940 c
->u
.s32
.b
= tcg_const_i32(2);
942 case 0x8: /* cc == 0 */
944 c
->u
.s32
.b
= tcg_const_i32(0);
946 case 0x4 | 0x2 | 0x1: /* cc != 0 */
948 c
->u
.s32
.b
= tcg_const_i32(0);
950 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
953 c
->u
.s32
.a
= tcg_temp_new_i32();
954 c
->u
.s32
.b
= tcg_const_i32(0);
955 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
957 case 0x4: /* cc == 1 */
959 c
->u
.s32
.b
= tcg_const_i32(1);
961 case 0x2 | 0x1: /* cc > 1 */
963 c
->u
.s32
.b
= tcg_const_i32(1);
965 case 0x2: /* cc == 2 */
967 c
->u
.s32
.b
= tcg_const_i32(2);
969 case 0x1: /* cc == 3 */
971 c
->u
.s32
.b
= tcg_const_i32(3);
974 /* CC is masked by something else: (8 >> cc) & mask. */
977 c
->u
.s32
.a
= tcg_const_i32(8);
978 c
->u
.s32
.b
= tcg_const_i32(0);
979 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
980 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
991 static void free_compare(DisasCompare
*c
)
995 tcg_temp_free_i64(c
->u
.s64
.a
);
997 tcg_temp_free_i32(c
->u
.s32
.a
);
1002 tcg_temp_free_i64(c
->u
.s64
.b
);
1004 tcg_temp_free_i32(c
->u
.s32
.b
);
1009 static void gen_op_mvc(DisasContext
*s
, int l
, TCGv_i64 s1
, TCGv_i64 s2
)
1013 int l_memset
= gen_new_label();
1014 int l_out
= gen_new_label();
1015 TCGv_i64 dest
= tcg_temp_local_new_i64();
1016 TCGv_i64 src
= tcg_temp_local_new_i64();
1019 /* Find out if we should use the inline version of mvc */
1034 /* Fall back to helper */
1035 vl
= tcg_const_i32(l
);
1036 potential_page_fault(s
);
1037 gen_helper_mvc(cpu_env
, vl
, s1
, s2
);
1038 tcg_temp_free_i32(vl
);
1042 tcg_gen_mov_i64(dest
, s1
);
1043 tcg_gen_mov_i64(src
, s2
);
1045 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
1046 /* XXX what if we overflow while moving? */
1047 tcg_gen_andi_i64(dest
, dest
, 0x7fffffffUL
);
1048 tcg_gen_andi_i64(src
, src
, 0x7fffffffUL
);
1051 tmp
= tcg_temp_new_i64();
1052 tcg_gen_addi_i64(tmp
, src
, 1);
1053 tcg_gen_brcond_i64(TCG_COND_EQ
, dest
, tmp
, l_memset
);
1054 tcg_temp_free_i64(tmp
);
1058 tmp
= tcg_temp_new_i64();
1060 tcg_gen_qemu_ld8u(tmp
, src
, get_mem_index(s
));
1061 tcg_gen_qemu_st8(tmp
, dest
, get_mem_index(s
));
1063 tcg_temp_free_i64(tmp
);
1066 tmp
= tcg_temp_new_i64();
1068 tcg_gen_qemu_ld16u(tmp
, src
, get_mem_index(s
));
1069 tcg_gen_qemu_st16(tmp
, dest
, get_mem_index(s
));
1071 tcg_temp_free_i64(tmp
);
1074 tmp
= tcg_temp_new_i64();
1076 tcg_gen_qemu_ld32u(tmp
, src
, get_mem_index(s
));
1077 tcg_gen_qemu_st32(tmp
, dest
, get_mem_index(s
));
1079 tcg_temp_free_i64(tmp
);
1082 tmp
= tcg_temp_new_i64();
1083 tmp2
= tcg_temp_new_i64();
1085 tcg_gen_qemu_ld32u(tmp
, src
, get_mem_index(s
));
1086 tcg_gen_addi_i64(src
, src
, 4);
1087 tcg_gen_qemu_ld8u(tmp2
, src
, get_mem_index(s
));
1088 tcg_gen_qemu_st32(tmp
, dest
, get_mem_index(s
));
1089 tcg_gen_addi_i64(dest
, dest
, 4);
1090 tcg_gen_qemu_st8(tmp2
, dest
, get_mem_index(s
));
1092 tcg_temp_free_i64(tmp
);
1093 tcg_temp_free_i64(tmp2
);
1096 tmp
= tcg_temp_new_i64();
1098 tcg_gen_qemu_ld64(tmp
, src
, get_mem_index(s
));
1099 tcg_gen_qemu_st64(tmp
, dest
, get_mem_index(s
));
1101 tcg_temp_free_i64(tmp
);
1104 /* The inline version can become too big for too uneven numbers, only
1105 use it on known good lengths */
1106 tmp
= tcg_temp_new_i64();
1107 tmp2
= tcg_const_i64(8);
1108 for (i
= 0; (i
+ 7) <= l
; i
+= 8) {
1109 tcg_gen_qemu_ld64(tmp
, src
, get_mem_index(s
));
1110 tcg_gen_qemu_st64(tmp
, dest
, get_mem_index(s
));
1112 tcg_gen_add_i64(src
, src
, tmp2
);
1113 tcg_gen_add_i64(dest
, dest
, tmp2
);
1116 tcg_temp_free_i64(tmp2
);
1117 tmp2
= tcg_const_i64(1);
1119 for (; i
<= l
; i
++) {
1120 tcg_gen_qemu_ld8u(tmp
, src
, get_mem_index(s
));
1121 tcg_gen_qemu_st8(tmp
, dest
, get_mem_index(s
));
1123 tcg_gen_add_i64(src
, src
, tmp2
);
1124 tcg_gen_add_i64(dest
, dest
, tmp2
);
1127 tcg_temp_free_i64(tmp2
);
1128 tcg_temp_free_i64(tmp
);
1134 gen_set_label(l_memset
);
1135 /* memset case (dest == (src + 1)) */
1137 tmp
= tcg_temp_new_i64();
1138 tmp2
= tcg_temp_new_i64();
1139 /* fill tmp with the byte */
1140 tcg_gen_qemu_ld8u(tmp
, src
, get_mem_index(s
));
1141 tcg_gen_shli_i64(tmp2
, tmp
, 8);
1142 tcg_gen_or_i64(tmp
, tmp
, tmp2
);
1143 tcg_gen_shli_i64(tmp2
, tmp
, 16);
1144 tcg_gen_or_i64(tmp
, tmp
, tmp2
);
1145 tcg_gen_shli_i64(tmp2
, tmp
, 32);
1146 tcg_gen_or_i64(tmp
, tmp
, tmp2
);
1147 tcg_temp_free_i64(tmp2
);
1149 tmp2
= tcg_const_i64(8);
1151 for (i
= 0; (i
+ 7) <= l
; i
+= 8) {
1152 tcg_gen_qemu_st64(tmp
, dest
, get_mem_index(s
));
1153 tcg_gen_addi_i64(dest
, dest
, 8);
1156 tcg_temp_free_i64(tmp2
);
1157 tmp2
= tcg_const_i64(1);
1159 for (; i
<= l
; i
++) {
1160 tcg_gen_qemu_st8(tmp
, dest
, get_mem_index(s
));
1161 tcg_gen_addi_i64(dest
, dest
, 1);
1164 tcg_temp_free_i64(tmp2
);
1165 tcg_temp_free_i64(tmp
);
1167 gen_set_label(l_out
);
1169 tcg_temp_free(dest
);
1173 static void gen_op_clc(DisasContext
*s
, int l
, TCGv_i64 s1
, TCGv_i64 s2
)
1179 /* check for simple 32bit or 64bit match */
1182 tmp
= tcg_temp_new_i64();
1183 tmp2
= tcg_temp_new_i64();
1185 tcg_gen_qemu_ld8u(tmp
, s1
, get_mem_index(s
));
1186 tcg_gen_qemu_ld8u(tmp2
, s2
, get_mem_index(s
));
1187 cmp_u64(s
, tmp
, tmp2
);
1189 tcg_temp_free_i64(tmp
);
1190 tcg_temp_free_i64(tmp2
);
1193 tmp
= tcg_temp_new_i64();
1194 tmp2
= tcg_temp_new_i64();
1196 tcg_gen_qemu_ld16u(tmp
, s1
, get_mem_index(s
));
1197 tcg_gen_qemu_ld16u(tmp2
, s2
, get_mem_index(s
));
1198 cmp_u64(s
, tmp
, tmp2
);
1200 tcg_temp_free_i64(tmp
);
1201 tcg_temp_free_i64(tmp2
);
1204 tmp
= tcg_temp_new_i64();
1205 tmp2
= tcg_temp_new_i64();
1207 tcg_gen_qemu_ld32u(tmp
, s1
, get_mem_index(s
));
1208 tcg_gen_qemu_ld32u(tmp2
, s2
, get_mem_index(s
));
1209 cmp_u64(s
, tmp
, tmp2
);
1211 tcg_temp_free_i64(tmp
);
1212 tcg_temp_free_i64(tmp2
);
1215 tmp
= tcg_temp_new_i64();
1216 tmp2
= tcg_temp_new_i64();
1218 tcg_gen_qemu_ld64(tmp
, s1
, get_mem_index(s
));
1219 tcg_gen_qemu_ld64(tmp2
, s2
, get_mem_index(s
));
1220 cmp_u64(s
, tmp
, tmp2
);
1222 tcg_temp_free_i64(tmp
);
1223 tcg_temp_free_i64(tmp2
);
1227 potential_page_fault(s
);
1228 vl
= tcg_const_i32(l
);
1229 gen_helper_clc(cc_op
, cpu_env
, vl
, s1
, s2
);
1230 tcg_temp_free_i32(vl
);
1234 static void disas_e3(CPUS390XState
*env
, DisasContext
* s
, int op
, int r1
,
1235 int x2
, int b2
, int d2
)
1237 TCGv_i64 addr
, tmp2
;
1240 LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n",
1241 op
, r1
, x2
, b2
, d2
);
1242 addr
= get_address(s
, x2
, b2
, d2
);
1244 case 0xf: /* LRVG R1,D2(X2,B2) [RXE] */
1245 tmp2
= tcg_temp_new_i64();
1246 tcg_gen_qemu_ld64(tmp2
, addr
, get_mem_index(s
));
1247 tcg_gen_bswap64_i64(tmp2
, tmp2
);
1248 store_reg(r1
, tmp2
);
1249 tcg_temp_free_i64(tmp2
);
1251 case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */
1252 tmp2
= tcg_temp_new_i64();
1253 tcg_gen_qemu_ld32u(tmp2
, addr
, get_mem_index(s
));
1254 tcg_gen_andi_i64(tmp2
, tmp2
, 0x7fffffffULL
);
1255 store_reg(r1
, tmp2
);
1256 tcg_temp_free_i64(tmp2
);
1258 case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */
1259 tmp2
= tcg_temp_new_i64();
1260 tmp32_1
= tcg_temp_new_i32();
1261 tcg_gen_qemu_ld32u(tmp2
, addr
, get_mem_index(s
));
1262 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1263 tcg_temp_free_i64(tmp2
);
1264 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
1265 store_reg32(r1
, tmp32_1
);
1266 tcg_temp_free_i32(tmp32_1
);
1268 case 0x1f: /* LRVH R1,D2(X2,B2) [RXY] */
1269 tmp2
= tcg_temp_new_i64();
1270 tmp32_1
= tcg_temp_new_i32();
1271 tcg_gen_qemu_ld16u(tmp2
, addr
, get_mem_index(s
));
1272 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1273 tcg_temp_free_i64(tmp2
);
1274 tcg_gen_bswap16_i32(tmp32_1
, tmp32_1
);
1275 store_reg16(r1
, tmp32_1
);
1276 tcg_temp_free_i32(tmp32_1
);
1278 case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */
1279 tmp32_1
= load_reg32(r1
);
1280 tmp2
= tcg_temp_new_i64();
1281 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
1282 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1283 tcg_temp_free_i32(tmp32_1
);
1284 tcg_gen_qemu_st32(tmp2
, addr
, get_mem_index(s
));
1285 tcg_temp_free_i64(tmp2
);
1288 LOG_DISAS("illegal e3 operation 0x%x\n", op
);
1289 gen_illegal_opcode(s
);
1292 tcg_temp_free_i64(addr
);
1295 #ifndef CONFIG_USER_ONLY
1296 static void disas_e5(CPUS390XState
*env
, DisasContext
* s
, uint64_t insn
)
1299 int op
= (insn
>> 32) & 0xff;
1301 tmp
= get_address(s
, 0, (insn
>> 28) & 0xf, (insn
>> 16) & 0xfff);
1302 tmp2
= get_address(s
, 0, (insn
>> 12) & 0xf, insn
& 0xfff);
1304 LOG_DISAS("disas_e5: insn %" PRIx64
"\n", insn
);
1306 case 0x01: /* TPROT D1(B1),D2(B2) [SSE] */
1307 /* Test Protection */
1308 potential_page_fault(s
);
1309 gen_helper_tprot(cc_op
, tmp
, tmp2
);
1313 LOG_DISAS("illegal e5 operation 0x%x\n", op
);
1314 gen_illegal_opcode(s
);
1318 tcg_temp_free_i64(tmp
);
1319 tcg_temp_free_i64(tmp2
);
1323 static void disas_eb(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1324 int r3
, int b2
, int d2
)
1326 TCGv_i64 tmp
, tmp2
, tmp3
, tmp4
;
1327 TCGv_i32 tmp32_1
, tmp32_2
;
1330 LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n",
1331 op
, r1
, r3
, b2
, d2
);
1333 case 0xc: /* SRLG R1,R3,D2(B2) [RSY] */
1334 case 0xd: /* SLLG R1,R3,D2(B2) [RSY] */
1335 case 0xa: /* SRAG R1,R3,D2(B2) [RSY] */
1336 case 0xb: /* SLAG R1,R3,D2(B2) [RSY] */
1337 case 0x1c: /* RLLG R1,R3,D2(B2) [RSY] */
1339 tmp
= get_address(s
, 0, b2
, d2
);
1340 tcg_gen_andi_i64(tmp
, tmp
, 0x3f);
1342 tmp
= tcg_const_i64(d2
& 0x3f);
1346 tcg_gen_shr_i64(regs
[r1
], regs
[r3
], tmp
);
1349 tcg_gen_shl_i64(regs
[r1
], regs
[r3
], tmp
);
1352 tcg_gen_sar_i64(regs
[r1
], regs
[r3
], tmp
);
1355 tmp2
= tcg_temp_new_i64();
1356 tmp3
= tcg_temp_new_i64();
1357 gen_op_update2_cc_i64(s
, CC_OP_SLAG
, regs
[r3
], tmp
);
1358 tcg_gen_shl_i64(tmp2
, regs
[r3
], tmp
);
1359 /* override sign bit with source sign */
1360 tcg_gen_andi_i64(tmp2
, tmp2
, ~0x8000000000000000ULL
);
1361 tcg_gen_andi_i64(tmp3
, regs
[r3
], 0x8000000000000000ULL
);
1362 tcg_gen_or_i64(regs
[r1
], tmp2
, tmp3
);
1363 tcg_temp_free_i64(tmp2
);
1364 tcg_temp_free_i64(tmp3
);
1367 tcg_gen_rotl_i64(regs
[r1
], regs
[r3
], tmp
);
1374 set_cc_s64(s
, regs
[r1
]);
1376 tcg_temp_free_i64(tmp
);
1378 case 0x1d: /* RLL R1,R3,D2(B2) [RSY] */
1380 tmp
= get_address(s
, 0, b2
, d2
);
1381 tcg_gen_andi_i64(tmp
, tmp
, 0x3f);
1383 tmp
= tcg_const_i64(d2
& 0x3f);
1385 tmp32_1
= tcg_temp_new_i32();
1386 tmp32_2
= load_reg32(r3
);
1387 tcg_gen_trunc_i64_i32(tmp32_1
, tmp
);
1390 tcg_gen_rotl_i32(tmp32_1
, tmp32_2
, tmp32_1
);
1396 store_reg32(r1
, tmp32_1
);
1397 tcg_temp_free_i64(tmp
);
1398 tcg_temp_free_i32(tmp32_1
);
1399 tcg_temp_free_i32(tmp32_2
);
1401 case 0x4: /* LMG R1,R3,D2(B2) [RSE] */
1402 case 0x24: /* STMG R1,R3,D2(B2) [RSE] */
1405 case 0x26: /* STMH R1,R3,D2(B2) [RSE] */
1406 case 0x96: /* LMH R1,R3,D2(B2) [RSE] */
1409 /* Apparently, unrolling lmg/stmg of any size gains performance -
1410 even for very long ones... */
1411 tmp
= get_address(s
, 0, b2
, d2
);
1412 tmp3
= tcg_const_i64(stm_len
);
1413 tmp4
= tcg_const_i64(op
== 0x26 ? 32 : 4);
1414 for (i
= r1
;; i
= (i
+ 1) % 16) {
1417 tcg_gen_qemu_ld64(regs
[i
], tmp
, get_mem_index(s
));
1420 tmp2
= tcg_temp_new_i64();
1421 #if HOST_LONG_BITS == 32
1422 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
1423 tcg_gen_trunc_i64_i32(TCGV_HIGH(regs
[i
]), tmp2
);
1425 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
1426 tcg_gen_shl_i64(tmp2
, tmp2
, tmp4
);
1427 tcg_gen_ext32u_i64(regs
[i
], regs
[i
]);
1428 tcg_gen_or_i64(regs
[i
], regs
[i
], tmp2
);
1430 tcg_temp_free_i64(tmp2
);
1433 tcg_gen_qemu_st64(regs
[i
], tmp
, get_mem_index(s
));
1436 tmp2
= tcg_temp_new_i64();
1437 tcg_gen_shr_i64(tmp2
, regs
[i
], tmp4
);
1438 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1439 tcg_temp_free_i64(tmp2
);
1447 tcg_gen_add_i64(tmp
, tmp
, tmp3
);
1449 tcg_temp_free_i64(tmp
);
1450 tcg_temp_free_i64(tmp3
);
1451 tcg_temp_free_i64(tmp4
);
1453 case 0x2c: /* STCMH R1,M3,D2(B2) [RSY] */
1454 tmp
= get_address(s
, 0, b2
, d2
);
1455 tmp32_1
= tcg_const_i32(r1
);
1456 tmp32_2
= tcg_const_i32(r3
);
1457 potential_page_fault(s
);
1458 gen_helper_stcmh(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1459 tcg_temp_free_i64(tmp
);
1460 tcg_temp_free_i32(tmp32_1
);
1461 tcg_temp_free_i32(tmp32_2
);
1463 #ifndef CONFIG_USER_ONLY
1464 case 0x2f: /* LCTLG R1,R3,D2(B2) [RSE] */
1466 check_privileged(s
);
1467 tmp
= get_address(s
, 0, b2
, d2
);
1468 tmp32_1
= tcg_const_i32(r1
);
1469 tmp32_2
= tcg_const_i32(r3
);
1470 potential_page_fault(s
);
1471 gen_helper_lctlg(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1472 tcg_temp_free_i64(tmp
);
1473 tcg_temp_free_i32(tmp32_1
);
1474 tcg_temp_free_i32(tmp32_2
);
1476 case 0x25: /* STCTG R1,R3,D2(B2) [RSE] */
1478 check_privileged(s
);
1479 tmp
= get_address(s
, 0, b2
, d2
);
1480 tmp32_1
= tcg_const_i32(r1
);
1481 tmp32_2
= tcg_const_i32(r3
);
1482 potential_page_fault(s
);
1483 gen_helper_stctg(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1484 tcg_temp_free_i64(tmp
);
1485 tcg_temp_free_i32(tmp32_1
);
1486 tcg_temp_free_i32(tmp32_2
);
1489 case 0x30: /* CSG R1,R3,D2(B2) [RSY] */
1490 tmp
= get_address(s
, 0, b2
, d2
);
1491 tmp32_1
= tcg_const_i32(r1
);
1492 tmp32_2
= tcg_const_i32(r3
);
1493 potential_page_fault(s
);
1494 /* XXX rewrite in tcg */
1495 gen_helper_csg(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1497 tcg_temp_free_i64(tmp
);
1498 tcg_temp_free_i32(tmp32_1
);
1499 tcg_temp_free_i32(tmp32_2
);
1501 case 0x3e: /* CDSG R1,R3,D2(B2) [RSY] */
1502 tmp
= get_address(s
, 0, b2
, d2
);
1503 tmp32_1
= tcg_const_i32(r1
);
1504 tmp32_2
= tcg_const_i32(r3
);
1505 potential_page_fault(s
);
1506 /* XXX rewrite in tcg */
1507 gen_helper_cdsg(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1509 tcg_temp_free_i64(tmp
);
1510 tcg_temp_free_i32(tmp32_1
);
1511 tcg_temp_free_i32(tmp32_2
);
1513 case 0x52: /* MVIY D1(B1),I2 [SIY] */
1514 tmp
= get_address(s
, 0, b2
, d2
); /* SIY -> this is the destination */
1515 tmp2
= tcg_const_i64((r1
<< 4) | r3
);
1516 tcg_gen_qemu_st8(tmp2
, tmp
, get_mem_index(s
));
1517 tcg_temp_free_i64(tmp
);
1518 tcg_temp_free_i64(tmp2
);
1521 LOG_DISAS("illegal eb operation 0x%x\n", op
);
1522 gen_illegal_opcode(s
);
/*
 * Translate one 0xED-prefixed instruction (RXE/RXF format BFP ops).
 * op is the sub-opcode byte; r1/x2/b2/d2 are the decoded fields and
 * r1b is the extra register field used by RXF insns.
 *
 * NOTE(review): this chunk is a damaged extraction of the original file.
 * The "switch (op) {" header, the per-case "break;" statements, the
 * braces, and a few short lines (e.g. the TCGv_i64 declarations and the
 * set_cc_static() calls after the cc-producing helpers) appear to have
 * been dropped.  Only the surviving tokens are reproduced below, reflowed
 * to conventional layout; restore the missing control-flow lines from the
 * upstream file before compiling.
 */
static void disas_ed(CPUS390XState *env, DisasContext *s, int op, int r1,
                     int x2, int b2, int d2, int r1b)
    TCGv_i32 tmp_r1, tmp32;
    /* Effective second-operand address and the R1 field are shared by
       every case and freed once at the end. */
    addr = get_address(s, x2, b2, d2);
    tmp_r1 = tcg_const_i32(r1);
    case 0x4: /* LDEB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_ldeb(cpu_env, tmp_r1, addr);
    case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_lxdb(cpu_env, tmp_r1, addr);
    case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
        /* Compare short BFP: load the 32-bit memory operand, compare
           against freg r1 and set the condition code inline. */
        tmp = tcg_temp_new_i64();
        tmp32 = load_freg32(r1);
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        set_cc_cmp_f32_i64(s, tmp32, tmp);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);
    case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32, tmp);
        gen_helper_aeb(cpu_env, tmp_r1, tmp32);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);
        /* CC is recomputed from the freshly written result register. */
        tmp32 = load_freg32(r1);
        gen_set_cc_nz_f32(s, tmp32);
        tcg_temp_free_i32(tmp32);
    case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32, tmp);
        gen_helper_seb(cpu_env, tmp_r1, tmp32);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);
        /* As for AEB: derive CC from the result. */
        tmp32 = load_freg32(r1);
        gen_set_cc_nz_f32(s, tmp32);
        tcg_temp_free_i32(tmp32);
    case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
        /* Divide does not set the condition code, hence no CC sequence. */
        tmp = tcg_temp_new_i64();
        tmp32 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32, tmp);
        gen_helper_deb(cpu_env, tmp_r1, tmp32);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);
    case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_tceb(cc_op, cpu_env, tmp_r1, addr);
    case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_tcdb(cc_op, cpu_env, tmp_r1, addr);
    case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_tcxb(cc_op, cpu_env, tmp_r1, addr);
    case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32, tmp);
        gen_helper_meeb(cpu_env, tmp_r1, tmp32);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);
    case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_cdb(cc_op, cpu_env, tmp_r1, addr);
    case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_adb(cc_op, cpu_env, tmp_r1, addr);
    case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_sdb(cc_op, cpu_env, tmp_r1, addr);
    case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_mdb(cpu_env, tmp_r1, addr);
    case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_ddb(cpu_env, tmp_r1, addr);
    case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
        /* for RXF insns, r1 is R3 and r1b is R1 */
        tmp32 = tcg_const_i32(r1b);
        potential_page_fault(s);
        gen_helper_madb(cpu_env, tmp32, addr, tmp_r1);
        tcg_temp_free_i32(tmp32);
        /* NOTE(review): the "default:" label for the lines below was
           presumably lost in extraction. */
        LOG_DISAS("illegal ed operation 0x%x\n", op);
        gen_illegal_opcode(s);
    tcg_temp_free_i32(tmp_r1);
    tcg_temp_free_i64(addr);
/*
 * Translate one 0xB2-prefixed instruction.  This opcode page mixes RRE
 * register-register ops (user-visible string/checksum helpers) with a
 * long run of privileged S-format system-control instructions.
 *
 * NOTE(review): damaged extraction -- the trailing "uint32_t insn)"
 * parameter, the "switch (op) {" header, all "break;" lines, braces,
 * the "#endif" matching each "#ifndef CONFIG_USER_ONLY", and several
 * short statements (e.g. "r2 = insn & 0xf;", the load_reg() operand
 * loads in IPTE/ISKE/RRBE/SSKE/STURA/SERVC, and the set_cc_static()
 * calls) were dropped.  Only the surviving tokens are kept, reflowed.
 */
static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
    TCGv_i64 tmp, tmp2, tmp3;
    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
#ifndef CONFIG_USER_ONLY
    /* RRE layout: register fields live in the low byte of insn. */
    r1 = (insn >> 4) & 0xf;
    LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);
    case 0x22: /* IPM R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_ipm(cpu_env, cc_op, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
    case 0x41: /* CKSM R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_cksm(cpu_env, tmp32_1, tmp32_2);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        gen_op_movi_cc(s, 0);
    case 0x4e: /* SAR R1,R2 [RRE] */
        /* Copy GPR r2 into access register r1. */
        tmp32_1 = load_reg32(r2);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r1]));
        tcg_temp_free_i32(tmp32_1);
    case 0x4f: /* EAR R1,R2 [RRE] */
        /* Copy access register r2 into GPR r1. */
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r2]));
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
    case 0x54: /* MVPG R1,R2 [RRE] */
        tmp2 = load_reg(r1);
        tmp3 = load_reg(r2);
        potential_page_fault(s);
        gen_helper_mvpg(cpu_env, tmp, tmp2, tmp3);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        /* XXX check CCO bit and set CC accordingly */
        gen_op_movi_cc(s, 0);
    case 0x55: /* MVST R1,R2 [RRE] */
        /* r0 carries the terminating byte for the string helpers. */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_mvst(cpu_env, tmp32_1, tmp32_2, tmp32_3);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        gen_op_movi_cc(s, 1);
    case 0x5d: /* CLST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_clst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
    case 0x5e: /* SRST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_srst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
    /* Privileged system-control instructions follow. */
#ifndef CONFIG_USER_ONLY
    case 0x02: /* STIDP D2(B2) [S] */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stidp(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
    case 0x04: /* SCK D2(B2) [S] */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sck(cc_op, tmp);
        tcg_temp_free_i64(tmp);
    case 0x05: /* STCK D2(B2) [S] */
        /* Not privileged: STCK is available to problem state. */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stck(cc_op, cpu_env, tmp);
        tcg_temp_free_i64(tmp);
    case 0x06: /* SCKC D2(B2) [S] */
        /* Set Clock Comparator */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sckc(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
    case 0x07: /* STCKC D2(B2) [S] */
        /* Store Clock Comparator */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stckc(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
    case 0x08: /* SPT D2(B2) [S] */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spt(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
    case 0x09: /* STPT D2(B2) [S] */
        /* Store CPU Timer */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stpt(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
    case 0x0a: /* SPKA D2(B2) [S] */
        /* Set PSW Key from Address */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        /* Splice the key bits of the address into psw_mask. */
        tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY);
        tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4);
        tcg_gen_or_i64(psw_mask, tmp2, tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
    case 0x0d: /* PTLB [S] */
        check_privileged(s);
        gen_helper_ptlb(cpu_env);
    case 0x10: /* SPX D2(B2) [S] */
        /* Set Prefix Register */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spx(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
    case 0x11: /* STPX D2(B2) [S] */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        /* Read the prefix (psa) straight out of the CPU state. */
        tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUS390XState, psa));
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
    case 0x12: /* STAP D2(B2) [S] */
        /* Store CPU Address */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, cpu_num));
        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
    case 0x21: /* IPTE R1,R2 [RRE] */
        /* Invalidate PTE */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        /* NOTE(review): the load of tmp (first operand) appears to have
           been lost in extraction before this point. */
        tmp2 = load_reg(r2);
        gen_helper_ipte(cpu_env, tmp, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
    case 0x29: /* ISKE R1,R2 [RRE] */
        /* Insert Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        tmp2 = tcg_temp_new_i64();
        gen_helper_iske(tmp2, cpu_env, tmp);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
    case 0x2a: /* RRBE R1,R2 [RRE] */
        /* Set Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        tmp32_1 = load_reg32(r1);
        gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
    case 0x2b: /* SSKE R1,R2 [RRE] */
        /* Set Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        tmp32_1 = load_reg32(r1);
        gen_helper_sske(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
    case 0x34: /* STCH ? */
        /* Store Subchannel */
        check_privileged(s);
        gen_op_movi_cc(s, 3);
    case 0x46: /* STURA R1,R2 [RRE] */
        /* Store Using Real Address */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        tmp32_1 = load_reg32(r1);
        potential_page_fault(s);
        gen_helper_stura(cpu_env, tmp, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
    case 0x50: /* CSP R1,R2 [RRE] */
        /* Compare And Swap And Purge */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
    case 0x5f: /* CHSC ? */
        /* Channel Subsystem Call */
        check_privileged(s);
        gen_op_movi_cc(s, 3);
    case 0x78: /* STCKE D2(B2) [S] */
        /* Store Clock Extended */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stcke(cc_op, cpu_env, tmp);
        tcg_temp_free_i64(tmp);
    case 0x79: /* SACF D2(B2) [S] */
        /* Set Address Space Control Fast */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sacf(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        /* addressing mode has changed, so end the block */
        s->is_jmp = DISAS_JUMP;
    case 0x7d: /* STSI D2,(B2) [S] */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        /* r0/r1 carry the function code and selectors for STSI. */
        tmp32_1 = load_reg32(0);
        tmp32_2 = load_reg32(1);
        potential_page_fault(s);
        gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
    case 0x9d: /* LFPC D2(B2) [S] */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
    case 0xb1: /* STFL D2(B2) [S] */
        /* Store Facility List (CPU features) at 200 */
        check_privileged(s);
        tmp2 = tcg_const_i64(0xc0000000);
        tmp = tcg_const_i64(200);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
    case 0xb2: /* LPSWE D2(B2) [S] */
        /* Load PSW Extended */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp3 = tcg_temp_new_i64();
        /* 16-byte operand: mask at +0, address at +8. */
        tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
        tcg_gen_addi_i64(tmp, tmp, 8);
        tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
        gen_helper_load_psw(cpu_env, tmp2, tmp3);
        /* we need to keep cc_op intact */
        s->is_jmp = DISAS_JUMP;
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
    case 0x20: /* SERVC R1,R2 [RRE] */
        /* SCLP Service call (PV hypercall) */
        check_privileged(s);
        potential_page_fault(s);
        tmp32_1 = load_reg32(r2);
        gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        /* NOTE(review): "default:" label presumably lost in extraction. */
        LOG_DISAS("illegal b2 operation 0x%x\n", op);
        gen_illegal_opcode(s);
/*
 * Translate one 0xB3-prefixed instruction (binary floating-point
 * RRE/RRF ops).  m3 is the extra RRF field: the third register for the
 * multiply-and-add family, a rounding-mode mask for the convert-to-fixed
 * family.
 *
 * NOTE(review): damaged extraction -- the trailing "int r1, int r2)"
 * parameters, "switch" headers, "break;" lines, braces, a continuation
 * line of FP_HELPER_CC, and several short statements (operand loads,
 * set_cc_static() calls, inner case labels of the RRF sub-switches)
 * were dropped.  Only surviving tokens are kept, reflowed.
 */
static void disas_b3(CPUS390XState *env, DisasContext *s, int op, int m3,
    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
    LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2);
/* Emit a helper call taking the two FP register numbers. */
#define FP_HELPER(i) \
    tmp32_1 = tcg_const_i32(r1); \
    tmp32_2 = tcg_const_i32(r2); \
    gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
    tcg_temp_free_i32(tmp32_1); \
    tcg_temp_free_i32(tmp32_2);

/* As FP_HELPER, but the helper also returns the condition code.
 * NOTE(review): one continuation line of this macro (between the helper
 * call and the frees) appears to have been lost in extraction. */
#define FP_HELPER_CC(i) \
    tmp32_1 = tcg_const_i32(r1); \
    tmp32_2 = tcg_const_i32(r2); \
    gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
    tcg_temp_free_i32(tmp32_1); \
    tcg_temp_free_i32(tmp32_2);
    case 0x0: /* LPEBR R1,R2 [RRE] */
        FP_HELPER_CC(lpebr);
    case 0x2: /* LTEBR R1,R2 [RRE] */
        FP_HELPER_CC(ltebr);
    case 0x3: /* LCEBR R1,R2 [RRE] */
        FP_HELPER_CC(lcebr);
    /* NOTE(review): the FP_HELPER(...) bodies of the following cases
       appear to have been lost in extraction; only the labels remain. */
    case 0x4: /* LDEBR R1,R2 [RRE] */
    case 0x5: /* LXDBR R1,R2 [RRE] */
    case 0x9: /* CEBR R1,R2 [RRE] */
    case 0xa: /* AEBR R1,R2 [RRE] */
    case 0xb: /* SEBR R1,R2 [RRE] */
    case 0xd: /* DEBR R1,R2 [RRE] */
    case 0x10: /* LPDBR R1,R2 [RRE] */
        FP_HELPER_CC(lpdbr);
    case 0x12: /* LTDBR R1,R2 [RRE] */
        FP_HELPER_CC(ltdbr);
    case 0x13: /* LCDBR R1,R2 [RRE] */
        FP_HELPER_CC(lcdbr);
    case 0x15: /* SQBDR R1,R2 [RRE] */
    case 0x17: /* MEEBR R1,R2 [RRE] */
    case 0x19: /* CDBR R1,R2 [RRE] */
    case 0x1a: /* ADBR R1,R2 [RRE] */
    case 0x1b: /* SDBR R1,R2 [RRE] */
    case 0x1c: /* MDBR R1,R2 [RRE] */
    case 0x1d: /* DDBR R1,R2 [RRE] */
    case 0xe: /* MAEBR R1,R3,R2 [RRF] */
    case 0x1e: /* MADBR R1,R3,R2 [RRF] */
    case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
        /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
        tmp32_1 = tcg_const_i32(m3);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(r1);
        /* NOTE(review): the inner switch/case labels selecting between
           the three helpers were lost in extraction. */
        gen_helper_maebr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
        gen_helper_madbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
        gen_helper_msdbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
    case 0x40: /* LPXBR R1,R2 [RRE] */
        FP_HELPER_CC(lpxbr);
    case 0x42: /* LTXBR R1,R2 [RRE] */
        FP_HELPER_CC(ltxbr);
    case 0x43: /* LCXBR R1,R2 [RRE] */
        FP_HELPER_CC(lcxbr);
    case 0x44: /* LEDBR R1,R2 [RRE] */
    case 0x45: /* LDXBR R1,R2 [RRE] */
    case 0x46: /* LEXBR R1,R2 [RRE] */
    case 0x49: /* CXBR R1,R2 [RRE] */
    case 0x4a: /* AXBR R1,R2 [RRE] */
    case 0x4b: /* SXBR R1,R2 [RRE] */
    case 0x4c: /* MXBR R1,R2 [RRE] */
    case 0x4d: /* DXBR R1,R2 [RRE] */
    case 0x65: /* LXR R1,R2 [RRE] */
        /* 128-bit FP register pair copy: move both halves. */
        tmp = load_freg(r2);
        store_freg(r1, tmp);
        tcg_temp_free_i64(tmp);
        tmp = load_freg(r2 + 2);
        store_freg(r1 + 2, tmp);
        tcg_temp_free_i64(tmp);
    case 0x74: /* LZER R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzer(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
    case 0x75: /* LZDR R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzdr(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
    case 0x76: /* LZXR R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzxr(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
    case 0x84: /* SFPC R1 [RRE] */
        tmp32_1 = load_reg32(r1);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
        tcg_temp_free_i32(tmp32_1);
    case 0x8c: /* EFPC R1 [RRE] */
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
    case 0x94: /* CEFBR R1,R2 [RRE] */
    case 0x95: /* CDFBR R1,R2 [RRE] */
    case 0x96: /* CXFBR R1,R2 [RRE] */
        /* Convert from 32-bit fixed; inner dispatch labels were lost. */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = load_reg32(r2);
        gen_helper_cefbr(cpu_env, tmp32_1, tmp32_2);
        gen_helper_cdfbr(cpu_env, tmp32_1, tmp32_2);
        gen_helper_cxfbr(cpu_env, tmp32_1, tmp32_2);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
    case 0x98: /* CFEBR R1,R2 [RRE] */
    case 0x99: /* CFDBR R1,R2 [RRE] */
    case 0x9a: /* CFXBR R1,R2 [RRE] */
        /* Convert to 32-bit fixed; m3 is the rounding-mode mask. */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cfebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        gen_helper_cfdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        gen_helper_cfxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
    case 0xa4: /* CEGBR R1,R2 [RRE] */
    case 0xa5: /* CDGBR R1,R2 [RRE] */
        /* Convert from 64-bit fixed.  NOTE(review): the load of tmp
           (the r2 operand) appears lost in extraction. */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_cegbr(cpu_env, tmp32_1, tmp);
        gen_helper_cdgbr(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
    case 0xa6: /* CXGBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_cxgbr(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
    case 0xa8: /* CGEBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
    case 0xa9: /* CGDBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
    case 0xaa: /* CGXBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        /* NOTE(review): "default:" label presumably lost in extraction. */
        LOG_DISAS("illegal b3 operation 0x%x\n", op);
        gen_illegal_opcode(s);
/*
 * Translate one 0xB9-prefixed (RRE-format) instruction; op is the
 * sub-opcode, r1/r2 the decoded register fields.
 *
 * NOTE(review): damaged extraction -- the remaining parameters, local
 * declarations, "switch (op) {", "break;" lines, braces, and a few
 * statements (the result store for LLGTR, the operand load and
 * cc update for FLOGR) were dropped.  Only surviving tokens are kept.
 */
static void disas_b9(CPUS390XState *env, DisasContext *s, int op, int r1,
    LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2);
    case 0x17: /* LLGTR R1,R2 [RRE] */
        /* Zero-extend the low 31 bits of r2 to 64 bits. */
        tmp32_1 = load_reg32(r2);
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i32(tmp32_1, tmp32_1, 0x7fffffffUL);
        tcg_gen_extu_i32_i64(tmp, tmp32_1);
        /* NOTE(review): the store of tmp into r1 appears lost here. */
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
    case 0x0f: /* LRVGR R1,R2 [RRE] */
        /* Load reversed: 64-bit byte swap directly between registers. */
        tcg_gen_bswap64_i64(regs[r1], regs[r2]);
    case 0x1f: /* LRVR R1,R2 [RRE] */
        tmp32_1 = load_reg32(r2);
        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
    case 0x83: /* FLOGR R1,R2 [RRE] */
        /* NOTE(review): the load of tmp (the r2 operand) appears lost
           before this point. */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_flogr(cc_op, cpu_env, tmp32_1, tmp);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        /* NOTE(review): "default:" label presumably lost in extraction. */
        LOG_DISAS("illegal b9 operation 0x%x\n", op);
        gen_illegal_opcode(s);
2345 static void disas_s390_insn(CPUS390XState
*env
, DisasContext
*s
)
2347 TCGv_i64 tmp
, tmp2
, tmp3
, tmp4
;
2348 TCGv_i32 tmp32_1
, tmp32_2
;
2351 int op
, r1
, r2
, r3
, d1
, d2
, x2
, b1
, b2
, i
, i2
, r1b
;
2354 opc
= cpu_ldub_code(env
, s
->pc
);
2355 LOG_DISAS("opc 0x%x\n", opc
);
2358 #ifndef CONFIG_USER_ONLY
2359 case 0x82: /* LPSW D2(B2) [S] */
2361 check_privileged(s
);
2362 insn
= ld_code4(env
, s
->pc
);
2363 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2364 tmp
= get_address(s
, 0, b2
, d2
);
2365 tmp2
= tcg_temp_new_i64();
2366 tmp3
= tcg_temp_new_i64();
2367 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
2368 tcg_gen_addi_i64(tmp
, tmp
, 4);
2369 tcg_gen_qemu_ld32u(tmp3
, tmp
, get_mem_index(s
));
2370 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2371 tcg_gen_shli_i64(tmp2
, tmp2
, 32);
2372 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
2373 tcg_temp_free_i64(tmp
);
2374 tcg_temp_free_i64(tmp2
);
2375 tcg_temp_free_i64(tmp3
);
2376 /* we need to keep cc_op intact */
2377 s
->is_jmp
= DISAS_JUMP
;
2379 case 0x83: /* DIAG R1,R3,D2 [RS] */
2380 /* Diagnose call (KVM hypercall) */
2381 check_privileged(s
);
2382 potential_page_fault(s
);
2383 insn
= ld_code4(env
, s
->pc
);
2384 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2385 tmp32_1
= tcg_const_i32(insn
& 0xfff);
2388 gen_helper_diag(tmp2
, cpu_env
, tmp32_1
, tmp2
, tmp3
);
2390 tcg_temp_free_i32(tmp32_1
);
2391 tcg_temp_free_i64(tmp2
);
2392 tcg_temp_free_i64(tmp3
);
2395 case 0x88: /* SRL R1,D2(B2) [RS] */
2396 case 0x89: /* SLL R1,D2(B2) [RS] */
2397 case 0x8a: /* SRA R1,D2(B2) [RS] */
2398 insn
= ld_code4(env
, s
->pc
);
2399 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2400 tmp
= get_address(s
, 0, b2
, d2
);
2401 tmp32_1
= load_reg32(r1
);
2402 tmp32_2
= tcg_temp_new_i32();
2403 tcg_gen_trunc_i64_i32(tmp32_2
, tmp
);
2404 tcg_gen_andi_i32(tmp32_2
, tmp32_2
, 0x3f);
2407 tcg_gen_shr_i32(tmp32_1
, tmp32_1
, tmp32_2
);
2410 tcg_gen_shl_i32(tmp32_1
, tmp32_1
, tmp32_2
);
2413 tcg_gen_sar_i32(tmp32_1
, tmp32_1
, tmp32_2
);
2414 set_cc_s32(s
, tmp32_1
);
2419 store_reg32(r1
, tmp32_1
);
2420 tcg_temp_free_i64(tmp
);
2421 tcg_temp_free_i32(tmp32_1
);
2422 tcg_temp_free_i32(tmp32_2
);
2424 case 0x8c: /* SRDL R1,D2(B2) [RS] */
2425 case 0x8d: /* SLDL R1,D2(B2) [RS] */
2426 case 0x8e: /* SRDA R1,D2(B2) [RS] */
2427 insn
= ld_code4(env
, s
->pc
);
2428 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2429 tmp
= get_address(s
, 0, b2
, d2
); /* shift */
2430 tmp2
= tcg_temp_new_i64();
2431 tmp32_1
= load_reg32(r1
);
2432 tmp32_2
= load_reg32(r1
+ 1);
2433 tcg_gen_concat_i32_i64(tmp2
, tmp32_2
, tmp32_1
); /* operand */
2436 tcg_gen_shr_i64(tmp2
, tmp2
, tmp
);
2439 tcg_gen_shl_i64(tmp2
, tmp2
, tmp
);
2442 tcg_gen_sar_i64(tmp2
, tmp2
, tmp
);
2443 set_cc_s64(s
, tmp2
);
2446 tcg_gen_shri_i64(tmp
, tmp2
, 32);
2447 tcg_gen_trunc_i64_i32(tmp32_1
, tmp
);
2448 store_reg32(r1
, tmp32_1
);
2449 tcg_gen_trunc_i64_i32(tmp32_2
, tmp2
);
2450 store_reg32(r1
+ 1, tmp32_2
);
2451 tcg_temp_free_i64(tmp
);
2452 tcg_temp_free_i64(tmp2
);
2454 case 0x98: /* LM R1,R3,D2(B2) [RS] */
2455 case 0x90: /* STM R1,R3,D2(B2) [RS] */
2456 insn
= ld_code4(env
, s
->pc
);
2457 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2459 tmp
= get_address(s
, 0, b2
, d2
);
2460 tmp2
= tcg_temp_new_i64();
2461 tmp3
= tcg_const_i64(4);
2462 tmp4
= tcg_const_i64(0xffffffff00000000ULL
);
2463 for (i
= r1
;; i
= (i
+ 1) % 16) {
2465 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
2466 tcg_gen_and_i64(regs
[i
], regs
[i
], tmp4
);
2467 tcg_gen_or_i64(regs
[i
], regs
[i
], tmp2
);
2469 tcg_gen_qemu_st32(regs
[i
], tmp
, get_mem_index(s
));
2474 tcg_gen_add_i64(tmp
, tmp
, tmp3
);
2476 tcg_temp_free_i64(tmp
);
2477 tcg_temp_free_i64(tmp2
);
2478 tcg_temp_free_i64(tmp3
);
2479 tcg_temp_free_i64(tmp4
);
2481 case 0x92: /* MVI D1(B1),I2 [SI] */
2482 insn
= ld_code4(env
, s
->pc
);
2483 tmp
= decode_si(s
, insn
, &i2
, &b1
, &d1
);
2484 tmp2
= tcg_const_i64(i2
);
2485 tcg_gen_qemu_st8(tmp2
, tmp
, get_mem_index(s
));
2486 tcg_temp_free_i64(tmp
);
2487 tcg_temp_free_i64(tmp2
);
2489 case 0x94: /* NI D1(B1),I2 [SI] */
2490 case 0x96: /* OI D1(B1),I2 [SI] */
2491 case 0x97: /* XI D1(B1),I2 [SI] */
2492 insn
= ld_code4(env
, s
->pc
);
2493 tmp
= decode_si(s
, insn
, &i2
, &b1
, &d1
);
2494 tmp2
= tcg_temp_new_i64();
2495 tcg_gen_qemu_ld8u(tmp2
, tmp
, get_mem_index(s
));
2498 tcg_gen_andi_i64(tmp2
, tmp2
, i2
);
2501 tcg_gen_ori_i64(tmp2
, tmp2
, i2
);
2504 tcg_gen_xori_i64(tmp2
, tmp2
, i2
);
2509 tcg_gen_qemu_st8(tmp2
, tmp
, get_mem_index(s
));
2510 set_cc_nz_u64(s
, tmp2
);
2511 tcg_temp_free_i64(tmp
);
2512 tcg_temp_free_i64(tmp2
);
2514 case 0x9a: /* LAM R1,R3,D2(B2) [RS] */
2515 insn
= ld_code4(env
, s
->pc
);
2516 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2517 tmp
= get_address(s
, 0, b2
, d2
);
2518 tmp32_1
= tcg_const_i32(r1
);
2519 tmp32_2
= tcg_const_i32(r3
);
2520 potential_page_fault(s
);
2521 gen_helper_lam(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2522 tcg_temp_free_i64(tmp
);
2523 tcg_temp_free_i32(tmp32_1
);
2524 tcg_temp_free_i32(tmp32_2
);
2526 case 0x9b: /* STAM R1,R3,D2(B2) [RS] */
2527 insn
= ld_code4(env
, s
->pc
);
2528 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2529 tmp
= get_address(s
, 0, b2
, d2
);
2530 tmp32_1
= tcg_const_i32(r1
);
2531 tmp32_2
= tcg_const_i32(r3
);
2532 potential_page_fault(s
);
2533 gen_helper_stam(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2534 tcg_temp_free_i64(tmp
);
2535 tcg_temp_free_i32(tmp32_1
);
2536 tcg_temp_free_i32(tmp32_2
);
2538 case 0xa8: /* MVCLE R1,R3,D2(B2) [RS] */
2539 insn
= ld_code4(env
, s
->pc
);
2540 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2541 tmp
= get_address(s
, 0, b2
, d2
);
2542 tmp32_1
= tcg_const_i32(r1
);
2543 tmp32_2
= tcg_const_i32(r3
);
2544 potential_page_fault(s
);
2545 gen_helper_mvcle(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2547 tcg_temp_free_i64(tmp
);
2548 tcg_temp_free_i32(tmp32_1
);
2549 tcg_temp_free_i32(tmp32_2
);
2551 case 0xa9: /* CLCLE R1,R3,D2(B2) [RS] */
2552 insn
= ld_code4(env
, s
->pc
);
2553 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2554 tmp
= get_address(s
, 0, b2
, d2
);
2555 tmp32_1
= tcg_const_i32(r1
);
2556 tmp32_2
= tcg_const_i32(r3
);
2557 potential_page_fault(s
);
2558 gen_helper_clcle(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2560 tcg_temp_free_i64(tmp
);
2561 tcg_temp_free_i32(tmp32_1
);
2562 tcg_temp_free_i32(tmp32_2
);
2564 #ifndef CONFIG_USER_ONLY
2565 case 0xac: /* STNSM D1(B1),I2 [SI] */
2566 case 0xad: /* STOSM D1(B1),I2 [SI] */
2567 check_privileged(s
);
2568 insn
= ld_code4(env
, s
->pc
);
2569 tmp
= decode_si(s
, insn
, &i2
, &b1
, &d1
);
2570 tmp2
= tcg_temp_new_i64();
2571 tcg_gen_shri_i64(tmp2
, psw_mask
, 56);
2572 tcg_gen_qemu_st8(tmp2
, tmp
, get_mem_index(s
));
2574 tcg_gen_andi_i64(psw_mask
, psw_mask
,
2575 ((uint64_t)i2
<< 56) | 0x00ffffffffffffffULL
);
2577 tcg_gen_ori_i64(psw_mask
, psw_mask
, (uint64_t)i2
<< 56);
2579 tcg_temp_free_i64(tmp
);
2580 tcg_temp_free_i64(tmp2
);
2582 case 0xae: /* SIGP R1,R3,D2(B2) [RS] */
2583 check_privileged(s
);
2584 insn
= ld_code4(env
, s
->pc
);
2585 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2586 tmp
= get_address(s
, 0, b2
, d2
);
2587 tmp2
= load_reg(r3
);
2588 tmp32_1
= tcg_const_i32(r1
);
2589 potential_page_fault(s
);
2590 gen_helper_sigp(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp2
);
2592 tcg_temp_free_i64(tmp
);
2593 tcg_temp_free_i64(tmp2
);
2594 tcg_temp_free_i32(tmp32_1
);
2596 case 0xb1: /* LRA R1,D2(X2, B2) [RX] */
2597 check_privileged(s
);
2598 insn
= ld_code4(env
, s
->pc
);
2599 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2600 tmp32_1
= tcg_const_i32(r1
);
2601 potential_page_fault(s
);
2602 gen_helper_lra(cc_op
, cpu_env
, tmp
, tmp32_1
);
2604 tcg_temp_free_i64(tmp
);
2605 tcg_temp_free_i32(tmp32_1
);
2609 insn
= ld_code4(env
, s
->pc
);
2610 op
= (insn
>> 16) & 0xff;
2612 case 0x9c: /* STFPC D2(B2) [S] */
2614 b2
= (insn
>> 12) & 0xf;
2615 tmp32_1
= tcg_temp_new_i32();
2616 tmp
= tcg_temp_new_i64();
2617 tmp2
= get_address(s
, 0, b2
, d2
);
2618 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2619 tcg_gen_extu_i32_i64(tmp
, tmp32_1
);
2620 tcg_gen_qemu_st32(tmp
, tmp2
, get_mem_index(s
));
2621 tcg_temp_free_i32(tmp32_1
);
2622 tcg_temp_free_i64(tmp
);
2623 tcg_temp_free_i64(tmp2
);
2626 disas_b2(env
, s
, op
, insn
);
2631 insn
= ld_code4(env
, s
->pc
);
2632 op
= (insn
>> 16) & 0xff;
2633 r3
= (insn
>> 12) & 0xf; /* aka m3 */
2634 r1
= (insn
>> 4) & 0xf;
2636 disas_b3(env
, s
, op
, r3
, r1
, r2
);
2638 #ifndef CONFIG_USER_ONLY
2639 case 0xb6: /* STCTL R1,R3,D2(B2) [RS] */
2641 check_privileged(s
);
2642 insn
= ld_code4(env
, s
->pc
);
2643 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2644 tmp
= get_address(s
, 0, b2
, d2
);
2645 tmp32_1
= tcg_const_i32(r1
);
2646 tmp32_2
= tcg_const_i32(r3
);
2647 potential_page_fault(s
);
2648 gen_helper_stctl(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2649 tcg_temp_free_i64(tmp
);
2650 tcg_temp_free_i32(tmp32_1
);
2651 tcg_temp_free_i32(tmp32_2
);
2653 case 0xb7: /* LCTL R1,R3,D2(B2) [RS] */
2655 check_privileged(s
);
2656 insn
= ld_code4(env
, s
->pc
);
2657 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2658 tmp
= get_address(s
, 0, b2
, d2
);
2659 tmp32_1
= tcg_const_i32(r1
);
2660 tmp32_2
= tcg_const_i32(r3
);
2661 potential_page_fault(s
);
2662 gen_helper_lctl(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2663 tcg_temp_free_i64(tmp
);
2664 tcg_temp_free_i32(tmp32_1
);
2665 tcg_temp_free_i32(tmp32_2
);
2669 insn
= ld_code4(env
, s
->pc
);
2670 r1
= (insn
>> 4) & 0xf;
2672 op
= (insn
>> 16) & 0xff;
2673 disas_b9(env
, s
, op
, r1
, r2
);
2675 case 0xba: /* CS R1,R3,D2(B2) [RS] */
2676 insn
= ld_code4(env
, s
->pc
);
2677 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2678 tmp
= get_address(s
, 0, b2
, d2
);
2679 tmp32_1
= tcg_const_i32(r1
);
2680 tmp32_2
= tcg_const_i32(r3
);
2681 potential_page_fault(s
);
2682 gen_helper_cs(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2684 tcg_temp_free_i64(tmp
);
2685 tcg_temp_free_i32(tmp32_1
);
2686 tcg_temp_free_i32(tmp32_2
);
2688 case 0xbd: /* CLM R1,M3,D2(B2) [RS] */
2689 insn
= ld_code4(env
, s
->pc
);
2690 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2691 tmp
= get_address(s
, 0, b2
, d2
);
2692 tmp32_1
= load_reg32(r1
);
2693 tmp32_2
= tcg_const_i32(r3
);
2694 potential_page_fault(s
);
2695 gen_helper_clm(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp
);
2697 tcg_temp_free_i64(tmp
);
2698 tcg_temp_free_i32(tmp32_1
);
2699 tcg_temp_free_i32(tmp32_2
);
2701 case 0xbe: /* STCM R1,M3,D2(B2) [RS] */
2702 insn
= ld_code4(env
, s
->pc
);
2703 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2704 tmp
= get_address(s
, 0, b2
, d2
);
2705 tmp32_1
= load_reg32(r1
);
2706 tmp32_2
= tcg_const_i32(r3
);
2707 potential_page_fault(s
);
2708 gen_helper_stcm(cpu_env
, tmp32_1
, tmp32_2
, tmp
);
2709 tcg_temp_free_i64(tmp
);
2710 tcg_temp_free_i32(tmp32_1
);
2711 tcg_temp_free_i32(tmp32_2
);
2713 case 0xd2: /* MVC D1(L,B1),D2(B2) [SS] */
2714 case 0xd4: /* NC D1(L,B1),D2(B2) [SS] */
2715 case 0xd5: /* CLC D1(L,B1),D2(B2) [SS] */
2716 case 0xd6: /* OC D1(L,B1),D2(B2) [SS] */
2717 case 0xd7: /* XC D1(L,B1),D2(B2) [SS] */
2718 case 0xdc: /* TR D1(L,B1),D2(B2) [SS] */
2719 case 0xf3: /* UNPK D1(L1,B1),D2(L2,B2) [SS] */
2720 insn
= ld_code6(env
, s
->pc
);
2721 vl
= tcg_const_i32((insn
>> 32) & 0xff);
2722 b1
= (insn
>> 28) & 0xf;
2723 b2
= (insn
>> 12) & 0xf;
2724 d1
= (insn
>> 16) & 0xfff;
2726 tmp
= get_address(s
, 0, b1
, d1
);
2727 tmp2
= get_address(s
, 0, b2
, d2
);
2730 gen_op_mvc(s
, (insn
>> 32) & 0xff, tmp
, tmp2
);
2733 potential_page_fault(s
);
2734 gen_helper_nc(cc_op
, cpu_env
, vl
, tmp
, tmp2
);
2738 gen_op_clc(s
, (insn
>> 32) & 0xff, tmp
, tmp2
);
2741 potential_page_fault(s
);
2742 gen_helper_oc(cc_op
, cpu_env
, vl
, tmp
, tmp2
);
2746 potential_page_fault(s
);
2747 gen_helper_xc(cc_op
, cpu_env
, vl
, tmp
, tmp2
);
2751 potential_page_fault(s
);
2752 gen_helper_tr(cpu_env
, vl
, tmp
, tmp2
);
2756 potential_page_fault(s
);
2757 gen_helper_unpk(cpu_env
, vl
, tmp
, tmp2
);
2762 tcg_temp_free_i64(tmp
);
2763 tcg_temp_free_i64(tmp2
);
2765 #ifndef CONFIG_USER_ONLY
2766 case 0xda: /* MVCP D1(R1,B1),D2(B2),R3 [SS] */
2767 case 0xdb: /* MVCS D1(R1,B1),D2(B2),R3 [SS] */
2768 check_privileged(s
);
2769 potential_page_fault(s
);
2770 insn
= ld_code6(env
, s
->pc
);
2771 r1
= (insn
>> 36) & 0xf;
2772 r3
= (insn
>> 32) & 0xf;
2773 b1
= (insn
>> 28) & 0xf;
2774 d1
= (insn
>> 16) & 0xfff;
2775 b2
= (insn
>> 12) & 0xf;
2779 tmp2
= get_address(s
, 0, b1
, d1
);
2780 tmp3
= get_address(s
, 0, b2
, d2
);
2782 gen_helper_mvcp(cc_op
, cpu_env
, tmp
, tmp2
, tmp3
);
2784 gen_helper_mvcs(cc_op
, cpu_env
, tmp
, tmp2
, tmp3
);
2787 tcg_temp_free_i64(tmp
);
2788 tcg_temp_free_i64(tmp2
);
2789 tcg_temp_free_i64(tmp3
);
2793 insn
= ld_code6(env
, s
->pc
);
2796 r1
= (insn
>> 36) & 0xf;
2797 x2
= (insn
>> 32) & 0xf;
2798 b2
= (insn
>> 28) & 0xf;
2799 d2
= ((int)((((insn
>> 16) & 0xfff)
2800 | ((insn
<< 4) & 0xff000)) << 12)) >> 12;
2801 disas_e3(env
, s
, op
, r1
, x2
, b2
, d2
);
2803 #ifndef CONFIG_USER_ONLY
2805 /* Test Protection */
2806 check_privileged(s
);
2807 insn
= ld_code6(env
, s
->pc
);
2809 disas_e5(env
, s
, insn
);
2813 insn
= ld_code6(env
, s
->pc
);
2816 r1
= (insn
>> 36) & 0xf;
2817 r3
= (insn
>> 32) & 0xf;
2818 b2
= (insn
>> 28) & 0xf;
2819 d2
= ((int)((((insn
>> 16) & 0xfff)
2820 | ((insn
<< 4) & 0xff000)) << 12)) >> 12;
2821 disas_eb(env
, s
, op
, r1
, r3
, b2
, d2
);
2824 insn
= ld_code6(env
, s
->pc
);
2827 r1
= (insn
>> 36) & 0xf;
2828 x2
= (insn
>> 32) & 0xf;
2829 b2
= (insn
>> 28) & 0xf;
2830 d2
= (short)((insn
>> 16) & 0xfff);
2831 r1b
= (insn
>> 12) & 0xf;
2832 disas_ed(env
, s
, op
, r1
, x2
, b2
, d2
, r1b
);
2835 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%x\n", opc
);
2836 gen_illegal_opcode(s
);
2841 /* ====================================================================== */
2842 /* Define the insn format enumeration. */
2843 #define F0(N) FMT_##N,
2844 #define F1(N, X1) F0(N)
2845 #define F2(N, X1, X2) F0(N)
2846 #define F3(N, X1, X2, X3) F0(N)
2847 #define F4(N, X1, X2, X3, X4) F0(N)
2848 #define F5(N, X1, X2, X3, X4, X5) F0(N)
2851 #include "insn-format.def"
2861 /* Define a structure to hold the decoded fields. We'll store each inside
2862 an array indexed by an enum. In order to conserve memory, we'll arrange
2863 for fields that do not exist at the same time to overlap, thus the "C"
2864 for compact. For checking purposes there is an "O" for original index
2865 as well that will be applied to availability bitmaps. */
2867 enum DisasFieldIndexO
{
2890 enum DisasFieldIndexC
{
2921 struct DisasFields
{
2924 unsigned presentC
:16;
2925 unsigned int presentO
;
2929 /* This is the way fields are to be accessed out of DisasFields. */
2930 #define have_field(S, F) have_field1((S), FLD_O_##F)
2931 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
2933 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
2935 return (f
->presentO
>> c
) & 1;
2938 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
2939 enum DisasFieldIndexC c
)
2941 assert(have_field1(f
, o
));
2945 /* Describe the layout of each field in each format. */
2946 typedef struct DisasField
{
2948 unsigned int size
:8;
2949 unsigned int type
:2;
2950 unsigned int indexC
:6;
2951 enum DisasFieldIndexO indexO
:8;
2954 typedef struct DisasFormatInfo
{
2955 DisasField op
[NUM_C_FIELD
];
2958 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
2959 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
2960 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2961 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
2962 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2963 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
2964 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
2965 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2966 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
2967 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2968 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
2969 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
2970 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
2971 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
2973 #define F0(N) { { } },
2974 #define F1(N, X1) { { X1 } },
2975 #define F2(N, X1, X2) { { X1, X2 } },
2976 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
2977 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
2978 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
2980 static const DisasFormatInfo format_info
[] = {
2981 #include "insn-format.def"
2999 /* Generally, we'll extract operands into this structures, operate upon
3000 them, and store them back. See the "in1", "in2", "prep", "wout" sets
3001 of routines below for more details. */
3003 bool g_out
, g_out2
, g_in1
, g_in2
;
3004 TCGv_i64 out
, out2
, in1
, in2
;
3008 /* Return values from translate_one, indicating the state of the TB. */
3010 /* Continue the TB. */
3012 /* We have emitted one or more goto_tb. No fixup required. */
3014 /* We are not using a goto_tb (for whatever reason), but have updated
3015 the PC (for whatever reason), so there's no need to do it again on
3018 /* We are exiting the TB, but have neither emitted a goto_tb, nor
3019 updated the PC for the next instruction to be executed. */
3021 /* We are ending the TB with a noreturn function call, e.g. longjmp.
3022 No following code will be executed. */
3026 typedef enum DisasFacility
{
3027 FAC_Z
, /* zarch (default) */
3028 FAC_CASS
, /* compare and swap and store */
3029 FAC_CASS2
, /* compare and swap and store 2*/
3030 FAC_DFP
, /* decimal floating point */
3031 FAC_DFPR
, /* decimal floating point rounding */
3032 FAC_DO
, /* distinct operands */
3033 FAC_EE
, /* execute extensions */
3034 FAC_EI
, /* extended immediate */
3035 FAC_FPE
, /* floating point extension */
3036 FAC_FPSSH
, /* floating point support sign handling */
3037 FAC_FPRGR
, /* FPR-GR transfer */
3038 FAC_GIE
, /* general instructions extension */
3039 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
3040 FAC_HW
, /* high-word */
3041 FAC_IEEEE_SIM
, /* IEEE exception simulation */
3042 FAC_LOC
, /* load/store on condition */
3043 FAC_LD
, /* long displacement */
3044 FAC_PC
, /* population count */
3045 FAC_SCF
, /* store clock fast */
3046 FAC_SFLE
, /* store facility list extended */
3052 DisasFacility fac
:6;
3056 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
3057 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
3058 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
3059 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
3060 void (*help_cout
)(DisasContext
*, DisasOps
*);
3061 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
3066 /* ====================================================================== */
3067 /* Miscellaneous helpers, used by several operations. */
3069 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
3071 if (dest
== s
->next_pc
) {
3074 if (use_goto_tb(s
, dest
)) {
3075 gen_update_cc_op(s
);
3077 tcg_gen_movi_i64(psw_addr
, dest
);
3078 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
3079 return EXIT_GOTO_TB
;
3081 tcg_gen_movi_i64(psw_addr
, dest
);
3082 return EXIT_PC_UPDATED
;
3086 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
3087 bool is_imm
, int imm
, TCGv_i64 cdest
)
3090 uint64_t dest
= s
->pc
+ 2 * imm
;
3093 /* Take care of the special cases first. */
3094 if (c
->cond
== TCG_COND_NEVER
) {
3099 if (dest
== s
->next_pc
) {
3100 /* Branch to next. */
3104 if (c
->cond
== TCG_COND_ALWAYS
) {
3105 ret
= help_goto_direct(s
, dest
);
3109 if (TCGV_IS_UNUSED_I64(cdest
)) {
3110 /* E.g. bcr %r0 -> no branch. */
3114 if (c
->cond
== TCG_COND_ALWAYS
) {
3115 tcg_gen_mov_i64(psw_addr
, cdest
);
3116 ret
= EXIT_PC_UPDATED
;
3121 if (use_goto_tb(s
, s
->next_pc
)) {
3122 if (is_imm
&& use_goto_tb(s
, dest
)) {
3123 /* Both exits can use goto_tb. */
3124 gen_update_cc_op(s
);
3126 lab
= gen_new_label();
3128 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
3130 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
3133 /* Branch not taken. */
3135 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
3136 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
3141 tcg_gen_movi_i64(psw_addr
, dest
);
3142 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
3146 /* Fallthru can use goto_tb, but taken branch cannot. */
3147 /* Store taken branch destination before the brcond. This
3148 avoids having to allocate a new local temp to hold it.
3149 We'll overwrite this in the not taken case anyway. */
3151 tcg_gen_mov_i64(psw_addr
, cdest
);
3154 lab
= gen_new_label();
3156 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
3158 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
3161 /* Branch not taken. */
3162 gen_update_cc_op(s
);
3164 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
3165 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
3169 tcg_gen_movi_i64(psw_addr
, dest
);
3171 ret
= EXIT_PC_UPDATED
;
3174 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
3175 Most commonly we're single-stepping or some other condition that
3176 disables all use of goto_tb. Just update the PC and exit. */
3178 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
3180 cdest
= tcg_const_i64(dest
);
3184 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
3187 TCGv_i32 t0
= tcg_temp_new_i32();
3188 TCGv_i64 t1
= tcg_temp_new_i64();
3189 TCGv_i64 z
= tcg_const_i64(0);
3190 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
3191 tcg_gen_extu_i32_i64(t1
, t0
);
3192 tcg_temp_free_i32(t0
);
3193 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
3194 tcg_temp_free_i64(t1
);
3195 tcg_temp_free_i64(z
);
3199 tcg_temp_free_i64(cdest
);
3201 tcg_temp_free_i64(next
);
3203 ret
= EXIT_PC_UPDATED
;
3211 /* ====================================================================== */
3212 /* The operations. These perform the bulk of the work for any insn,
3213 usually after the operands have been loaded and output initialized. */
3215 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
3217 gen_helper_abs_i64(o
->out
, o
->in2
);
3221 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
3223 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3227 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
3231 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3233 /* XXX possible optimization point */
3235 cc
= tcg_temp_new_i64();
3236 tcg_gen_extu_i32_i64(cc
, cc_op
);
3237 tcg_gen_shri_i64(cc
, cc
, 1);
3239 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3240 tcg_temp_free_i64(cc
);
3244 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
3246 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
3250 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
3252 int shift
= s
->insn
->data
& 0xff;
3253 int size
= s
->insn
->data
>> 8;
3254 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3257 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3258 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3259 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
3261 /* Produce the CC from only the bits manipulated. */
3262 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3263 set_cc_nz_u64(s
, cc_dst
);
3267 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
3269 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
3270 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
3271 tcg_gen_mov_i64(psw_addr
, o
->in2
);
3272 return EXIT_PC_UPDATED
;
3278 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
3280 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
3281 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
3284 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
3286 int m1
= get_field(s
->fields
, m1
);
3287 bool is_imm
= have_field(s
->fields
, i2
);
3288 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
3291 disas_jcc(s
, &c
, m1
);
3292 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
3295 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
3297 int r1
= get_field(s
->fields
, r1
);
3298 bool is_imm
= have_field(s
->fields
, i2
);
3299 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
3303 c
.cond
= TCG_COND_NE
;
3308 t
= tcg_temp_new_i64();
3309 tcg_gen_subi_i64(t
, regs
[r1
], 1);
3310 store_reg32_i64(r1
, t
);
3311 c
.u
.s32
.a
= tcg_temp_new_i32();
3312 c
.u
.s32
.b
= tcg_const_i32(0);
3313 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
3314 tcg_temp_free_i64(t
);
3316 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
3319 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
3321 int r1
= get_field(s
->fields
, r1
);
3322 bool is_imm
= have_field(s
->fields
, i2
);
3323 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
3326 c
.cond
= TCG_COND_NE
;
3331 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
3332 c
.u
.s64
.a
= regs
[r1
];
3333 c
.u
.s64
.b
= tcg_const_i64(0);
3335 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
3338 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
3340 TCGv_i64 t1
= tcg_temp_new_i64();
3341 TCGv_i32 t2
= tcg_temp_new_i32();
3342 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
3343 gen_helper_cvd(t1
, t2
);
3344 tcg_temp_free_i32(t2
);
3345 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
3346 tcg_temp_free_i64(t1
);
3350 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
3352 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
3353 return_low128(o
->out
);
3357 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
3359 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
3360 return_low128(o
->out
);
3364 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
3366 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
3367 return_low128(o
->out
);
3371 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
3373 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3374 return_low128(o
->out
);
/* EXECUTE: run the instruction at the address in in2 with its second
   byte modified by r1, entirely inside a helper.  cc_op is passed both
   as input and output: the helper receives the current CC and returns
   the CC produced by the executed instruction.
   NOTE(review): the extraction dropped several lines here (local
   declarations, the function epilogue/return); the non-comment text
   below is preserved exactly as found.  */
3378 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
3380 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
3381 tb->flags, (ab)use the tb->cs_base field as the address of
3382 the template in memory, and grab 8 bits of tb->flags/cflags for
3383 the contents of the register. We would then recognize all this
3384 in gen_intermediate_code_internal, generating code for exactly
3385 one instruction. This new TB then gets executed normally.
3387 On the other hand, this seems to be mostly used for modifying
3388 MVC inside of memcpy, which needs a helper call anyway. So
3389 perhaps this doesn't bear thinking about any further. */
/* The helper needs the next-insn address for ilen/PSW bookkeeping.  */
3396 tmp
= tcg_const_i64(s
->next_pc
);
3397 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
3398 tcg_temp_free_i64(tmp
);
/* INSERT CHARACTERS UNDER MASK (ICM/ICMH): load 1..4 bytes from the
   address in in2 and deposit them into out at the byte positions
   selected by mask m3; insn->data gives the base bit position (0 for
   ICMH, 32 for ICM).  The CC is computed from the inserted bits via
   CC_OP_ICM.
   NOTE(review): the extraction dropped the switch/case skeleton, the
   multi-byte loop header and the epilogue; the non-comment text below
   is preserved exactly as found.  */
3404 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
3406 int m3
= get_field(s
->fields
, m3
);
3407 int pos
, len
, base
= s
->insn
->data
;
3408 TCGv_i64 tmp
= tcg_temp_new_i64();
/* Contiguous-mask fast paths: one load of the whole field.  */
3413 /* Effectively a 32-bit load. */
3414 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
3421 /* Effectively a 16-bit load. */
3422 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
3430 /* Effectively an 8-bit load. */
3431 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
/* Deposit the loaded field at the bit position implied by the mask;
   ccm collects the mask of bits that participate in the CC.  */
3436 pos
= base
+ ctz32(m3
) * 8;
3437 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
3438 ccm
= ((1ull << len
) - 1) << pos
;
/* Non-contiguous masks: byte-at-a-time loads and inserts.  */
3442 /* This is going to be a sequence of loads and inserts. */
3443 pos
= base
+ 32 - 8;
3447 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
3448 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3449 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
/* Shift the mask left to examine the next bit.  */
3452 m3
= (m3
<< 1) & 0xf;
/* Set the CC from the inserted bits.  */
3458 tcg_gen_movi_i64(tmp
, ccm
);
3459 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
3460 tcg_temp_free_i64(tmp
);
3464 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
3466 int shift
= s
->insn
->data
& 0xff;
3467 int size
= s
->insn
->data
>> 8;
3468 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
3472 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
3474 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
3478 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
3480 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
3484 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
3486 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
3490 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
3492 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
3496 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
3498 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
3502 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
3504 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
3508 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
3510 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
3514 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
3517 o
->g_out
= o
->g_in2
;
3518 TCGV_UNUSED_I64(o
->in2
);
3523 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
3527 o
->g_out
= o
->g_in1
;
3528 o
->g_out2
= o
->g_in2
;
3529 TCGV_UNUSED_I64(o
->in1
);
3530 TCGV_UNUSED_I64(o
->in2
);
3531 o
->g_in1
= o
->g_in2
= false;
3535 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
3537 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3538 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
3539 potential_page_fault(s
);
3540 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
3541 tcg_temp_free_i32(r1
);
3542 tcg_temp_free_i32(r2
);
3547 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
3549 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3553 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
3555 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3556 return_low128(o
->out2
);
3560 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
3562 gen_helper_nabs_i64(o
->out
, o
->in2
);
3566 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3568 tcg_gen_neg_i64(o
->out
, o
->in2
);
3572 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3574 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3578 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3580 int shift
= s
->insn
->data
& 0xff;
3581 int size
= s
->insn
->data
>> 8;
3582 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3585 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3586 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3588 /* Produce the CC from only the bits manipulated. */
3589 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3590 set_cc_nz_u64(s
, cc_dst
);
3594 #ifndef CONFIG_USER_ONLY
3595 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3597 check_privileged(s
);
3598 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3603 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3605 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3609 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3611 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3615 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3617 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3621 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3623 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3627 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3629 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3633 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3638 tcg_gen_not_i64(o
->in2
, o
->in2
);
3639 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3641 /* XXX possible optimization point */
3643 cc
= tcg_temp_new_i64();
3644 tcg_gen_extu_i32_i64(cc
, cc_op
);
3645 tcg_gen_shri_i64(cc
, cc
, 1);
3646 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3647 tcg_temp_free_i64(cc
);
/* SUPERVISOR CALL: record the SVC interruption code (low byte of i1)
   and the instruction length in the CPU state, then raise EXCP_SVC.
   Ends the TB with no fall-through (EXIT_NORETURN).
   NOTE(review): the extraction dropped this function's prologue lines
   (local declaration, PSW/CC synchronization); the non-comment text
   below is preserved exactly as found.  */
3651 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
/* Store the SVC code for the interrupt handler.  */
3658 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3659 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3660 tcg_temp_free_i32(t
);
/* Store the instruction length (SVC is 2 bytes; EXECUTE may differ).  */
3662 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3663 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3664 tcg_temp_free_i32(t
);
3666 gen_exception(EXCP_SVC
);
3667 return EXIT_NORETURN
;
3670 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3672 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3676 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3678 int shift
= s
->insn
->data
& 0xff;
3679 int size
= s
->insn
->data
>> 8;
3680 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3683 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3684 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3686 /* Produce the CC from only the bits manipulated. */
3687 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3688 set_cc_nz_u64(s
, cc_dst
);
3692 /* ====================================================================== */
3693 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3694 the original inputs), update the various cc data structures in order to
3695 be able to compute the new condition code. */
3697 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3699 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3702 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3704 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3707 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3709 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3712 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3714 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3717 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3719 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3722 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3724 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3727 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3729 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3732 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3734 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3737 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3739 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3742 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3744 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3747 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3749 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3752 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3754 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3757 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3759 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3762 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3764 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3767 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3769 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3772 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3774 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3777 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3779 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3780 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3783 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3785 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3788 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3790 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3793 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3795 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3798 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3800 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3803 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3805 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3808 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3810 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3813 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3815 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3818 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3820 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3823 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3825 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3828 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3830 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3833 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3835 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3838 /* ====================================================================== */
3839 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3840 with the TCG register to which we will write. Used in combination with
3841 the "wout" generators, in some cases we need a new temporary, and in
3842 some cases we can write to a TCG global. */
3844 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3846 o
->out
= tcg_temp_new_i64();
3849 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3851 o
->out
= tcg_temp_new_i64();
3852 o
->out2
= tcg_temp_new_i64();
3855 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3857 o
->out
= regs
[get_field(f
, r1
)];
3861 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3863 /* ??? Specification exception: r1 must be even. */
3864 int r1
= get_field(f
, r1
);
3866 o
->out2
= regs
[(r1
+ 1) & 15];
3867 o
->g_out
= o
->g_out2
= true;
3870 /* ====================================================================== */
3871 /* The "Write OUTput" generators. These generally perform some non-trivial
3872 copy of data to TCG globals, or to main memory. The trivial cases are
3873 generally handled by having a "prep" generator install the TCG global
3874 as the destination of the operation. */
3876 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3878 store_reg(get_field(f
, r1
), o
->out
);
3881 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3883 int r1
= get_field(f
, r1
);
3884 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3887 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3889 store_reg32_i64(get_field(f
, r1
), o
->out
);
3892 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3894 /* ??? Specification exception: r1 must be even. */
3895 int r1
= get_field(f
, r1
);
3896 store_reg32_i64(r1
, o
->out
);
3897 store_reg32_i64((r1
+ 1) & 15, o
->out2
);
3900 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3902 /* ??? Specification exception: r1 must be even. */
3903 int r1
= get_field(f
, r1
);
3904 store_reg32_i64((r1
+ 1) & 15, o
->out
);
3905 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3906 store_reg32_i64(r1
, o
->out
);
3909 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3911 store_freg32_i64(get_field(f
, r1
), o
->out
);
3914 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3916 store_freg(get_field(f
, r1
), o
->out
);
3919 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3921 int f1
= get_field(s
->fields
, r1
);
3922 store_freg(f1
, o
->out
);
3923 store_freg((f1
+ 2) & 15, o
->out2
);
3926 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3928 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3929 store_reg32_i64(get_field(f
, r1
), o
->out
);
3933 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3935 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3936 store_freg32_i64(get_field(f
, r1
), o
->out
);
3940 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3942 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3945 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3947 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3950 /* ====================================================================== */
3951 /* The "INput 1" generators. These load the first operand to an insn. */
3953 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3955 o
->in1
= load_reg(get_field(f
, r1
));
3958 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3960 o
->in1
= regs
[get_field(f
, r1
)];
3964 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3966 /* ??? Specification exception: r1 must be even. */
3967 int r1
= get_field(f
, r1
);
3968 o
->in1
= load_reg((r1
+ 1) & 15);
3971 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3973 /* ??? Specification exception: r1 must be even. */
3974 int r1
= get_field(f
, r1
);
3975 o
->in1
= tcg_temp_new_i64();
3976 tcg_gen_ext32s_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3979 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3981 /* ??? Specification exception: r1 must be even. */
3982 int r1
= get_field(f
, r1
);
3983 o
->in1
= tcg_temp_new_i64();
3984 tcg_gen_ext32u_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3987 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3989 /* ??? Specification exception: r1 must be even. */
3990 int r1
= get_field(f
, r1
);
3991 o
->in1
= tcg_temp_new_i64();
3992 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3995 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3997 o
->in1
= load_reg(get_field(f
, r2
));
4000 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4002 o
->in1
= load_reg(get_field(f
, r3
));
4005 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4007 o
->in1
= load_freg32_i64(get_field(f
, r1
));
4010 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4012 o
->in1
= fregs
[get_field(f
, r1
)];
4016 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4018 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
4021 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4024 o
->in1
= tcg_temp_new_i64();
4025 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4028 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4031 o
->in1
= tcg_temp_new_i64();
4032 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4035 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4038 o
->in1
= tcg_temp_new_i64();
4039 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4042 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4045 o
->in1
= tcg_temp_new_i64();
4046 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4049 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4052 o
->in1
= tcg_temp_new_i64();
4053 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4056 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4059 o
->in1
= tcg_temp_new_i64();
4060 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
4063 /* ====================================================================== */
4064 /* The "INput 2" generators. These load the second operand to an insn. */
4066 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4068 o
->in2
= load_reg(get_field(f
, r2
));
4071 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4073 o
->in2
= regs
[get_field(f
, r2
)];
4077 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4079 int r2
= get_field(f
, r2
);
4081 o
->in2
= load_reg(r2
);
4085 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4087 o
->in2
= tcg_temp_new_i64();
4088 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4091 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4093 o
->in2
= tcg_temp_new_i64();
4094 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4097 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4099 o
->in2
= tcg_temp_new_i64();
4100 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4103 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4105 o
->in2
= tcg_temp_new_i64();
4106 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4109 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4111 o
->in2
= load_reg(get_field(f
, r3
));
4114 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4116 o
->in2
= tcg_temp_new_i64();
4117 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4120 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4122 o
->in2
= tcg_temp_new_i64();
4123 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4126 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4128 o
->in2
= load_freg32_i64(get_field(f
, r2
));
4131 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4133 o
->in2
= fregs
[get_field(f
, r2
)];
4137 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4139 int f2
= get_field(f
, r2
);
4141 o
->in2
= fregs
[(f2
+ 2) & 15];
4142 o
->g_in1
= o
->g_in2
= true;
4145 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4147 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4148 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4151 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4153 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
4156 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4159 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
4162 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4165 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4168 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4171 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4174 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4177 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4180 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4183 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4186 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4189 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4192 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4195 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4198 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4201 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4204 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4207 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4210 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4212 o
->in2
= tcg_const_i64(get_field(f
, i2
));
4215 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4217 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
4220 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4222 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
4225 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4227 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
4230 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4232 uint64_t i2
= (uint16_t)get_field(f
, i2
);
4233 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4236 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4238 uint64_t i2
= (uint32_t)get_field(f
, i2
);
4239 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4242 /* ====================================================================== */
4244 /* Find opc within the table of insns. This is formulated as a switch
4245 statement so that (1) we get compile-time notice of cut-paste errors
4246 for duplicated opcodes, and (2) the compiler generates the binary
4247 search tree, rather than us having to post-process the table. */
4249 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4250 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4252 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4254 enum DisasInsnEnum
{
4255 #include "insn-data.def"
4259 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4264 .help_in1 = in1_##I1, \
4265 .help_in2 = in2_##I2, \
4266 .help_prep = prep_##P, \
4267 .help_wout = wout_##W, \
4268 .help_cout = cout_##CC, \
4269 .help_op = op_##OP, \
4273 /* Allow 0 to be used for NULL in the table below. */
4281 static const DisasInsn insn_info
[] = {
4282 #include "insn-data.def"
4286 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4287 case OPC: return &insn_info[insn_ ## NM];
4289 static const DisasInsn
*lookup_opc(uint16_t opc
)
4292 #include "insn-data.def"
4301 /* Extract a field from the insn. The INSN should be left-aligned in
4302 the uint64_t so that we can more easily utilize the big-bit-endian
4303 definitions we extract from the Principals of Operation. */
4305 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4313 /* Zero extract the field from the insn. */
4314 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4316 /* Sign-extend, or un-swap the field as necessary. */
4318 case 0: /* unsigned */
4320 case 1: /* signed */
4321 assert(f
->size
<= 32);
4322 m
= 1u << (f
->size
- 1);
4325 case 2: /* dl+dh split, signed 20 bit. */
4326 r
= ((int8_t)r
<< 12) | (r
>> 8);
4332 /* Validate that the "compressed" encoding we selected above is valid.
4333 I.e. we havn't make two different original fields overlap. */
4334 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4335 o
->presentC
|= 1 << f
->indexC
;
4336 o
->presentO
|= 1 << f
->indexO
;
4338 o
->c
[f
->indexC
] = r
;
4341 /* Lookup the insn at the current PC, extracting the operands into O and
4342 returning the info struct for the insn. Returns NULL for invalid insn. */
4344 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4347 uint64_t insn
, pc
= s
->pc
;
4349 const DisasInsn
*info
;
4351 insn
= ld_code2(env
, pc
);
4352 op
= (insn
>> 8) & 0xff;
4353 ilen
= get_ilen(op
);
4354 s
->next_pc
= s
->pc
+ ilen
;
4361 insn
= ld_code4(env
, pc
) << 32;
4364 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4370 /* We can't actually determine the insn format until we've looked up
4371 the full insn opcode. Which we can't do without locating the
4372 secondary opcode. Assume by default that OP2 is at bit 40; for
4373 those smaller insns that don't actually have a secondary opcode
4374 this will correctly result in OP2 = 0. */
4380 case 0xb2: /* S, RRF, RRE */
4381 case 0xb3: /* RRE, RRD, RRF */
4382 case 0xb9: /* RRE, RRF */
4383 case 0xe5: /* SSE, SIL */
4384 op2
= (insn
<< 8) >> 56;
4388 case 0xc0: /* RIL */
4389 case 0xc2: /* RIL */
4390 case 0xc4: /* RIL */
4391 case 0xc6: /* RIL */
4392 case 0xc8: /* SSF */
4393 case 0xcc: /* RIL */
4394 op2
= (insn
<< 12) >> 60;
4396 case 0xd0 ... 0xdf: /* SS */
4402 case 0xee ... 0xf3: /* SS */
4403 case 0xf8 ... 0xfd: /* SS */
4407 op2
= (insn
<< 40) >> 56;
4411 memset(f
, 0, sizeof(*f
));
4415 /* Lookup the instruction. */
4416 info
= lookup_opc(op
<< 8 | op2
);
4418 /* If we found it, extract the operands. */
4420 DisasFormat fmt
= info
->fmt
;
4423 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4424 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
4430 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4432 const DisasInsn
*insn
;
4433 ExitStatus ret
= NO_EXIT
;
4437 insn
= extract_insn(env
, s
, &f
);
4439 /* If not found, try the old interpreter. This includes ILLOPC. */
4441 disas_s390_insn(env
, s
);
4442 switch (s
->is_jmp
) {
4450 ret
= EXIT_PC_UPDATED
;
4453 ret
= EXIT_NORETURN
;
4463 /* Set up the strutures we use to communicate with the helpers. */
4466 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4467 TCGV_UNUSED_I64(o
.out
);
4468 TCGV_UNUSED_I64(o
.out2
);
4469 TCGV_UNUSED_I64(o
.in1
);
4470 TCGV_UNUSED_I64(o
.in2
);
4471 TCGV_UNUSED_I64(o
.addr1
);
4473 /* Implement the instruction. */
4474 if (insn
->help_in1
) {
4475 insn
->help_in1(s
, &f
, &o
);
4477 if (insn
->help_in2
) {
4478 insn
->help_in2(s
, &f
, &o
);
4480 if (insn
->help_prep
) {
4481 insn
->help_prep(s
, &f
, &o
);
4483 if (insn
->help_op
) {
4484 ret
= insn
->help_op(s
, &o
);
4486 if (insn
->help_wout
) {
4487 insn
->help_wout(s
, &f
, &o
);
4489 if (insn
->help_cout
) {
4490 insn
->help_cout(s
, &o
);
4493 /* Free any temporaries created by the helpers. */
4494 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4495 tcg_temp_free_i64(o
.out
);
4497 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4498 tcg_temp_free_i64(o
.out2
);
4500 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4501 tcg_temp_free_i64(o
.in1
);
4503 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4504 tcg_temp_free_i64(o
.in2
);
4506 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4507 tcg_temp_free_i64(o
.addr1
);
4510 /* Advance to the next instruction. */
4515 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
4516 TranslationBlock
*tb
,
4520 target_ulong pc_start
;
4521 uint64_t next_page_start
;
4522 uint16_t *gen_opc_end
;
4524 int num_insns
, max_insns
;
4532 if (!(tb
->flags
& FLAG_MASK_64
)) {
4533 pc_start
&= 0x7fffffff;
4538 dc
.cc_op
= CC_OP_DYNAMIC
;
4539 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4540 dc
.is_jmp
= DISAS_NEXT
;
4542 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4544 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4547 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4548 if (max_insns
== 0) {
4549 max_insns
= CF_COUNT_MASK
;
4556 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4560 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4563 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4564 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4565 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4566 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4568 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4572 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4573 tcg_gen_debug_insn_start(dc
.pc
);
4577 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4578 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4579 if (bp
->pc
== dc
.pc
) {
4580 status
= EXIT_PC_STALE
;
4586 if (status
== NO_EXIT
) {
4587 status
= translate_one(env
, &dc
);
4590 /* If we reach a page boundary, are single stepping,
4591 or exhaust instruction count, stop generation. */
4592 if (status
== NO_EXIT
4593 && (dc
.pc
>= next_page_start
4594 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4595 || num_insns
>= max_insns
4597 || env
->singlestep_enabled
)) {
4598 status
= EXIT_PC_STALE
;
4600 } while (status
== NO_EXIT
);
4602 if (tb
->cflags
& CF_LAST_IO
) {
4611 update_psw_addr(&dc
);
4613 case EXIT_PC_UPDATED
:
4614 if (singlestep
&& dc
.cc_op
!= CC_OP_DYNAMIC
) {
4615 gen_op_calc_cc(&dc
);
4617 /* Next TB starts off with CC_OP_DYNAMIC,
4618 so make sure the cc op type is in env */
4619 gen_op_set_cc_op(&dc
);
4622 gen_exception(EXCP_DEBUG
);
4624 /* Generate the return instruction */
4632 gen_icount_end(tb
, num_insns
);
4633 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
4635 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4638 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4641 tb
->size
= dc
.pc
- pc_start
;
4642 tb
->icount
= num_insns
;
4645 #if defined(S390X_DEBUG_DISAS)
4646 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4647 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4648 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
4654 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4656 gen_intermediate_code_internal(env
, tb
, 0);
4659 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4661 gen_intermediate_code_internal(env
, tb
, 1);
4664 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4667 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4668 cc_op
= gen_opc_cc_op
[pc_pos
];
4669 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {