4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
61 /* Information carried about a condition to be evaluated. */
68 struct { TCGv_i64 a
, b
; } s64
;
69 struct { TCGv_i32 a
, b
; } s32
;
75 static void gen_op_calc_cc(DisasContext
*s
);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit
[CC_OP_MAX
];
79 static uint64_t inline_branch_miss
[CC_OP_MAX
];
/* Log the raw instruction word when verbose disassembly debugging is on;
   compiles to nothing otherwise (LOG_DISAS is a no-op). */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
87 static inline uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
89 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
90 if (s
->tb
->flags
& FLAG_MASK_32
) {
91 return pc
| 0x80000000;
97 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
102 if (env
->cc_op
> 3) {
103 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
104 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
106 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
107 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
110 for (i
= 0; i
< 16; i
++) {
111 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
113 cpu_fprintf(f
, "\n");
119 for (i
= 0; i
< 16; i
++) {
120 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
122 cpu_fprintf(f
, "\n");
128 #ifndef CONFIG_USER_ONLY
129 for (i
= 0; i
< 16; i
++) {
130 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
132 cpu_fprintf(f
, "\n");
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i
= 0; i
< CC_OP_MAX
; i
++) {
141 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
142 inline_branch_miss
[i
], inline_branch_hit
[i
]);
146 cpu_fprintf(f
, "\n");
149 static TCGv_i64 psw_addr
;
150 static TCGv_i64 psw_mask
;
152 static TCGv_i32 cc_op
;
153 static TCGv_i64 cc_src
;
154 static TCGv_i64 cc_dst
;
155 static TCGv_i64 cc_vr
;
157 static char cpu_reg_names
[32][4];
158 static TCGv_i64 regs
[16];
159 static TCGv_i64 fregs
[16];
161 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
163 void s390x_translate_init(void)
167 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
168 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
169 offsetof(CPUS390XState
, psw
.addr
),
171 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
172 offsetof(CPUS390XState
, psw
.mask
),
175 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
177 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
179 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
181 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
184 for (i
= 0; i
< 16; i
++) {
185 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
186 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
187 offsetof(CPUS390XState
, regs
[i
]),
191 for (i
= 0; i
< 16; i
++) {
192 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
193 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
194 offsetof(CPUS390XState
, fregs
[i
].d
),
195 cpu_reg_names
[i
+ 16]);
198 /* register helpers */
203 static inline TCGv_i64
load_reg(int reg
)
205 TCGv_i64 r
= tcg_temp_new_i64();
206 tcg_gen_mov_i64(r
, regs
[reg
]);
210 static inline TCGv_i64
load_freg(int reg
)
212 TCGv_i64 r
= tcg_temp_new_i64();
213 tcg_gen_mov_i64(r
, fregs
[reg
]);
217 static inline TCGv_i32
load_freg32(int reg
)
219 TCGv_i32 r
= tcg_temp_new_i32();
220 #if HOST_LONG_BITS == 32
221 tcg_gen_mov_i32(r
, TCGV_HIGH(fregs
[reg
]));
223 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r
)), fregs
[reg
], 32);
228 static inline TCGv_i64
load_freg32_i64(int reg
)
230 TCGv_i64 r
= tcg_temp_new_i64();
231 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
235 static inline TCGv_i32
load_reg32(int reg
)
237 TCGv_i32 r
= tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r
, regs
[reg
]);
242 static inline TCGv_i64
load_reg32_i64(int reg
)
244 TCGv_i64 r
= tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r
, regs
[reg
]);
249 static inline void store_reg(int reg
, TCGv_i64 v
)
251 tcg_gen_mov_i64(regs
[reg
], v
);
254 static inline void store_freg(int reg
, TCGv_i64 v
)
256 tcg_gen_mov_i64(fregs
[reg
], v
);
259 static inline void store_reg32(int reg
, TCGv_i32 v
)
261 /* 32 bit register writes keep the upper half */
262 #if HOST_LONG_BITS == 32
263 tcg_gen_mov_i32(TCGV_LOW(regs
[reg
]), v
);
265 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
266 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 32);
270 static inline void store_reg32_i64(int reg
, TCGv_i64 v
)
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
276 static inline void store_reg32h_i64(int reg
, TCGv_i64 v
)
278 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
281 static inline void store_freg32(int reg
, TCGv_i32 v
)
283 /* 32 bit register writes keep the lower half */
284 #if HOST_LONG_BITS == 32
285 tcg_gen_mov_i32(TCGV_HIGH(fregs
[reg
]), v
);
287 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
],
288 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 32, 32);
292 static inline void store_freg32_i64(int reg
, TCGv_i64 v
)
294 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
297 static inline void return_low128(TCGv_i64 dest
)
299 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
302 static inline void update_psw_addr(DisasContext
*s
)
305 tcg_gen_movi_i64(psw_addr
, s
->pc
);
/* Called before operations that may fault on memory access.
 * NOTE(review): the body lines are missing from this extract; on softmmu
 * (non-user) builds it presumably synchronizes PSW address / cc state so a
 * fault can be delivered precisely -- confirm against upstream QEMU. */
308 static inline void potential_page_fault(DisasContext
*s
)
310 #ifndef CONFIG_USER_ONLY
316 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
318 return (uint64_t)cpu_lduw_code(env
, pc
);
321 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
323 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
326 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
328 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
331 static inline int get_mem_index(DisasContext
*s
)
333 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
334 case PSW_ASC_PRIMARY
>> 32:
336 case PSW_ASC_SECONDARY
>> 32:
338 case PSW_ASC_HOME
>> 32:
346 static void gen_exception(int excp
)
348 TCGv_i32 tmp
= tcg_const_i32(excp
);
349 gen_helper_exception(cpu_env
, tmp
);
350 tcg_temp_free_i32(tmp
);
353 static void gen_program_exception(DisasContext
*s
, int code
)
357 /* Remember what pgm exeption this was. */
358 tmp
= tcg_const_i32(code
);
359 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
360 tcg_temp_free_i32(tmp
);
362 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
363 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
364 tcg_temp_free_i32(tmp
);
366 /* Advance past instruction. */
373 /* Trigger exception. */
374 gen_exception(EXCP_PGM
);
377 s
->is_jmp
= DISAS_EXCP
;
380 static inline void gen_illegal_opcode(DisasContext
*s
)
382 gen_program_exception(s
, PGM_SPECIFICATION
);
385 static inline void check_privileged(DisasContext
*s
)
387 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
388 gen_program_exception(s
, PGM_PRIVILEGED
);
392 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
396 /* 31-bitify the immediate part; register contents are dealt with below */
397 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
403 tmp
= tcg_const_i64(d2
);
404 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
409 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
413 tmp
= tcg_const_i64(d2
);
414 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
419 tmp
= tcg_const_i64(d2
);
422 /* 31-bit mode mask if there are values loaded from registers */
423 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
424 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
430 static void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
432 s
->cc_op
= CC_OP_CONST0
+ val
;
435 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
437 tcg_gen_discard_i64(cc_src
);
438 tcg_gen_mov_i64(cc_dst
, dst
);
439 tcg_gen_discard_i64(cc_vr
);
443 static void gen_op_update1_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 dst
)
445 tcg_gen_discard_i64(cc_src
);
446 tcg_gen_extu_i32_i64(cc_dst
, dst
);
447 tcg_gen_discard_i64(cc_vr
);
451 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
454 tcg_gen_mov_i64(cc_src
, src
);
455 tcg_gen_mov_i64(cc_dst
, dst
);
456 tcg_gen_discard_i64(cc_vr
);
460 static void gen_op_update2_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 src
,
463 tcg_gen_extu_i32_i64(cc_src
, src
);
464 tcg_gen_extu_i32_i64(cc_dst
, dst
);
465 tcg_gen_discard_i64(cc_vr
);
469 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
470 TCGv_i64 dst
, TCGv_i64 vr
)
472 tcg_gen_mov_i64(cc_src
, src
);
473 tcg_gen_mov_i64(cc_dst
, dst
);
474 tcg_gen_mov_i64(cc_vr
, vr
);
478 static inline void set_cc_nz_u32(DisasContext
*s
, TCGv_i32 val
)
480 gen_op_update1_cc_i32(s
, CC_OP_NZ
, val
);
483 static inline void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
485 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
488 static inline void cmp_32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
,
491 gen_op_update2_cc_i32(s
, cond
, v1
, v2
);
494 static inline void cmp_64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
,
497 gen_op_update2_cc_i64(s
, cond
, v1
, v2
);
500 static inline void cmp_s32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
502 cmp_32(s
, v1
, v2
, CC_OP_LTGT_32
);
505 static inline void cmp_u32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
507 cmp_32(s
, v1
, v2
, CC_OP_LTUGTU_32
);
510 static inline void cmp_s32c(DisasContext
*s
, TCGv_i32 v1
, int32_t v2
)
512 /* XXX optimize for the constant? put it in s? */
513 TCGv_i32 tmp
= tcg_const_i32(v2
);
514 cmp_32(s
, v1
, tmp
, CC_OP_LTGT_32
);
515 tcg_temp_free_i32(tmp
);
518 static inline void cmp_u32c(DisasContext
*s
, TCGv_i32 v1
, uint32_t v2
)
520 TCGv_i32 tmp
= tcg_const_i32(v2
);
521 cmp_32(s
, v1
, tmp
, CC_OP_LTUGTU_32
);
522 tcg_temp_free_i32(tmp
);
525 static inline void cmp_s64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
527 cmp_64(s
, v1
, v2
, CC_OP_LTGT_64
);
530 static inline void cmp_u64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
532 cmp_64(s
, v1
, v2
, CC_OP_LTUGTU_64
);
535 static inline void cmp_s64c(DisasContext
*s
, TCGv_i64 v1
, int64_t v2
)
537 TCGv_i64 tmp
= tcg_const_i64(v2
);
539 tcg_temp_free_i64(tmp
);
542 static inline void cmp_u64c(DisasContext
*s
, TCGv_i64 v1
, uint64_t v2
)
544 TCGv_i64 tmp
= tcg_const_i64(v2
);
546 tcg_temp_free_i64(tmp
);
549 static inline void set_cc_s32(DisasContext
*s
, TCGv_i32 val
)
551 gen_op_update1_cc_i32(s
, CC_OP_LTGT0_32
, val
);
554 static inline void set_cc_s64(DisasContext
*s
, TCGv_i64 val
)
556 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, val
);
559 /* CC value is in env->cc_op */
560 static inline void set_cc_static(DisasContext
*s
)
562 tcg_gen_discard_i64(cc_src
);
563 tcg_gen_discard_i64(cc_dst
);
564 tcg_gen_discard_i64(cc_vr
);
565 s
->cc_op
= CC_OP_STATIC
;
568 static inline void gen_op_set_cc_op(DisasContext
*s
)
570 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
571 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
575 static inline void gen_update_cc_op(DisasContext
*s
)
580 /* calculates cc into cc_op */
581 static void gen_op_calc_cc(DisasContext
*s
)
583 TCGv_i32 local_cc_op
= tcg_const_i32(s
->cc_op
);
584 TCGv_i64 dummy
= tcg_const_i64(0);
591 /* s->cc_op is the cc value */
592 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
595 /* env->cc_op already is the cc value */
609 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
614 case CC_OP_LTUGTU_32
:
615 case CC_OP_LTUGTU_64
:
622 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
637 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
640 /* unknown operation - assume 3 arguments and cc_op in env */
641 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
647 tcg_temp_free_i32(local_cc_op
);
648 tcg_temp_free_i64(dummy
);
650 /* We now have cc in cc_op as constant */
654 static inline void decode_rr(DisasContext
*s
, uint64_t insn
, int *r1
, int *r2
)
658 *r1
= (insn
>> 4) & 0xf;
662 static inline TCGv_i64
decode_rx(DisasContext
*s
, uint64_t insn
, int *r1
,
663 int *x2
, int *b2
, int *d2
)
667 *r1
= (insn
>> 20) & 0xf;
668 *x2
= (insn
>> 16) & 0xf;
669 *b2
= (insn
>> 12) & 0xf;
672 return get_address(s
, *x2
, *b2
, *d2
);
675 static inline void decode_rs(DisasContext
*s
, uint64_t insn
, int *r1
, int *r3
,
680 *r1
= (insn
>> 20) & 0xf;
682 *r3
= (insn
>> 16) & 0xf;
683 *b2
= (insn
>> 12) & 0xf;
687 static inline TCGv_i64
decode_si(DisasContext
*s
, uint64_t insn
, int *i2
,
692 *i2
= (insn
>> 16) & 0xff;
693 *b1
= (insn
>> 12) & 0xf;
696 return get_address(s
, 0, *b1
, *d1
);
699 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
701 /* NOTE: we handle the case where the TB spans two pages here */
702 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
703 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
704 && !s
->singlestep_enabled
705 && !(s
->tb
->cflags
& CF_LAST_IO
));
708 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong pc
)
712 if (use_goto_tb(s
, pc
)) {
713 tcg_gen_goto_tb(tb_num
);
714 tcg_gen_movi_i64(psw_addr
, pc
);
715 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ tb_num
);
717 /* jump to another page: currently not optimized */
718 tcg_gen_movi_i64(psw_addr
, pc
);
723 static inline void account_noninline_branch(DisasContext
*s
, int cc_op
)
725 #ifdef DEBUG_INLINE_BRANCHES
726 inline_branch_miss
[cc_op
]++;
730 static inline void account_inline_branch(DisasContext
*s
, int cc_op
)
732 #ifdef DEBUG_INLINE_BRANCHES
733 inline_branch_hit
[cc_op
]++;
737 /* Table of mask values to comparison codes, given a comparison as input.
738 For a true comparison CC=3 will never be set, but we treat this
739 conservatively for possible use when CC=3 indicates overflow. */
740 static const TCGCond ltgt_cond
[16] = {
741 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
742 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
743 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
744 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
745 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
746 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
747 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
748 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
751 /* Table of mask values to comparison codes, given a logic op as input.
752 For such, only CC=0 and CC=1 should be possible. */
753 static const TCGCond nz_cond
[16] = {
755 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
757 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
759 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
760 /* EQ | NE | x | x */
761 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
764 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
765 details required to generate a TCG comparison. */
766 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
769 enum cc_op old_cc_op
= s
->cc_op
;
771 if (mask
== 15 || mask
== 0) {
772 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
775 c
->g1
= c
->g2
= true;
780 /* Find the TCG condition for the mask + cc op. */
786 cond
= ltgt_cond
[mask
];
787 if (cond
== TCG_COND_NEVER
) {
790 account_inline_branch(s
, old_cc_op
);
793 case CC_OP_LTUGTU_32
:
794 case CC_OP_LTUGTU_64
:
795 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
796 if (cond
== TCG_COND_NEVER
) {
799 account_inline_branch(s
, old_cc_op
);
803 cond
= nz_cond
[mask
];
804 if (cond
== TCG_COND_NEVER
) {
807 account_inline_branch(s
, old_cc_op
);
822 account_inline_branch(s
, old_cc_op
);
837 account_inline_branch(s
, old_cc_op
);
842 /* Calculate cc value. */
847 /* Jump based on CC. We'll load up the real cond below;
848 the assignment here merely avoids a compiler warning. */
849 account_noninline_branch(s
, old_cc_op
);
850 old_cc_op
= CC_OP_STATIC
;
851 cond
= TCG_COND_NEVER
;
855 /* Load up the arguments of the comparison. */
857 c
->g1
= c
->g2
= false;
861 c
->u
.s32
.a
= tcg_temp_new_i32();
862 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
863 c
->u
.s32
.b
= tcg_const_i32(0);
866 case CC_OP_LTUGTU_32
:
868 c
->u
.s32
.a
= tcg_temp_new_i32();
869 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
870 c
->u
.s32
.b
= tcg_temp_new_i32();
871 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
877 c
->u
.s64
.b
= tcg_const_i64(0);
881 case CC_OP_LTUGTU_64
:
884 c
->g1
= c
->g2
= true;
890 c
->u
.s64
.a
= tcg_temp_new_i64();
891 c
->u
.s64
.b
= tcg_const_i64(0);
892 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
900 case 0x8 | 0x4 | 0x2: /* cc != 3 */
902 c
->u
.s32
.b
= tcg_const_i32(3);
904 case 0x8 | 0x4 | 0x1: /* cc != 2 */
906 c
->u
.s32
.b
= tcg_const_i32(2);
908 case 0x8 | 0x2 | 0x1: /* cc != 1 */
910 c
->u
.s32
.b
= tcg_const_i32(1);
912 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
915 c
->u
.s32
.a
= tcg_temp_new_i32();
916 c
->u
.s32
.b
= tcg_const_i32(0);
917 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
919 case 0x8 | 0x4: /* cc < 2 */
921 c
->u
.s32
.b
= tcg_const_i32(2);
923 case 0x8: /* cc == 0 */
925 c
->u
.s32
.b
= tcg_const_i32(0);
927 case 0x4 | 0x2 | 0x1: /* cc != 0 */
929 c
->u
.s32
.b
= tcg_const_i32(0);
931 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
934 c
->u
.s32
.a
= tcg_temp_new_i32();
935 c
->u
.s32
.b
= tcg_const_i32(0);
936 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
938 case 0x4: /* cc == 1 */
940 c
->u
.s32
.b
= tcg_const_i32(1);
942 case 0x2 | 0x1: /* cc > 1 */
944 c
->u
.s32
.b
= tcg_const_i32(1);
946 case 0x2: /* cc == 2 */
948 c
->u
.s32
.b
= tcg_const_i32(2);
950 case 0x1: /* cc == 3 */
952 c
->u
.s32
.b
= tcg_const_i32(3);
955 /* CC is masked by something else: (8 >> cc) & mask. */
958 c
->u
.s32
.a
= tcg_const_i32(8);
959 c
->u
.s32
.b
= tcg_const_i32(0);
960 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
961 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
972 static void free_compare(DisasCompare
*c
)
976 tcg_temp_free_i64(c
->u
.s64
.a
);
978 tcg_temp_free_i32(c
->u
.s32
.a
);
983 tcg_temp_free_i64(c
->u
.s64
.b
);
985 tcg_temp_free_i32(c
->u
.s32
.b
);
990 static void disas_b2(CPUS390XState
*env
, DisasContext
*s
, int op
,
993 TCGv_i64 tmp
, tmp2
, tmp3
;
994 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
996 #ifndef CONFIG_USER_ONLY
1000 r1
= (insn
>> 4) & 0xf;
1003 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1006 case 0x22: /* IPM R1 [RRE] */
1007 tmp32_1
= tcg_const_i32(r1
);
1009 gen_helper_ipm(cpu_env
, cc_op
, tmp32_1
);
1010 tcg_temp_free_i32(tmp32_1
);
1012 case 0x41: /* CKSM R1,R2 [RRE] */
1013 tmp32_1
= tcg_const_i32(r1
);
1014 tmp32_2
= tcg_const_i32(r2
);
1015 potential_page_fault(s
);
1016 gen_helper_cksm(cpu_env
, tmp32_1
, tmp32_2
);
1017 tcg_temp_free_i32(tmp32_1
);
1018 tcg_temp_free_i32(tmp32_2
);
1019 gen_op_movi_cc(s
, 0);
1021 case 0x4e: /* SAR R1,R2 [RRE] */
1022 tmp32_1
= load_reg32(r2
);
1023 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
1024 tcg_temp_free_i32(tmp32_1
);
1026 case 0x4f: /* EAR R1,R2 [RRE] */
1027 tmp32_1
= tcg_temp_new_i32();
1028 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
1029 store_reg32(r1
, tmp32_1
);
1030 tcg_temp_free_i32(tmp32_1
);
1032 case 0x54: /* MVPG R1,R2 [RRE] */
1034 tmp2
= load_reg(r1
);
1035 tmp3
= load_reg(r2
);
1036 potential_page_fault(s
);
1037 gen_helper_mvpg(cpu_env
, tmp
, tmp2
, tmp3
);
1038 tcg_temp_free_i64(tmp
);
1039 tcg_temp_free_i64(tmp2
);
1040 tcg_temp_free_i64(tmp3
);
1041 /* XXX check CCO bit and set CC accordingly */
1042 gen_op_movi_cc(s
, 0);
1044 case 0x55: /* MVST R1,R2 [RRE] */
1045 tmp32_1
= load_reg32(0);
1046 tmp32_2
= tcg_const_i32(r1
);
1047 tmp32_3
= tcg_const_i32(r2
);
1048 potential_page_fault(s
);
1049 gen_helper_mvst(cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1050 tcg_temp_free_i32(tmp32_1
);
1051 tcg_temp_free_i32(tmp32_2
);
1052 tcg_temp_free_i32(tmp32_3
);
1053 gen_op_movi_cc(s
, 1);
1055 case 0x5d: /* CLST R1,R2 [RRE] */
1056 tmp32_1
= load_reg32(0);
1057 tmp32_2
= tcg_const_i32(r1
);
1058 tmp32_3
= tcg_const_i32(r2
);
1059 potential_page_fault(s
);
1060 gen_helper_clst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1062 tcg_temp_free_i32(tmp32_1
);
1063 tcg_temp_free_i32(tmp32_2
);
1064 tcg_temp_free_i32(tmp32_3
);
1066 case 0x5e: /* SRST R1,R2 [RRE] */
1067 tmp32_1
= load_reg32(0);
1068 tmp32_2
= tcg_const_i32(r1
);
1069 tmp32_3
= tcg_const_i32(r2
);
1070 potential_page_fault(s
);
1071 gen_helper_srst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1073 tcg_temp_free_i32(tmp32_1
);
1074 tcg_temp_free_i32(tmp32_2
);
1075 tcg_temp_free_i32(tmp32_3
);
1078 #ifndef CONFIG_USER_ONLY
1079 case 0x02: /* STIDP D2(B2) [S] */
1081 check_privileged(s
);
1082 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1083 tmp
= get_address(s
, 0, b2
, d2
);
1084 potential_page_fault(s
);
1085 gen_helper_stidp(cpu_env
, tmp
);
1086 tcg_temp_free_i64(tmp
);
1088 case 0x04: /* SCK D2(B2) [S] */
1090 check_privileged(s
);
1091 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1092 tmp
= get_address(s
, 0, b2
, d2
);
1093 potential_page_fault(s
);
1094 gen_helper_sck(cc_op
, tmp
);
1096 tcg_temp_free_i64(tmp
);
1098 case 0x05: /* STCK D2(B2) [S] */
1100 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1101 tmp
= get_address(s
, 0, b2
, d2
);
1102 potential_page_fault(s
);
1103 gen_helper_stck(cc_op
, cpu_env
, tmp
);
1105 tcg_temp_free_i64(tmp
);
1107 case 0x06: /* SCKC D2(B2) [S] */
1108 /* Set Clock Comparator */
1109 check_privileged(s
);
1110 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1111 tmp
= get_address(s
, 0, b2
, d2
);
1112 potential_page_fault(s
);
1113 gen_helper_sckc(cpu_env
, tmp
);
1114 tcg_temp_free_i64(tmp
);
1116 case 0x07: /* STCKC D2(B2) [S] */
1117 /* Store Clock Comparator */
1118 check_privileged(s
);
1119 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1120 tmp
= get_address(s
, 0, b2
, d2
);
1121 potential_page_fault(s
);
1122 gen_helper_stckc(cpu_env
, tmp
);
1123 tcg_temp_free_i64(tmp
);
1125 case 0x08: /* SPT D2(B2) [S] */
1127 check_privileged(s
);
1128 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1129 tmp
= get_address(s
, 0, b2
, d2
);
1130 potential_page_fault(s
);
1131 gen_helper_spt(cpu_env
, tmp
);
1132 tcg_temp_free_i64(tmp
);
1134 case 0x09: /* STPT D2(B2) [S] */
1135 /* Store CPU Timer */
1136 check_privileged(s
);
1137 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1138 tmp
= get_address(s
, 0, b2
, d2
);
1139 potential_page_fault(s
);
1140 gen_helper_stpt(cpu_env
, tmp
);
1141 tcg_temp_free_i64(tmp
);
1143 case 0x0a: /* SPKA D2(B2) [S] */
1144 /* Set PSW Key from Address */
1145 check_privileged(s
);
1146 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1147 tmp
= get_address(s
, 0, b2
, d2
);
1148 tmp2
= tcg_temp_new_i64();
1149 tcg_gen_andi_i64(tmp2
, psw_mask
, ~PSW_MASK_KEY
);
1150 tcg_gen_shli_i64(tmp
, tmp
, PSW_SHIFT_KEY
- 4);
1151 tcg_gen_or_i64(psw_mask
, tmp2
, tmp
);
1152 tcg_temp_free_i64(tmp2
);
1153 tcg_temp_free_i64(tmp
);
1155 case 0x0d: /* PTLB [S] */
1157 check_privileged(s
);
1158 gen_helper_ptlb(cpu_env
);
1160 case 0x10: /* SPX D2(B2) [S] */
1161 /* Set Prefix Register */
1162 check_privileged(s
);
1163 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1164 tmp
= get_address(s
, 0, b2
, d2
);
1165 potential_page_fault(s
);
1166 gen_helper_spx(cpu_env
, tmp
);
1167 tcg_temp_free_i64(tmp
);
1169 case 0x11: /* STPX D2(B2) [S] */
1171 check_privileged(s
);
1172 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1173 tmp
= get_address(s
, 0, b2
, d2
);
1174 tmp2
= tcg_temp_new_i64();
1175 tcg_gen_ld_i64(tmp2
, cpu_env
, offsetof(CPUS390XState
, psa
));
1176 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1177 tcg_temp_free_i64(tmp
);
1178 tcg_temp_free_i64(tmp2
);
1180 case 0x12: /* STAP D2(B2) [S] */
1181 /* Store CPU Address */
1182 check_privileged(s
);
1183 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1184 tmp
= get_address(s
, 0, b2
, d2
);
1185 tmp2
= tcg_temp_new_i64();
1186 tmp32_1
= tcg_temp_new_i32();
1187 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
1188 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1189 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1190 tcg_temp_free_i64(tmp
);
1191 tcg_temp_free_i64(tmp2
);
1192 tcg_temp_free_i32(tmp32_1
);
1194 case 0x21: /* IPTE R1,R2 [RRE] */
1195 /* Invalidate PTE */
1196 check_privileged(s
);
1197 r1
= (insn
>> 4) & 0xf;
1200 tmp2
= load_reg(r2
);
1201 gen_helper_ipte(cpu_env
, tmp
, tmp2
);
1202 tcg_temp_free_i64(tmp
);
1203 tcg_temp_free_i64(tmp2
);
1205 case 0x29: /* ISKE R1,R2 [RRE] */
1206 /* Insert Storage Key Extended */
1207 check_privileged(s
);
1208 r1
= (insn
>> 4) & 0xf;
1211 tmp2
= tcg_temp_new_i64();
1212 gen_helper_iske(tmp2
, cpu_env
, tmp
);
1213 store_reg(r1
, tmp2
);
1214 tcg_temp_free_i64(tmp
);
1215 tcg_temp_free_i64(tmp2
);
1217 case 0x2a: /* RRBE R1,R2 [RRE] */
1218 /* Set Storage Key Extended */
1219 check_privileged(s
);
1220 r1
= (insn
>> 4) & 0xf;
1222 tmp32_1
= load_reg32(r1
);
1224 gen_helper_rrbe(cc_op
, cpu_env
, tmp32_1
, tmp
);
1226 tcg_temp_free_i32(tmp32_1
);
1227 tcg_temp_free_i64(tmp
);
1229 case 0x2b: /* SSKE R1,R2 [RRE] */
1230 /* Set Storage Key Extended */
1231 check_privileged(s
);
1232 r1
= (insn
>> 4) & 0xf;
1234 tmp32_1
= load_reg32(r1
);
1236 gen_helper_sske(cpu_env
, tmp32_1
, tmp
);
1237 tcg_temp_free_i32(tmp32_1
);
1238 tcg_temp_free_i64(tmp
);
1240 case 0x34: /* STCH ? */
1241 /* Store Subchannel */
1242 check_privileged(s
);
1243 gen_op_movi_cc(s
, 3);
1245 case 0x46: /* STURA R1,R2 [RRE] */
1246 /* Store Using Real Address */
1247 check_privileged(s
);
1248 r1
= (insn
>> 4) & 0xf;
1250 tmp32_1
= load_reg32(r1
);
1252 potential_page_fault(s
);
1253 gen_helper_stura(cpu_env
, tmp
, tmp32_1
);
1254 tcg_temp_free_i32(tmp32_1
);
1255 tcg_temp_free_i64(tmp
);
1257 case 0x50: /* CSP R1,R2 [RRE] */
1258 /* Compare And Swap And Purge */
1259 check_privileged(s
);
1260 r1
= (insn
>> 4) & 0xf;
1262 tmp32_1
= tcg_const_i32(r1
);
1263 tmp32_2
= tcg_const_i32(r2
);
1264 gen_helper_csp(cc_op
, cpu_env
, tmp32_1
, tmp32_2
);
1266 tcg_temp_free_i32(tmp32_1
);
1267 tcg_temp_free_i32(tmp32_2
);
1269 case 0x5f: /* CHSC ? */
1270 /* Channel Subsystem Call */
1271 check_privileged(s
);
1272 gen_op_movi_cc(s
, 3);
1274 case 0x78: /* STCKE D2(B2) [S] */
1275 /* Store Clock Extended */
1276 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1277 tmp
= get_address(s
, 0, b2
, d2
);
1278 potential_page_fault(s
);
1279 gen_helper_stcke(cc_op
, cpu_env
, tmp
);
1281 tcg_temp_free_i64(tmp
);
1283 case 0x79: /* SACF D2(B2) [S] */
1284 /* Set Address Space Control Fast */
1285 check_privileged(s
);
1286 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1287 tmp
= get_address(s
, 0, b2
, d2
);
1288 potential_page_fault(s
);
1289 gen_helper_sacf(cpu_env
, tmp
);
1290 tcg_temp_free_i64(tmp
);
1291 /* addressing mode has changed, so end the block */
1294 s
->is_jmp
= DISAS_JUMP
;
1296 case 0x7d: /* STSI D2,(B2) [S] */
1297 check_privileged(s
);
1298 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1299 tmp
= get_address(s
, 0, b2
, d2
);
1300 tmp32_1
= load_reg32(0);
1301 tmp32_2
= load_reg32(1);
1302 potential_page_fault(s
);
1303 gen_helper_stsi(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp32_2
);
1305 tcg_temp_free_i64(tmp
);
1306 tcg_temp_free_i32(tmp32_1
);
1307 tcg_temp_free_i32(tmp32_2
);
1309 case 0x9d: /* LFPC D2(B2) [S] */
1310 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1311 tmp
= get_address(s
, 0, b2
, d2
);
1312 tmp2
= tcg_temp_new_i64();
1313 tmp32_1
= tcg_temp_new_i32();
1314 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
1315 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1316 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1317 tcg_temp_free_i64(tmp
);
1318 tcg_temp_free_i64(tmp2
);
1319 tcg_temp_free_i32(tmp32_1
);
1321 case 0xb1: /* STFL D2(B2) [S] */
1322 /* Store Facility List (CPU features) at 200 */
1323 check_privileged(s
);
1324 tmp2
= tcg_const_i64(0xc0000000);
1325 tmp
= tcg_const_i64(200);
1326 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1327 tcg_temp_free_i64(tmp2
);
1328 tcg_temp_free_i64(tmp
);
1330 case 0xb2: /* LPSWE D2(B2) [S] */
1331 /* Load PSW Extended */
1332 check_privileged(s
);
1333 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1334 tmp
= get_address(s
, 0, b2
, d2
);
1335 tmp2
= tcg_temp_new_i64();
1336 tmp3
= tcg_temp_new_i64();
1337 tcg_gen_qemu_ld64(tmp2
, tmp
, get_mem_index(s
));
1338 tcg_gen_addi_i64(tmp
, tmp
, 8);
1339 tcg_gen_qemu_ld64(tmp3
, tmp
, get_mem_index(s
));
1340 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
1341 /* we need to keep cc_op intact */
1342 s
->is_jmp
= DISAS_JUMP
;
1343 tcg_temp_free_i64(tmp
);
1344 tcg_temp_free_i64(tmp2
);
1345 tcg_temp_free_i64(tmp3
);
1347 case 0x20: /* SERVC R1,R2 [RRE] */
1348 /* SCLP Service call (PV hypercall) */
1349 check_privileged(s
);
1350 potential_page_fault(s
);
1351 tmp32_1
= load_reg32(r2
);
1353 gen_helper_servc(cc_op
, cpu_env
, tmp32_1
, tmp
);
1355 tcg_temp_free_i32(tmp32_1
);
1356 tcg_temp_free_i64(tmp
);
1360 LOG_DISAS("illegal b2 operation 0x%x\n", op
);
1361 gen_illegal_opcode(s
);
1366 static void disas_b3(CPUS390XState
*env
, DisasContext
*s
, int op
, int m3
,
1370 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
1371 LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op
, m3
, r1
, r2
);
1372 #define FP_HELPER(i) \
1373 tmp32_1 = tcg_const_i32(r1); \
1374 tmp32_2 = tcg_const_i32(r2); \
1375 gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
1376 tcg_temp_free_i32(tmp32_1); \
1377 tcg_temp_free_i32(tmp32_2);
1379 #define FP_HELPER_CC(i) \
1380 tmp32_1 = tcg_const_i32(r1); \
1381 tmp32_2 = tcg_const_i32(r2); \
1382 gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
1384 tcg_temp_free_i32(tmp32_1); \
1385 tcg_temp_free_i32(tmp32_2);
1388 case 0x15: /* SQBDR R1,R2 [RRE] */
1391 case 0x74: /* LZER R1 [RRE] */
1392 tmp32_1
= tcg_const_i32(r1
);
1393 gen_helper_lzer(cpu_env
, tmp32_1
);
1394 tcg_temp_free_i32(tmp32_1
);
1396 case 0x75: /* LZDR R1 [RRE] */
1397 tmp32_1
= tcg_const_i32(r1
);
1398 gen_helper_lzdr(cpu_env
, tmp32_1
);
1399 tcg_temp_free_i32(tmp32_1
);
1401 case 0x76: /* LZXR R1 [RRE] */
1402 tmp32_1
= tcg_const_i32(r1
);
1403 gen_helper_lzxr(cpu_env
, tmp32_1
);
1404 tcg_temp_free_i32(tmp32_1
);
1406 case 0x84: /* SFPC R1 [RRE] */
1407 tmp32_1
= load_reg32(r1
);
1408 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1409 tcg_temp_free_i32(tmp32_1
);
1411 case 0x94: /* CEFBR R1,R2 [RRE] */
1412 case 0x95: /* CDFBR R1,R2 [RRE] */
1413 case 0x96: /* CXFBR R1,R2 [RRE] */
1414 tmp32_1
= tcg_const_i32(r1
);
1415 tmp32_2
= load_reg32(r2
);
1418 gen_helper_cefbr(cpu_env
, tmp32_1
, tmp32_2
);
1421 gen_helper_cdfbr(cpu_env
, tmp32_1
, tmp32_2
);
1424 gen_helper_cxfbr(cpu_env
, tmp32_1
, tmp32_2
);
1429 tcg_temp_free_i32(tmp32_1
);
1430 tcg_temp_free_i32(tmp32_2
);
1432 case 0x98: /* CFEBR R1,R2 [RRE] */
1433 case 0x99: /* CFDBR R1,R2 [RRE] */
1434 case 0x9a: /* CFXBR R1,R2 [RRE] */
1435 tmp32_1
= tcg_const_i32(r1
);
1436 tmp32_2
= tcg_const_i32(r2
);
1437 tmp32_3
= tcg_const_i32(m3
);
1440 gen_helper_cfebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1443 gen_helper_cfdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1446 gen_helper_cfxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1452 tcg_temp_free_i32(tmp32_1
);
1453 tcg_temp_free_i32(tmp32_2
);
1454 tcg_temp_free_i32(tmp32_3
);
1456 case 0xa4: /* CEGBR R1,R2 [RRE] */
1457 case 0xa5: /* CDGBR R1,R2 [RRE] */
1458 tmp32_1
= tcg_const_i32(r1
);
1462 gen_helper_cegbr(cpu_env
, tmp32_1
, tmp
);
1465 gen_helper_cdgbr(cpu_env
, tmp32_1
, tmp
);
1470 tcg_temp_free_i32(tmp32_1
);
1471 tcg_temp_free_i64(tmp
);
1473 case 0xa6: /* CXGBR R1,R2 [RRE] */
1474 tmp32_1
= tcg_const_i32(r1
);
1476 gen_helper_cxgbr(cpu_env
, tmp32_1
, tmp
);
1477 tcg_temp_free_i32(tmp32_1
);
1478 tcg_temp_free_i64(tmp
);
1480 case 0xa8: /* CGEBR R1,R2 [RRE] */
1481 tmp32_1
= tcg_const_i32(r1
);
1482 tmp32_2
= tcg_const_i32(r2
);
1483 tmp32_3
= tcg_const_i32(m3
);
1484 gen_helper_cgebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1486 tcg_temp_free_i32(tmp32_1
);
1487 tcg_temp_free_i32(tmp32_2
);
1488 tcg_temp_free_i32(tmp32_3
);
1490 case 0xa9: /* CGDBR R1,R2 [RRE] */
1491 tmp32_1
= tcg_const_i32(r1
);
1492 tmp32_2
= tcg_const_i32(r2
);
1493 tmp32_3
= tcg_const_i32(m3
);
1494 gen_helper_cgdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1496 tcg_temp_free_i32(tmp32_1
);
1497 tcg_temp_free_i32(tmp32_2
);
1498 tcg_temp_free_i32(tmp32_3
);
1500 case 0xaa: /* CGXBR R1,R2 [RRE] */
1501 tmp32_1
= tcg_const_i32(r1
);
1502 tmp32_2
= tcg_const_i32(r2
);
1503 tmp32_3
= tcg_const_i32(m3
);
1504 gen_helper_cgxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1506 tcg_temp_free_i32(tmp32_1
);
1507 tcg_temp_free_i32(tmp32_2
);
1508 tcg_temp_free_i32(tmp32_3
);
1511 LOG_DISAS("illegal b3 operation 0x%x\n", op
);
1512 gen_illegal_opcode(s
);
1520 static void disas_b9(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1526 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1528 case 0x83: /* FLOGR R1,R2 [RRE] */
1530 tmp32_1
= tcg_const_i32(r1
);
1531 gen_helper_flogr(cc_op
, cpu_env
, tmp32_1
, tmp
);
1533 tcg_temp_free_i64(tmp
);
1534 tcg_temp_free_i32(tmp32_1
);
1537 LOG_DISAS("illegal b9 operation 0x%x\n", op
);
1538 gen_illegal_opcode(s
);
1543 static void disas_s390_insn(CPUS390XState
*env
, DisasContext
*s
)
1549 opc
= cpu_ldub_code(env
, s
->pc
);
1550 LOG_DISAS("opc 0x%x\n", opc
);
1554 insn
= ld_code4(env
, s
->pc
);
1555 op
= (insn
>> 16) & 0xff;
1556 disas_b2(env
, s
, op
, insn
);
1559 insn
= ld_code4(env
, s
->pc
);
1560 op
= (insn
>> 16) & 0xff;
1561 r3
= (insn
>> 12) & 0xf; /* aka m3 */
1562 r1
= (insn
>> 4) & 0xf;
1564 disas_b3(env
, s
, op
, r3
, r1
, r2
);
1567 insn
= ld_code4(env
, s
->pc
);
1568 r1
= (insn
>> 4) & 0xf;
1570 op
= (insn
>> 16) & 0xff;
1571 disas_b9(env
, s
, op
, r1
, r2
);
1574 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%x\n", opc
);
1575 gen_illegal_opcode(s
);
1580 /* ====================================================================== */
1581 /* Define the insn format enumeration. */
1582 #define F0(N) FMT_##N,
1583 #define F1(N, X1) F0(N)
1584 #define F2(N, X1, X2) F0(N)
1585 #define F3(N, X1, X2, X3) F0(N)
1586 #define F4(N, X1, X2, X3, X4) F0(N)
1587 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1590 #include "insn-format.def"
1600 /* Define a structure to hold the decoded fields. We'll store each inside
1601 an array indexed by an enum. In order to conserve memory, we'll arrange
1602 for fields that do not exist at the same time to overlap, thus the "C"
1603 for compact. For checking purposes there is an "O" for original index
1604 as well that will be applied to availability bitmaps. */
1606 enum DisasFieldIndexO
{
1629 enum DisasFieldIndexC
{
1660 struct DisasFields
{
1663 unsigned presentC
:16;
1664 unsigned int presentO
;
1668 /* This is the way fields are to be accessed out of DisasFields. */
1669 #define have_field(S, F) have_field1((S), FLD_O_##F)
1670 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1672 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1674 return (f
->presentO
>> c
) & 1;
1677 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1678 enum DisasFieldIndexC c
)
1680 assert(have_field1(f
, o
));
1684 /* Describe the layout of each field in each format. */
1685 typedef struct DisasField
{
1687 unsigned int size
:8;
1688 unsigned int type
:2;
1689 unsigned int indexC
:6;
1690 enum DisasFieldIndexO indexO
:8;
1693 typedef struct DisasFormatInfo
{
1694 DisasField op
[NUM_C_FIELD
];
1697 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1698 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1699 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1700 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1701 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1702 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1703 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1704 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1705 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1706 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1707 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1708 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1709 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1710 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1712 #define F0(N) { { } },
1713 #define F1(N, X1) { { X1 } },
1714 #define F2(N, X1, X2) { { X1, X2 } },
1715 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1716 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1717 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1719 static const DisasFormatInfo format_info
[] = {
1720 #include "insn-format.def"
1738 /* Generally, we'll extract operands into this structures, operate upon
1739 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1740 of routines below for more details. */
1742 bool g_out
, g_out2
, g_in1
, g_in2
;
1743 TCGv_i64 out
, out2
, in1
, in2
;
1747 /* Return values from translate_one, indicating the state of the TB. */
1749 /* Continue the TB. */
1751 /* We have emitted one or more goto_tb. No fixup required. */
1753 /* We are not using a goto_tb (for whatever reason), but have updated
1754 the PC (for whatever reason), so there's no need to do it again on
1757 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1758 updated the PC for the next instruction to be executed. */
1760 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1761 No following code will be executed. */
1765 typedef enum DisasFacility
{
1766 FAC_Z
, /* zarch (default) */
1767 FAC_CASS
, /* compare and swap and store */
1768 FAC_CASS2
, /* compare and swap and store 2*/
1769 FAC_DFP
, /* decimal floating point */
1770 FAC_DFPR
, /* decimal floating point rounding */
1771 FAC_DO
, /* distinct operands */
1772 FAC_EE
, /* execute extensions */
1773 FAC_EI
, /* extended immediate */
1774 FAC_FPE
, /* floating point extension */
1775 FAC_FPSSH
, /* floating point support sign handling */
1776 FAC_FPRGR
, /* FPR-GR transfer */
1777 FAC_GIE
, /* general instructions extension */
1778 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1779 FAC_HW
, /* high-word */
1780 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1781 FAC_LOC
, /* load/store on condition */
1782 FAC_LD
, /* long displacement */
1783 FAC_PC
, /* population count */
1784 FAC_SCF
, /* store clock fast */
1785 FAC_SFLE
, /* store facility list extended */
1791 DisasFacility fac
:6;
1795 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1796 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1797 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1798 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1799 void (*help_cout
)(DisasContext
*, DisasOps
*);
1800 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1805 /* ====================================================================== */
1806 /* Miscelaneous helpers, used by several operations. */
1808 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1809 DisasOps
*o
, int mask
)
1811 int b2
= get_field(f
, b2
);
1812 int d2
= get_field(f
, d2
);
1815 o
->in2
= tcg_const_i64(d2
& mask
);
1817 o
->in2
= get_address(s
, 0, b2
, d2
);
1818 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1822 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1824 if (dest
== s
->next_pc
) {
1827 if (use_goto_tb(s
, dest
)) {
1828 gen_update_cc_op(s
);
1830 tcg_gen_movi_i64(psw_addr
, dest
);
1831 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
1832 return EXIT_GOTO_TB
;
1834 tcg_gen_movi_i64(psw_addr
, dest
);
1835 return EXIT_PC_UPDATED
;
1839 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1840 bool is_imm
, int imm
, TCGv_i64 cdest
)
1843 uint64_t dest
= s
->pc
+ 2 * imm
;
1846 /* Take care of the special cases first. */
1847 if (c
->cond
== TCG_COND_NEVER
) {
1852 if (dest
== s
->next_pc
) {
1853 /* Branch to next. */
1857 if (c
->cond
== TCG_COND_ALWAYS
) {
1858 ret
= help_goto_direct(s
, dest
);
1862 if (TCGV_IS_UNUSED_I64(cdest
)) {
1863 /* E.g. bcr %r0 -> no branch. */
1867 if (c
->cond
== TCG_COND_ALWAYS
) {
1868 tcg_gen_mov_i64(psw_addr
, cdest
);
1869 ret
= EXIT_PC_UPDATED
;
1874 if (use_goto_tb(s
, s
->next_pc
)) {
1875 if (is_imm
&& use_goto_tb(s
, dest
)) {
1876 /* Both exits can use goto_tb. */
1877 gen_update_cc_op(s
);
1879 lab
= gen_new_label();
1881 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1883 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1886 /* Branch not taken. */
1888 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1889 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1894 tcg_gen_movi_i64(psw_addr
, dest
);
1895 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
1899 /* Fallthru can use goto_tb, but taken branch cannot. */
1900 /* Store taken branch destination before the brcond. This
1901 avoids having to allocate a new local temp to hold it.
1902 We'll overwrite this in the not taken case anyway. */
1904 tcg_gen_mov_i64(psw_addr
, cdest
);
1907 lab
= gen_new_label();
1909 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1911 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1914 /* Branch not taken. */
1915 gen_update_cc_op(s
);
1917 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1918 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1922 tcg_gen_movi_i64(psw_addr
, dest
);
1924 ret
= EXIT_PC_UPDATED
;
1927 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1928 Most commonly we're single-stepping or some other condition that
1929 disables all use of goto_tb. Just update the PC and exit. */
1931 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1933 cdest
= tcg_const_i64(dest
);
1937 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1940 TCGv_i32 t0
= tcg_temp_new_i32();
1941 TCGv_i64 t1
= tcg_temp_new_i64();
1942 TCGv_i64 z
= tcg_const_i64(0);
1943 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1944 tcg_gen_extu_i32_i64(t1
, t0
);
1945 tcg_temp_free_i32(t0
);
1946 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1947 tcg_temp_free_i64(t1
);
1948 tcg_temp_free_i64(z
);
1952 tcg_temp_free_i64(cdest
);
1954 tcg_temp_free_i64(next
);
1956 ret
= EXIT_PC_UPDATED
;
1964 /* ====================================================================== */
1965 /* The operations. These perform the bulk of the work for any insn,
1966 usually after the operands have been loaded and output initialized. */
/* op_abs: 64-bit integer absolute value, o->out = |o->in2|, via the
   abs_i64 helper.
   NOTE(review): this listing is a corrupted extraction -- the function's
   opening brace, trailing return statement and closing brace are absent
   from the visible text (embedded numbering jumps 1968 -> 1970); confirm
   against the upstream file before editing. */
1968 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1970 gen_helper_abs_i64(o
->out
, o
->in2
);
/* op_absf32: absolute value of a 32-bit float held in the low half of a
   64-bit register: clears the IEEE-754 single-precision sign bit by
   ANDing with 0x7fffffff.
   NOTE(review): corrupted extraction -- braces/return missing from the
   visible text (numbering jumps 1974 -> 1976); confirm upstream. */
1974 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1976 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
/* op_absf64: absolute value of a 64-bit float: clears the IEEE-754
   double-precision sign bit (bit 63) by masking with
   0x7fffffffffffffff.
   NOTE(review): corrupted extraction -- braces/return missing from the
   visible text (numbering jumps 1980 -> 1982); confirm upstream. */
1980 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1982 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1986 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1988 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1989 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1993 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1995 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1999 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
2003 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2005 /* XXX possible optimization point */
2007 cc
= tcg_temp_new_i64();
2008 tcg_gen_extu_i32_i64(cc
, cc_op
);
2009 tcg_gen_shri_i64(cc
, cc
, 1);
2011 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
2012 tcg_temp_free_i64(cc
);
2016 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
2018 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2022 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
2024 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2028 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
2030 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2031 return_low128(o
->out2
);
/* op_and: 64-bit bitwise AND, o->out = o->in1 & o->in2.
   NOTE(review): corrupted extraction -- braces/return missing from the
   visible text (numbering jumps 2035 -> 2037); confirm upstream. */
2035 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
2037 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2041 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
2043 int shift
= s
->insn
->data
& 0xff;
2044 int size
= s
->insn
->data
>> 8;
2045 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2048 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2049 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
2050 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2052 /* Produce the CC from only the bits manipulated. */
2053 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2054 set_cc_nz_u64(s
, cc_dst
);
2058 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
2060 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
2061 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
2062 tcg_gen_mov_i64(psw_addr
, o
->in2
);
2063 return EXIT_PC_UPDATED
;
2069 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
2071 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
2072 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
2075 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
2077 int m1
= get_field(s
->fields
, m1
);
2078 bool is_imm
= have_field(s
->fields
, i2
);
2079 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2082 disas_jcc(s
, &c
, m1
);
2083 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2086 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
2088 int r1
= get_field(s
->fields
, r1
);
2089 bool is_imm
= have_field(s
->fields
, i2
);
2090 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2094 c
.cond
= TCG_COND_NE
;
2099 t
= tcg_temp_new_i64();
2100 tcg_gen_subi_i64(t
, regs
[r1
], 1);
2101 store_reg32_i64(r1
, t
);
2102 c
.u
.s32
.a
= tcg_temp_new_i32();
2103 c
.u
.s32
.b
= tcg_const_i32(0);
2104 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
2105 tcg_temp_free_i64(t
);
2107 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2110 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
2112 int r1
= get_field(s
->fields
, r1
);
2113 bool is_imm
= have_field(s
->fields
, i2
);
2114 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2117 c
.cond
= TCG_COND_NE
;
2122 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
2123 c
.u
.s64
.a
= regs
[r1
];
2124 c
.u
.s64
.b
= tcg_const_i64(0);
2126 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2129 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
2131 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2136 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
2138 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2143 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
2145 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2150 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
2152 int l
= get_field(s
->fields
, l1
);
2157 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
2158 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
2161 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
2162 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
2165 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
2166 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
2169 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
2170 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
2173 potential_page_fault(s
);
2174 vl
= tcg_const_i32(l
);
2175 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
2176 tcg_temp_free_i32(vl
);
2180 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
2184 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
2186 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2187 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2188 potential_page_fault(s
);
2189 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2190 tcg_temp_free_i32(r1
);
2191 tcg_temp_free_i32(r3
);
2196 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
2198 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2199 TCGv_i32 t1
= tcg_temp_new_i32();
2200 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2201 potential_page_fault(s
);
2202 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
2204 tcg_temp_free_i32(t1
);
2205 tcg_temp_free_i32(m3
);
2209 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
2211 int r3
= get_field(s
->fields
, r3
);
2212 potential_page_fault(s
);
2213 gen_helper_cs(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2218 static ExitStatus
op_csg(DisasContext
*s
, DisasOps
*o
)
2220 int r3
= get_field(s
->fields
, r3
);
2221 potential_page_fault(s
);
2222 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2227 static ExitStatus
op_cds(DisasContext
*s
, DisasOps
*o
)
2229 int r3
= get_field(s
->fields
, r3
);
2230 TCGv_i64 in3
= tcg_temp_new_i64();
2231 tcg_gen_deposit_i64(in3
, regs
[r3
+ 1], regs
[r3
], 32, 32);
2232 potential_page_fault(s
);
2233 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, in3
);
2234 tcg_temp_free_i64(in3
);
2239 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
2241 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2242 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2243 potential_page_fault(s
);
2244 /* XXX rewrite in tcg */
2245 gen_helper_cdsg(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2250 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2252 TCGv_i64 t1
= tcg_temp_new_i64();
2253 TCGv_i32 t2
= tcg_temp_new_i32();
2254 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
2255 gen_helper_cvd(t1
, t2
);
2256 tcg_temp_free_i32(t2
);
2257 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2258 tcg_temp_free_i64(t1
);
2262 #ifndef CONFIG_USER_ONLY
2263 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2267 check_privileged(s
);
2268 potential_page_fault(s
);
2270 /* We pretend the format is RX_a so that D2 is the field we want. */
2271 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
2272 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
2273 tcg_temp_free_i32(tmp
);
2278 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2280 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2281 return_low128(o
->out
);
2285 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2287 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2288 return_low128(o
->out
);
2292 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2294 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2295 return_low128(o
->out
);
2299 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2301 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2302 return_low128(o
->out
);
2306 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2308 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2312 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2314 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2318 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2320 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2321 return_low128(o
->out2
);
2325 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2327 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2331 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2333 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2334 tb->flags, (ab)use the tb->cs_base field as the address of
2335 the template in memory, and grab 8 bits of tb->flags/cflags for
2336 the contents of the register. We would then recognize all this
2337 in gen_intermediate_code_internal, generating code for exactly
2338 one instruction. This new TB then gets executed normally.
2340 On the other hand, this seems to be mostly used for modifying
2341 MVC inside of memcpy, which needs a helper call anyway. So
2342 perhaps this doesn't bear thinking about any further. */
2349 tmp
= tcg_const_i64(s
->next_pc
);
2350 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2351 tcg_temp_free_i64(tmp
);
2357 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2359 int m3
= get_field(s
->fields
, m3
);
2360 int pos
, len
, base
= s
->insn
->data
;
2361 TCGv_i64 tmp
= tcg_temp_new_i64();
2366 /* Effectively a 32-bit load. */
2367 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2374 /* Effectively a 16-bit load. */
2375 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2383 /* Effectively an 8-bit load. */
2384 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2389 pos
= base
+ ctz32(m3
) * 8;
2390 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2391 ccm
= ((1ull << len
) - 1) << pos
;
2395 /* This is going to be a sequence of loads and inserts. */
2396 pos
= base
+ 32 - 8;
2400 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2401 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2402 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2405 m3
= (m3
<< 1) & 0xf;
2411 tcg_gen_movi_i64(tmp
, ccm
);
2412 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2413 tcg_temp_free_i64(tmp
);
2417 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2419 int shift
= s
->insn
->data
& 0xff;
2420 int size
= s
->insn
->data
>> 8;
2421 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2425 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2427 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2431 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2433 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2437 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2439 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2443 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2445 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2449 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2451 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2452 return_low128(o
->out2
);
2456 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2458 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2459 return_low128(o
->out2
);
/* op_llgt: load 31-bit (LLGT-style): keep only the low 31 bits of the
   source, zeroing bit 32..63 and the top bit of the 32-bit half
   (mask 0x7fffffff).
   NOTE(review): corrupted extraction -- braces/return missing from the
   visible text (numbering jumps 2463 -> 2465); confirm upstream. */
2463 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2465 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2469 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2471 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2475 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2477 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2481 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2483 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2487 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2489 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2493 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2495 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2499 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2501 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
/* op_ld64: 64-bit memory load from the address in o->in2 into o->out,
   using the current memory index (address space) of the translator.
   NOTE(review): corrupted extraction -- braces/return missing from the
   visible text (numbering jumps 2505 -> 2507); confirm upstream. */
2505 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2507 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2511 #ifndef CONFIG_USER_ONLY
2512 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2514 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2515 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2516 check_privileged(s
);
2517 potential_page_fault(s
);
2518 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2519 tcg_temp_free_i32(r1
);
2520 tcg_temp_free_i32(r3
);
2524 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2526 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2527 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2528 check_privileged(s
);
2529 potential_page_fault(s
);
2530 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2531 tcg_temp_free_i32(r1
);
2532 tcg_temp_free_i32(r3
);
2535 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2537 check_privileged(s
);
2538 potential_page_fault(s
);
2539 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
/* op_lpsw: Load PSW (privileged).  Reads two 32-bit words at the
   address in o->in2: the first is the short-format PSW mask, the second
   the instruction address.  The 32-bit mask is widened to the 64-bit
   PSW mask layout by shifting left 32, then both halves are handed to
   the load_psw helper.  Ends the TB with EXIT_NORETURN since the PSW
   (and thus control flow) has been replaced wholesale.
   NOTE(review): corrupted extraction -- the declarations of t1/t2 and
   the surrounding braces are missing from the visible text (numbering
   jumps 2544 -> 2548 -> 2550); confirm upstream before editing. */
2544 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2548 check_privileged(s
);
2550 t1
= tcg_temp_new_i64();
2551 t2
= tcg_temp_new_i64();
2552 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2553 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2554 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2555 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2556 tcg_gen_shli_i64(t1
, t1
, 32);
2557 gen_helper_load_psw(cpu_env
, t1
, t2
);
2558 tcg_temp_free_i64(t1
);
2559 tcg_temp_free_i64(t2
);
2560 return EXIT_NORETURN
;
2564 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2566 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2567 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2568 potential_page_fault(s
);
2569 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2570 tcg_temp_free_i32(r1
);
2571 tcg_temp_free_i32(r3
);
2575 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2577 int r1
= get_field(s
->fields
, r1
);
2578 int r3
= get_field(s
->fields
, r3
);
2579 TCGv_i64 t
= tcg_temp_new_i64();
2580 TCGv_i64 t4
= tcg_const_i64(4);
2583 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2584 store_reg32_i64(r1
, t
);
2588 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2592 tcg_temp_free_i64(t
);
2593 tcg_temp_free_i64(t4
);
2597 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2599 int r1
= get_field(s
->fields
, r1
);
2600 int r3
= get_field(s
->fields
, r3
);
2601 TCGv_i64 t
= tcg_temp_new_i64();
2602 TCGv_i64 t4
= tcg_const_i64(4);
2605 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2606 store_reg32h_i64(r1
, t
);
2610 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2614 tcg_temp_free_i64(t
);
2615 tcg_temp_free_i64(t4
);
2619 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2621 int r1
= get_field(s
->fields
, r1
);
2622 int r3
= get_field(s
->fields
, r3
);
2623 TCGv_i64 t8
= tcg_const_i64(8);
2626 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2630 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2634 tcg_temp_free_i64(t8
);
2638 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2641 o
->g_out
= o
->g_in2
;
2642 TCGV_UNUSED_I64(o
->in2
);
2647 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2651 o
->g_out
= o
->g_in1
;
2652 o
->g_out2
= o
->g_in2
;
2653 TCGV_UNUSED_I64(o
->in1
);
2654 TCGV_UNUSED_I64(o
->in2
);
2655 o
->g_in1
= o
->g_in2
= false;
2659 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2661 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2662 potential_page_fault(s
);
2663 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2664 tcg_temp_free_i32(l
);
2668 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2670 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2671 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2672 potential_page_fault(s
);
2673 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2674 tcg_temp_free_i32(r1
);
2675 tcg_temp_free_i32(r2
);
2680 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2682 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2683 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2684 potential_page_fault(s
);
2685 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2686 tcg_temp_free_i32(r1
);
2687 tcg_temp_free_i32(r3
);
2692 #ifndef CONFIG_USER_ONLY
2693 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2695 int r1
= get_field(s
->fields
, l1
);
2696 check_privileged(s
);
2697 potential_page_fault(s
);
2698 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2703 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2705 int r1
= get_field(s
->fields
, l1
);
2706 check_privileged(s
);
2707 potential_page_fault(s
);
2708 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
/* op_mul: 64-bit integer multiply (low 64 bits of the product),
   o->out = o->in1 * o->in2.  The full 128-bit variant lives in
   op_mul128 below.
   NOTE(review): corrupted extraction -- braces/return missing from the
   visible text (numbering jumps 2714 -> 2716); confirm upstream. */
2714 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2716 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2720 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2722 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2723 return_low128(o
->out2
);
2727 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2729 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2733 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2735 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2739 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2741 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2745 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2747 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2748 return_low128(o
->out2
);
2752 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2754 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2755 return_low128(o
->out2
);
2759 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2761 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2762 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2763 tcg_temp_free_i64(r3
);
2767 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2769 int r3
= get_field(s
->fields
, r3
);
2770 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2774 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2776 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2777 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2778 tcg_temp_free_i64(r3
);
2782 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2784 int r3
= get_field(s
->fields
, r3
);
2785 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2789 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2791 gen_helper_nabs_i64(o
->out
, o
->in2
);
2795 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2797 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2801 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2803 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2807 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2809 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2810 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2814 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2816 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2817 potential_page_fault(s
);
2818 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2819 tcg_temp_free_i32(l
);
/* op_neg: two's-complement negation, o->out = -o->in2.
   NOTE(review): corrupted extraction -- braces/return missing from the
   visible text (numbering jumps 2824 -> 2826); confirm upstream. */
2824 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2826 tcg_gen_neg_i64(o
->out
, o
->in2
);
2830 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2832 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2836 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2838 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2842 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2844 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2845 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2849 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2851 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2852 potential_page_fault(s
);
2853 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2854 tcg_temp_free_i32(l
);
2859 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2861 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2865 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2867 int shift
= s
->insn
->data
& 0xff;
2868 int size
= s
->insn
->data
>> 8;
2869 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2872 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2873 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2875 /* Produce the CC from only the bits manipulated. */
2876 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2877 set_cc_nz_u64(s
, cc_dst
);
2881 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2883 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2887 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2889 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2893 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2895 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2899 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2901 TCGv_i32 t1
= tcg_temp_new_i32();
2902 TCGv_i32 t2
= tcg_temp_new_i32();
2903 TCGv_i32 to
= tcg_temp_new_i32();
2904 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2905 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2906 tcg_gen_rotl_i32(to
, t1
, t2
);
2907 tcg_gen_extu_i32_i64(o
->out
, to
);
2908 tcg_temp_free_i32(t1
);
2909 tcg_temp_free_i32(t2
);
2910 tcg_temp_free_i32(to
);
2914 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2916 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
2920 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
2922 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2926 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
2928 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2932 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
2934 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2935 return_low128(o
->out2
);
2939 #ifndef CONFIG_USER_ONLY
2940 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
2942 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2943 check_privileged(s
);
2944 potential_page_fault(s
);
2945 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
2946 tcg_temp_free_i32(r1
);
2951 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
2953 uint64_t sign
= 1ull << s
->insn
->data
;
2954 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
2955 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
2956 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2957 /* The arithmetic left shift is curious in that it does not affect
2958 the sign bit. Copy that over from the source unchanged. */
2959 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
2960 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
2961 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
2965 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
2967 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2971 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
2973 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
2977 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
2979 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
2983 #ifndef CONFIG_USER_ONLY
2984 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
2986 check_privileged(s
);
2987 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
2991 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
2993 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2994 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2995 check_privileged(s
);
2996 potential_page_fault(s
);
2997 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
2998 tcg_temp_free_i32(r1
);
2999 tcg_temp_free_i32(r3
);
3003 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3005 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3006 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3007 check_privileged(s
);
3008 potential_page_fault(s
);
3009 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3010 tcg_temp_free_i32(r1
);
3011 tcg_temp_free_i32(r3
);
3015 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3017 uint64_t i2
= get_field(s
->fields
, i2
);
3020 check_privileged(s
);
3022 /* It is important to do what the instruction name says: STORE THEN.
3023 If we let the output hook perform the store then if we fault and
3024 restart, we'll have the wrong SYSTEM MASK in place. */
3025 t
= tcg_temp_new_i64();
3026 tcg_gen_shri_i64(t
, psw_mask
, 56);
3027 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3028 tcg_temp_free_i64(t
);
3030 if (s
->fields
->op
== 0xac) {
3031 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3032 (i2
<< 56) | 0x00ffffffffffffffull
);
3034 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3040 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3042 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3046 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3048 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3052 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3054 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3058 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3060 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3064 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3066 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3067 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3068 potential_page_fault(s
);
3069 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3070 tcg_temp_free_i32(r1
);
3071 tcg_temp_free_i32(r3
);
3075 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3077 int m3
= get_field(s
->fields
, m3
);
3078 int pos
, base
= s
->insn
->data
;
3079 TCGv_i64 tmp
= tcg_temp_new_i64();
3081 pos
= base
+ ctz32(m3
) * 8;
3084 /* Effectively a 32-bit store. */
3085 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3086 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3092 /* Effectively a 16-bit store. */
3093 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3094 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3101 /* Effectively an 8-bit store. */
3102 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3103 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3107 /* This is going to be a sequence of shifts and stores. */
3108 pos
= base
+ 32 - 8;
3111 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3112 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3113 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3115 m3
= (m3
<< 1) & 0xf;
3120 tcg_temp_free_i64(tmp
);
3124 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3126 int r1
= get_field(s
->fields
, r1
);
3127 int r3
= get_field(s
->fields
, r3
);
3128 int size
= s
->insn
->data
;
3129 TCGv_i64 tsize
= tcg_const_i64(size
);
3133 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3135 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3140 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3144 tcg_temp_free_i64(tsize
);
3148 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3150 int r1
= get_field(s
->fields
, r1
);
3151 int r3
= get_field(s
->fields
, r3
);
3152 TCGv_i64 t
= tcg_temp_new_i64();
3153 TCGv_i64 t4
= tcg_const_i64(4);
3154 TCGv_i64 t32
= tcg_const_i64(32);
3157 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3158 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3162 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3166 tcg_temp_free_i64(t
);
3167 tcg_temp_free_i64(t4
);
3168 tcg_temp_free_i64(t32
);
3172 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3174 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3178 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3183 tcg_gen_not_i64(o
->in2
, o
->in2
);
3184 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3186 /* XXX possible optimization point */
3188 cc
= tcg_temp_new_i64();
3189 tcg_gen_extu_i32_i64(cc
, cc_op
);
3190 tcg_gen_shri_i64(cc
, cc
, 1);
3191 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3192 tcg_temp_free_i64(cc
);
3196 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3203 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3204 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3205 tcg_temp_free_i32(t
);
3207 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3208 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3209 tcg_temp_free_i32(t
);
3211 gen_exception(EXCP_SVC
);
3212 return EXIT_NORETURN
;
3215 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3217 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3222 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3224 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3229 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3231 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
3236 #ifndef CONFIG_USER_ONLY
3237 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3239 potential_page_fault(s
);
3240 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3246 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3248 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3249 potential_page_fault(s
);
3250 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3251 tcg_temp_free_i32(l
);
3256 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3258 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3259 potential_page_fault(s
);
3260 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3261 tcg_temp_free_i32(l
);
3265 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3267 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3268 potential_page_fault(s
);
3269 gen_helper_xc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3270 tcg_temp_free_i32(l
);
3275 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3277 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3281 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3283 int shift
= s
->insn
->data
& 0xff;
3284 int size
= s
->insn
->data
>> 8;
3285 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3288 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3289 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3291 /* Produce the CC from only the bits manipulated. */
3292 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3293 set_cc_nz_u64(s
, cc_dst
);
3297 /* ====================================================================== */
3298 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3299 the original inputs), update the various cc data structures in order to
3300 be able to compute the new condition code. */
3302 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3304 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3307 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3309 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3312 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3314 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3317 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3319 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3322 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3324 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3327 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3329 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3332 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3334 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3337 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3339 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3342 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3344 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3347 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3349 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3352 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3354 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3357 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3359 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3362 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
3364 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
3367 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
3369 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
3372 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
3374 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
3377 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3379 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3382 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3384 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3387 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3389 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3392 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3394 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3397 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3399 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3400 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3403 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3405 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3408 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3410 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3413 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3415 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3418 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3420 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3423 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3425 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3428 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3430 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3433 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3435 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3438 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3440 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3443 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3445 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3448 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3450 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3453 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3455 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3458 /* ====================================================================== */
3459 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3460 with the TCG register to which we will write. Used in combination with
3461 the "wout" generators, in some cases we need a new temporary, and in
3462 some cases we can write to a TCG global. */
3464 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3466 o
->out
= tcg_temp_new_i64();
3469 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3471 o
->out
= tcg_temp_new_i64();
3472 o
->out2
= tcg_temp_new_i64();
3475 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3477 o
->out
= regs
[get_field(f
, r1
)];
3481 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3483 /* ??? Specification exception: r1 must be even. */
3484 int r1
= get_field(f
, r1
);
3486 o
->out2
= regs
[(r1
+ 1) & 15];
3487 o
->g_out
= o
->g_out2
= true;
3490 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3492 o
->out
= fregs
[get_field(f
, r1
)];
3496 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3498 /* ??? Specification exception: r1 must be < 14. */
3499 int r1
= get_field(f
, r1
);
3501 o
->out2
= fregs
[(r1
+ 2) & 15];
3502 o
->g_out
= o
->g_out2
= true;
3505 /* ====================================================================== */
3506 /* The "Write OUTput" generators. These generally perform some non-trivial
3507 copy of data to TCG globals, or to main memory. The trivial cases are
3508 generally handled by having a "prep" generator install the TCG global
3509 as the destination of the operation. */
3511 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3513 store_reg(get_field(f
, r1
), o
->out
);
3516 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3518 int r1
= get_field(f
, r1
);
3519 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3522 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3524 int r1
= get_field(f
, r1
);
3525 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
3528 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3530 store_reg32_i64(get_field(f
, r1
), o
->out
);
3533 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3535 /* ??? Specification exception: r1 must be even. */
3536 int r1
= get_field(f
, r1
);
3537 store_reg32_i64(r1
, o
->out
);
3538 store_reg32_i64((r1
+ 1) & 15, o
->out2
);
3541 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3543 /* ??? Specification exception: r1 must be even. */
3544 int r1
= get_field(f
, r1
);
3545 store_reg32_i64((r1
+ 1) & 15, o
->out
);
3546 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3547 store_reg32_i64(r1
, o
->out
);
3550 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3552 store_freg32_i64(get_field(f
, r1
), o
->out
);
3555 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3557 store_freg(get_field(f
, r1
), o
->out
);
3560 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3562 /* ??? Specification exception: r1 must be < 14. */
3563 int f1
= get_field(s
->fields
, r1
);
3564 store_freg(f1
, o
->out
);
3565 store_freg((f1
+ 2) & 15, o
->out2
);
3568 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3570 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3571 store_reg32_i64(get_field(f
, r1
), o
->out
);
3575 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3577 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3578 store_freg32_i64(get_field(f
, r1
), o
->out
);
3582 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3584 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3587 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3589 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3592 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3594 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3597 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3599 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3602 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3604 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
3607 /* ====================================================================== */
3608 /* The "INput 1" generators. These load the first operand to an insn. */
3610 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3612 o
->in1
= load_reg(get_field(f
, r1
));
3615 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3617 o
->in1
= regs
[get_field(f
, r1
)];
3621 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3623 o
->in1
= tcg_temp_new_i64();
3624 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3627 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3629 o
->in1
= tcg_temp_new_i64();
3630 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3633 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3635 o
->in1
= tcg_temp_new_i64();
3636 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
3639 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3641 /* ??? Specification exception: r1 must be even. */
3642 int r1
= get_field(f
, r1
);
3643 o
->in1
= load_reg((r1
+ 1) & 15);
3646 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3648 /* ??? Specification exception: r1 must be even. */
3649 int r1
= get_field(f
, r1
);
3650 o
->in1
= tcg_temp_new_i64();
3651 tcg_gen_ext32s_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3654 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3656 /* ??? Specification exception: r1 must be even. */
3657 int r1
= get_field(f
, r1
);
3658 o
->in1
= tcg_temp_new_i64();
3659 tcg_gen_ext32u_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3662 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3664 /* ??? Specification exception: r1 must be even. */
3665 int r1
= get_field(f
, r1
);
3666 o
->in1
= tcg_temp_new_i64();
3667 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3670 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3672 o
->in1
= load_reg(get_field(f
, r2
));
3675 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3677 o
->in1
= load_reg(get_field(f
, r3
));
3680 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3682 o
->in1
= regs
[get_field(f
, r3
)];
3686 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3688 o
->in1
= tcg_temp_new_i64();
3689 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3692 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3694 o
->in1
= tcg_temp_new_i64();
3695 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3698 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3700 o
->in1
= load_freg32_i64(get_field(f
, r1
));
3703 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3705 o
->in1
= fregs
[get_field(f
, r1
)];
3709 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3711 /* ??? Specification exception: r1 must be < 14. */
3712 int r1
= get_field(f
, r1
);
3714 o
->out2
= fregs
[(r1
+ 2) & 15];
3715 o
->g_out
= o
->g_out2
= true;
3718 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3720 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
3723 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3725 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3726 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3729 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3732 o
->in1
= tcg_temp_new_i64();
3733 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
3736 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3739 o
->in1
= tcg_temp_new_i64();
3740 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
3743 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3746 o
->in1
= tcg_temp_new_i64();
3747 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
3750 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3753 o
->in1
= tcg_temp_new_i64();
3754 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
3757 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3760 o
->in1
= tcg_temp_new_i64();
3761 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
3764 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3767 o
->in1
= tcg_temp_new_i64();
3768 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
3771 /* ====================================================================== */
3772 /* The "INput 2" generators. These load the second operand to an insn. */
3774 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3776 o
->in2
= regs
[get_field(f
, r1
)];
3780 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3782 o
->in2
= tcg_temp_new_i64();
3783 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3786 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3788 o
->in2
= tcg_temp_new_i64();
3789 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3792 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3794 o
->in2
= load_reg(get_field(f
, r2
));
3797 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3799 o
->in2
= regs
[get_field(f
, r2
)];
3803 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3805 int r2
= get_field(f
, r2
);
3807 o
->in2
= load_reg(r2
);
3811 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3813 o
->in2
= tcg_temp_new_i64();
3814 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3817 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3819 o
->in2
= tcg_temp_new_i64();
3820 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3823 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3825 o
->in2
= tcg_temp_new_i64();
3826 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3829 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3831 o
->in2
= tcg_temp_new_i64();
3832 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3835 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3837 o
->in2
= load_reg(get_field(f
, r3
));
3840 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3842 o
->in2
= tcg_temp_new_i64();
3843 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3846 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3848 o
->in2
= tcg_temp_new_i64();
3849 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3852 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3854 o
->in2
= load_freg32_i64(get_field(f
, r2
));
3857 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3859 o
->in2
= fregs
[get_field(f
, r2
)];
3863 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3865 /* ??? Specification exception: r1 must be < 14. */
3866 int r2
= get_field(f
, r2
);
3868 o
->in2
= fregs
[(r2
+ 2) & 15];
3869 o
->g_in1
= o
->g_in2
= true;
3872 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3874 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3875 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3878 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3880 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
3883 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3885 help_l2_shift(s
, f
, o
, 31);
3888 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3890 help_l2_shift(s
, f
, o
, 63);
3893 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3896 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
3899 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3902 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
3905 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3908 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3911 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3914 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3917 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3920 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3923 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3926 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3929 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3932 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3935 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3938 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3941 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3944 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3947 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3950 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3953 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3955 o
->in2
= tcg_const_i64(get_field(f
, i2
));
3958 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3960 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
3963 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3965 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
3968 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3970 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
3973 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3975 uint64_t i2
= (uint16_t)get_field(f
, i2
);
3976 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3979 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3981 uint64_t i2
= (uint32_t)get_field(f
, i2
);
3982 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3985 /* ====================================================================== */
3987 /* Find opc within the table of insns. This is formulated as a switch
3988 statement so that (1) we get compile-time notice of cut-paste errors
3989 for duplicated opcodes, and (2) the compiler generates the binary
3990 search tree, rather than us having to post-process the table. */
3992 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
3993 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
3995 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
3997 enum DisasInsnEnum
{
3998 #include "insn-data.def"
4002 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4007 .help_in1 = in1_##I1, \
4008 .help_in2 = in2_##I2, \
4009 .help_prep = prep_##P, \
4010 .help_wout = wout_##W, \
4011 .help_cout = cout_##CC, \
4012 .help_op = op_##OP, \
4016 /* Allow 0 to be used for NULL in the table below. */
4024 static const DisasInsn insn_info
[] = {
4025 #include "insn-data.def"
4029 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4030 case OPC: return &insn_info[insn_ ## NM];
4032 static const DisasInsn
*lookup_opc(uint16_t opc
)
4035 #include "insn-data.def"
4044 /* Extract a field from the insn. The INSN should be left-aligned in
4045 the uint64_t so that we can more easily utilize the big-bit-endian
4046 definitions we extract from the Principals of Operation. */
4048 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4056 /* Zero extract the field from the insn. */
4057 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4059 /* Sign-extend, or un-swap the field as necessary. */
4061 case 0: /* unsigned */
4063 case 1: /* signed */
4064 assert(f
->size
<= 32);
4065 m
= 1u << (f
->size
- 1);
4068 case 2: /* dl+dh split, signed 20 bit. */
4069 r
= ((int8_t)r
<< 12) | (r
>> 8);
4075 /* Validate that the "compressed" encoding we selected above is valid.
4076 I.e. we havn't make two different original fields overlap. */
4077 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4078 o
->presentC
|= 1 << f
->indexC
;
4079 o
->presentO
|= 1 << f
->indexO
;
4081 o
->c
[f
->indexC
] = r
;
/*
 * NOTE(review): extract_insn reads the raw instruction at s->pc via
 * ld_code2/ld_code4, derives the major opcode (op) and instruction
 * length (get_ilen), advances s->next_pc, then locates the secondary
 * opcode -- whose bit position depends on the major opcode, per the
 * switch cases below -- and resolves the DisasInsn via
 * lookup_opc(op << 8 | op2).  On success, all compressed operand fields
 * are extracted into *f using the per-format field descriptions in
 * format_info[].  Many interior lines (local declarations of op/op2/
 * ilen/i, the ilen-based dispatch around the ld_code4 calls, switch
 * headers, braces/breaks, and the final return) were dropped in
 * extraction -- original line numbers are non-contiguous.
 */
4084 /* Lookup the insn at the current PC, extracting the operands into O and
4085 returning the info struct for the insn. Returns NULL for invalid insn. */
4087 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4090 uint64_t insn
, pc
= s
->pc
;
4092 const DisasInsn
*info
;
4094 insn
= ld_code2(env
, pc
);
4095 op
= (insn
>> 8) & 0xff;
4096 ilen
= get_ilen(op
);
4097 s
->next_pc
= s
->pc
+ ilen
;
4104 insn
= ld_code4(env
, pc
) << 32;
4107 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4113 /* We can't actually determine the insn format until we've looked up
4114 the full insn opcode. Which we can't do without locating the
4115 secondary opcode. Assume by default that OP2 is at bit 40; for
4116 those smaller insns that don't actually have a secondary opcode
4117 this will correctly result in OP2 = 0. */
4123 case 0xb2: /* S, RRF, RRE */
4124 case 0xb3: /* RRE, RRD, RRF */
4125 case 0xb9: /* RRE, RRF */
4126 case 0xe5: /* SSE, SIL */
4127 op2
= (insn
<< 8) >> 56;
4131 case 0xc0: /* RIL */
4132 case 0xc2: /* RIL */
4133 case 0xc4: /* RIL */
4134 case 0xc6: /* RIL */
4135 case 0xc8: /* SSF */
4136 case 0xcc: /* RIL */
4137 op2
= (insn
<< 12) >> 60;
4139 case 0xd0 ... 0xdf: /* SS */
4145 case 0xee ... 0xf3: /* SS */
4146 case 0xf8 ... 0xfd: /* SS */
4150 op2
= (insn
<< 40) >> 56;
4154 memset(f
, 0, sizeof(*f
));
4158 /* Lookup the instruction. */
4159 info
= lookup_opc(op
<< 8 | op2
);
4161 /* If we found it, extract the operands. */
4163 DisasFormat fmt
= info
->fmt
;
4166 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4167 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
/*
 * NOTE(review): translate_one translates a single guest instruction.
 * Pipeline visible here: (1) extract_insn decodes the insn and its
 * operand fields into f; (2) if lookup failed, fall back to the legacy
 * interpreter disas_s390_insn and map s->is_jmp onto an ExitStatus;
 * (3) otherwise clear the DisasOps communication struct o (ownership
 * flags g_* = false, all TCGv_i64 slots marked unused); (4) invoke the
 * insn's helper callbacks in the fixed order in1 -> in2 -> prep -> op ->
 * wout -> cout, each only if non-NULL, with op producing the ExitStatus;
 * (5) free any temporaries the helpers created, skipping slots flagged
 * as globals via the matching g_* flag (addr1 has no such flag and is
 * freed whenever used).  Interior lines (declarations of f and o,
 * several braces and the trailing pc-advance/return code) were dropped
 * in extraction -- original line numbers are non-contiguous.
 */
4173 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4175 const DisasInsn
*insn
;
4176 ExitStatus ret
= NO_EXIT
;
4180 insn
= extract_insn(env
, s
, &f
);
4182 /* If not found, try the old interpreter. This includes ILLOPC. */
4184 disas_s390_insn(env
, s
);
4185 switch (s
->is_jmp
) {
4193 ret
= EXIT_PC_UPDATED
;
4196 ret
= EXIT_NORETURN
;
4206 /* Set up the strutures we use to communicate with the helpers. */
4209 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4210 TCGV_UNUSED_I64(o
.out
);
4211 TCGV_UNUSED_I64(o
.out2
);
4212 TCGV_UNUSED_I64(o
.in1
);
4213 TCGV_UNUSED_I64(o
.in2
);
4214 TCGV_UNUSED_I64(o
.addr1
);
4216 /* Implement the instruction. */
4217 if (insn
->help_in1
) {
4218 insn
->help_in1(s
, &f
, &o
);
4220 if (insn
->help_in2
) {
4221 insn
->help_in2(s
, &f
, &o
);
4223 if (insn
->help_prep
) {
4224 insn
->help_prep(s
, &f
, &o
);
4226 if (insn
->help_op
) {
4227 ret
= insn
->help_op(s
, &o
);
4229 if (insn
->help_wout
) {
4230 insn
->help_wout(s
, &f
, &o
);
4232 if (insn
->help_cout
) {
4233 insn
->help_cout(s
, &o
);
4236 /* Free any temporaries created by the helpers. */
4237 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4238 tcg_temp_free_i64(o
.out
);
4240 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4241 tcg_temp_free_i64(o
.out2
);
4243 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4244 tcg_temp_free_i64(o
.in1
);
4246 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4247 tcg_temp_free_i64(o
.in2
);
4249 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4250 tcg_temp_free_i64(o
.addr1
);
4253 /* Advance to the next instruction. */
/*
 * NOTE(review): main translation-block loop.  Visible behavior:
 * initializes a DisasContext (cc_op starts DYNAMIC, is_jmp DISAS_NEXT,
 * singlestep mirrored from env), masks pc_start to 31 bits when
 * FLAG_MASK_64 is clear, then repeatedly records per-opcode metadata
 * (gen_opc_pc / gen_opc_cc_op / gen_opc_instr_start / gen_opc_icount --
 * presumably only in search-pc mode; the third parameter's name is not
 * visible in this excerpt, confirm against the full file), checks
 * breakpoints (forcing EXIT_PC_STALE), and calls translate_one until
 * status != NO_EXIT or a stop condition hits: page boundary, opcode
 * buffer exhaustion, max_insns reached, or singlestep.  Afterwards it
 * flushes cc state for EXIT_PC_UPDATED, emits EXCP_DEBUG for the debug
 * path, finalizes the icount/op buffer, and fills tb->size/tb->icount,
 * with optional in-asm disassembly logging under S390X_DEBUG_DISAS.
 * Many interior lines (declarations of dc/j/lj/bp/status, do { header,
 * switch header on status, braces, exit_tb emission) were dropped in
 * extraction -- original line numbers are non-contiguous.
 */
4258 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
4259 TranslationBlock
*tb
,
4263 target_ulong pc_start
;
4264 uint64_t next_page_start
;
4265 uint16_t *gen_opc_end
;
4267 int num_insns
, max_insns
;
4275 if (!(tb
->flags
& FLAG_MASK_64
)) {
4276 pc_start
&= 0x7fffffff;
4281 dc
.cc_op
= CC_OP_DYNAMIC
;
4282 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4283 dc
.is_jmp
= DISAS_NEXT
;
4285 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4287 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4290 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4291 if (max_insns
== 0) {
4292 max_insns
= CF_COUNT_MASK
;
4299 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4303 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4306 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4307 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4308 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4309 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4311 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4315 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4316 tcg_gen_debug_insn_start(dc
.pc
);
4320 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4321 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4322 if (bp
->pc
== dc
.pc
) {
4323 status
= EXIT_PC_STALE
;
4329 if (status
== NO_EXIT
) {
4330 status
= translate_one(env
, &dc
);
4333 /* If we reach a page boundary, are single stepping,
4334 or exhaust instruction count, stop generation. */
4335 if (status
== NO_EXIT
4336 && (dc
.pc
>= next_page_start
4337 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4338 || num_insns
>= max_insns
4340 || env
->singlestep_enabled
)) {
4341 status
= EXIT_PC_STALE
;
4343 } while (status
== NO_EXIT
);
4345 if (tb
->cflags
& CF_LAST_IO
) {
4354 update_psw_addr(&dc
);
4356 case EXIT_PC_UPDATED
:
4357 if (singlestep
&& dc
.cc_op
!= CC_OP_DYNAMIC
) {
4358 gen_op_calc_cc(&dc
);
4360 /* Next TB starts off with CC_OP_DYNAMIC,
4361 so make sure the cc op type is in env */
4362 gen_op_set_cc_op(&dc
);
4365 gen_exception(EXCP_DEBUG
);
4367 /* Generate the return instruction */
4375 gen_icount_end(tb
, num_insns
);
4376 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
4378 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4381 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4384 tb
->size
= dc
.pc
- pc_start
;
4385 tb
->icount
= num_insns
;
4388 #if defined(S390X_DEBUG_DISAS)
4389 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4390 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4391 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
4397 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4399 gen_intermediate_code_internal(env
, tb
, 0);
4402 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4404 gen_intermediate_code_internal(env
, tb
, 1);
/*
 * NOTE(review): restore_state_to_opc restores guest CPU state after a TB
 * is interrupted mid-execution: psw.addr is reloaded from the recorded
 * per-opcode PC array tcg_ctx.gen_opc_pc[pc_pos], and cc_op from the
 * parallel gen_opc_cc_op[] array.  The body of the final if -- which
 * presumably writes cc_op back into env when it is neither DYNAMIC nor
 * STATIC (confirm against the full file) -- continues past the end of
 * this excerpt, as does the function's opening brace and the declaration
 * of the local cc_op.
 */
4407 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4410 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4411 cc_op
= gen_opc_cc_op
[pc_pos
];
4412 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {