4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env
;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;
57 struct TranslationBlock
*tb
;
58 const DisasInsn
*insn
;
64 bool singlestep_enabled
;
67 /* Information carried about a condition to be evaluated. */
74 struct { TCGv_i64 a
, b
; } s64
;
75 struct { TCGv_i32 a
, b
; } s32
;
#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc-op counters: branches folded to an inline compare (hit) vs
   those that fell back to the helper (miss).  */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
86 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
88 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
89 if (s
->tb
->flags
& FLAG_MASK_32
) {
90 return pc
| 0x80000000;
96 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
99 S390CPU
*cpu
= S390_CPU(cs
);
100 CPUS390XState
*env
= &cpu
->env
;
103 if (env
->cc_op
> 3) {
104 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
105 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
107 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
108 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
111 for (i
= 0; i
< 16; i
++) {
112 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
114 cpu_fprintf(f
, "\n");
120 for (i
= 0; i
< 16; i
++) {
121 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, get_freg(env
, i
)->ll
);
123 cpu_fprintf(f
, "\n");
129 for (i
= 0; i
< 32; i
++) {
130 cpu_fprintf(f
, "V%02d=%016" PRIx64
"%016" PRIx64
, i
,
131 env
->vregs
[i
][0].ll
, env
->vregs
[i
][1].ll
);
132 cpu_fprintf(f
, (i
% 2) ? "\n" : " ");
135 #ifndef CONFIG_USER_ONLY
136 for (i
= 0; i
< 16; i
++) {
137 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
139 cpu_fprintf(f
, "\n");
146 #ifdef DEBUG_INLINE_BRANCHES
147 for (i
= 0; i
< CC_OP_MAX
; i
++) {
148 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
149 inline_branch_miss
[i
], inline_branch_hit
[i
]);
153 cpu_fprintf(f
, "\n");
156 static TCGv_i64 psw_addr
;
157 static TCGv_i64 psw_mask
;
158 static TCGv_i64 gbea
;
160 static TCGv_i32 cc_op
;
161 static TCGv_i64 cc_src
;
162 static TCGv_i64 cc_dst
;
163 static TCGv_i64 cc_vr
;
165 static char cpu_reg_names
[32][4];
166 static TCGv_i64 regs
[16];
167 static TCGv_i64 fregs
[16];
169 void s390x_translate_init(void)
173 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
174 tcg_ctx
.tcg_env
= cpu_env
;
175 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
176 offsetof(CPUS390XState
, psw
.addr
),
178 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
179 offsetof(CPUS390XState
, psw
.mask
),
181 gbea
= tcg_global_mem_new_i64(cpu_env
,
182 offsetof(CPUS390XState
, gbea
),
185 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
187 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
189 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
191 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
194 for (i
= 0; i
< 16; i
++) {
195 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
196 regs
[i
] = tcg_global_mem_new(cpu_env
,
197 offsetof(CPUS390XState
, regs
[i
]),
201 for (i
= 0; i
< 16; i
++) {
202 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
203 fregs
[i
] = tcg_global_mem_new(cpu_env
,
204 offsetof(CPUS390XState
, vregs
[i
][0].d
),
205 cpu_reg_names
[i
+ 16]);
209 static TCGv_i64
load_reg(int reg
)
211 TCGv_i64 r
= tcg_temp_new_i64();
212 tcg_gen_mov_i64(r
, regs
[reg
]);
216 static TCGv_i64
load_freg32_i64(int reg
)
218 TCGv_i64 r
= tcg_temp_new_i64();
219 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
223 static void store_reg(int reg
, TCGv_i64 v
)
225 tcg_gen_mov_i64(regs
[reg
], v
);
228 static void store_freg(int reg
, TCGv_i64 v
)
230 tcg_gen_mov_i64(fregs
[reg
], v
);
233 static void store_reg32_i64(int reg
, TCGv_i64 v
)
235 /* 32 bit register writes keep the upper half */
236 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
239 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
241 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
244 static void store_freg32_i64(int reg
, TCGv_i64 v
)
246 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
249 static void return_low128(TCGv_i64 dest
)
251 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
254 static void update_psw_addr(DisasContext
*s
)
257 tcg_gen_movi_i64(psw_addr
, s
->pc
);
260 static void per_branch(DisasContext
*s
, bool to_next
)
262 #ifndef CONFIG_USER_ONLY
263 tcg_gen_movi_i64(gbea
, s
->pc
);
265 if (s
->tb
->flags
& FLAG_MASK_PER
) {
266 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->next_pc
) : psw_addr
;
267 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
269 tcg_temp_free_i64(next_pc
);
275 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
276 TCGv_i64 arg1
, TCGv_i64 arg2
)
278 #ifndef CONFIG_USER_ONLY
279 if (s
->tb
->flags
& FLAG_MASK_PER
) {
280 TCGLabel
*lab
= gen_new_label();
281 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
283 tcg_gen_movi_i64(gbea
, s
->pc
);
284 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
288 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
289 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
290 tcg_temp_free_i64(pc
);
295 static void per_breaking_event(DisasContext
*s
)
297 tcg_gen_movi_i64(gbea
, s
->pc
);
300 static void update_cc_op(DisasContext
*s
)
302 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
303 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
307 static void potential_page_fault(DisasContext
*s
)
313 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
315 return (uint64_t)cpu_lduw_code(env
, pc
);
318 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
320 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
323 static int get_mem_index(DisasContext
*s
)
325 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
326 case PSW_ASC_PRIMARY
>> 32:
328 case PSW_ASC_SECONDARY
>> 32:
330 case PSW_ASC_HOME
>> 32:
338 static void gen_exception(int excp
)
340 TCGv_i32 tmp
= tcg_const_i32(excp
);
341 gen_helper_exception(cpu_env
, tmp
);
342 tcg_temp_free_i32(tmp
);
345 static void gen_program_exception(DisasContext
*s
, int code
)
349 /* Remember what pgm exeption this was. */
350 tmp
= tcg_const_i32(code
);
351 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
352 tcg_temp_free_i32(tmp
);
354 tmp
= tcg_const_i32(s
->ilen
);
355 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
356 tcg_temp_free_i32(tmp
);
358 /* Advance past instruction. */
365 /* Trigger exception. */
366 gen_exception(EXCP_PGM
);
369 static inline void gen_illegal_opcode(DisasContext
*s
)
371 gen_program_exception(s
, PGM_OPERATION
);
374 static inline void gen_trap(DisasContext
*s
)
378 /* Set DXC to 0xff. */
379 t
= tcg_temp_new_i32();
380 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
381 tcg_gen_ori_i32(t
, t
, 0xff00);
382 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
383 tcg_temp_free_i32(t
);
385 gen_program_exception(s
, PGM_DATA
);
388 #ifndef CONFIG_USER_ONLY
389 static void check_privileged(DisasContext
*s
)
391 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
392 gen_program_exception(s
, PGM_PRIVILEGED
);
397 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
399 TCGv_i64 tmp
= tcg_temp_new_i64();
400 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
402 /* Note that d2 is limited to 20 bits, signed. If we crop negative
403 displacements early we create larger immedate addends. */
405 /* Note that addi optimizes the imm==0 case. */
407 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
408 tcg_gen_addi_i64(tmp
, tmp
, d2
);
410 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
412 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
418 tcg_gen_movi_i64(tmp
, d2
);
421 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
427 static inline bool live_cc_data(DisasContext
*s
)
429 return (s
->cc_op
!= CC_OP_DYNAMIC
430 && s
->cc_op
!= CC_OP_STATIC
434 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
436 if (live_cc_data(s
)) {
437 tcg_gen_discard_i64(cc_src
);
438 tcg_gen_discard_i64(cc_dst
);
439 tcg_gen_discard_i64(cc_vr
);
441 s
->cc_op
= CC_OP_CONST0
+ val
;
444 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
446 if (live_cc_data(s
)) {
447 tcg_gen_discard_i64(cc_src
);
448 tcg_gen_discard_i64(cc_vr
);
450 tcg_gen_mov_i64(cc_dst
, dst
);
454 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
457 if (live_cc_data(s
)) {
458 tcg_gen_discard_i64(cc_vr
);
460 tcg_gen_mov_i64(cc_src
, src
);
461 tcg_gen_mov_i64(cc_dst
, dst
);
465 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
466 TCGv_i64 dst
, TCGv_i64 vr
)
468 tcg_gen_mov_i64(cc_src
, src
);
469 tcg_gen_mov_i64(cc_dst
, dst
);
470 tcg_gen_mov_i64(cc_vr
, vr
);
474 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
476 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
479 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
481 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
484 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
486 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
489 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
491 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
494 /* CC value is in env->cc_op */
495 static void set_cc_static(DisasContext
*s
)
497 if (live_cc_data(s
)) {
498 tcg_gen_discard_i64(cc_src
);
499 tcg_gen_discard_i64(cc_dst
);
500 tcg_gen_discard_i64(cc_vr
);
502 s
->cc_op
= CC_OP_STATIC
;
505 /* calculates cc into cc_op */
506 static void gen_op_calc_cc(DisasContext
*s
)
508 TCGv_i32 local_cc_op
;
511 TCGV_UNUSED_I32(local_cc_op
);
512 TCGV_UNUSED_I64(dummy
);
515 dummy
= tcg_const_i64(0);
529 local_cc_op
= tcg_const_i32(s
->cc_op
);
545 /* s->cc_op is the cc value */
546 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
549 /* env->cc_op already is the cc value */
564 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
569 case CC_OP_LTUGTU_32
:
570 case CC_OP_LTUGTU_64
:
577 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
592 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
595 /* unknown operation - assume 3 arguments and cc_op in env */
596 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
602 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
603 tcg_temp_free_i32(local_cc_op
);
605 if (!TCGV_IS_UNUSED_I64(dummy
)) {
606 tcg_temp_free_i64(dummy
);
609 /* We now have cc in cc_op as constant */
613 static bool use_exit_tb(DisasContext
*s
)
615 return (s
->singlestep_enabled
||
616 (s
->tb
->cflags
& CF_LAST_IO
) ||
617 (s
->tb
->flags
& FLAG_MASK_PER
));
620 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
622 if (unlikely(use_exit_tb(s
))) {
625 #ifndef CONFIG_USER_ONLY
626 return (dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
627 (dest
& TARGET_PAGE_MASK
) == (s
->pc
& TARGET_PAGE_MASK
);
633 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
635 #ifdef DEBUG_INLINE_BRANCHES
636 inline_branch_miss
[cc_op
]++;
640 static void account_inline_branch(DisasContext
*s
, int cc_op
)
642 #ifdef DEBUG_INLINE_BRANCHES
643 inline_branch_hit
[cc_op
]++;
647 /* Table of mask values to comparison codes, given a comparison as input.
648 For such, CC=3 should not be possible. */
649 static const TCGCond ltgt_cond
[16] = {
650 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
651 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
652 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
653 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
654 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
655 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
656 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
657 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
660 /* Table of mask values to comparison codes, given a logic op as input.
661 For such, only CC=0 and CC=1 should be possible. */
662 static const TCGCond nz_cond
[16] = {
663 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
664 TCG_COND_NEVER
, TCG_COND_NEVER
,
665 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
666 TCG_COND_NE
, TCG_COND_NE
,
667 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
668 TCG_COND_EQ
, TCG_COND_EQ
,
669 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
670 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
673 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
674 details required to generate a TCG comparison. */
675 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
678 enum cc_op old_cc_op
= s
->cc_op
;
680 if (mask
== 15 || mask
== 0) {
681 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
684 c
->g1
= c
->g2
= true;
689 /* Find the TCG condition for the mask + cc op. */
695 cond
= ltgt_cond
[mask
];
696 if (cond
== TCG_COND_NEVER
) {
699 account_inline_branch(s
, old_cc_op
);
702 case CC_OP_LTUGTU_32
:
703 case CC_OP_LTUGTU_64
:
704 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
705 if (cond
== TCG_COND_NEVER
) {
708 account_inline_branch(s
, old_cc_op
);
712 cond
= nz_cond
[mask
];
713 if (cond
== TCG_COND_NEVER
) {
716 account_inline_branch(s
, old_cc_op
);
731 account_inline_branch(s
, old_cc_op
);
746 account_inline_branch(s
, old_cc_op
);
750 switch (mask
& 0xa) {
751 case 8: /* src == 0 -> no one bit found */
754 case 2: /* src != 0 -> one bit found */
760 account_inline_branch(s
, old_cc_op
);
766 case 8 | 2: /* vr == 0 */
769 case 4 | 1: /* vr != 0 */
772 case 8 | 4: /* no carry -> vr >= src */
775 case 2 | 1: /* carry -> vr < src */
781 account_inline_branch(s
, old_cc_op
);
786 /* Note that CC=0 is impossible; treat it as dont-care. */
788 case 2: /* zero -> op1 == op2 */
791 case 4 | 1: /* !zero -> op1 != op2 */
794 case 4: /* borrow (!carry) -> op1 < op2 */
797 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
803 account_inline_branch(s
, old_cc_op
);
808 /* Calculate cc value. */
813 /* Jump based on CC. We'll load up the real cond below;
814 the assignment here merely avoids a compiler warning. */
815 account_noninline_branch(s
, old_cc_op
);
816 old_cc_op
= CC_OP_STATIC
;
817 cond
= TCG_COND_NEVER
;
821 /* Load up the arguments of the comparison. */
823 c
->g1
= c
->g2
= false;
827 c
->u
.s32
.a
= tcg_temp_new_i32();
828 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
829 c
->u
.s32
.b
= tcg_const_i32(0);
832 case CC_OP_LTUGTU_32
:
835 c
->u
.s32
.a
= tcg_temp_new_i32();
836 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
837 c
->u
.s32
.b
= tcg_temp_new_i32();
838 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
845 c
->u
.s64
.b
= tcg_const_i64(0);
849 case CC_OP_LTUGTU_64
:
853 c
->g1
= c
->g2
= true;
859 c
->u
.s64
.a
= tcg_temp_new_i64();
860 c
->u
.s64
.b
= tcg_const_i64(0);
861 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
866 c
->u
.s32
.a
= tcg_temp_new_i32();
867 c
->u
.s32
.b
= tcg_temp_new_i32();
868 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
869 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
870 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
872 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
879 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
880 c
->u
.s64
.b
= tcg_const_i64(0);
892 case 0x8 | 0x4 | 0x2: /* cc != 3 */
894 c
->u
.s32
.b
= tcg_const_i32(3);
896 case 0x8 | 0x4 | 0x1: /* cc != 2 */
898 c
->u
.s32
.b
= tcg_const_i32(2);
900 case 0x8 | 0x2 | 0x1: /* cc != 1 */
902 c
->u
.s32
.b
= tcg_const_i32(1);
904 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
907 c
->u
.s32
.a
= tcg_temp_new_i32();
908 c
->u
.s32
.b
= tcg_const_i32(0);
909 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
911 case 0x8 | 0x4: /* cc < 2 */
913 c
->u
.s32
.b
= tcg_const_i32(2);
915 case 0x8: /* cc == 0 */
917 c
->u
.s32
.b
= tcg_const_i32(0);
919 case 0x4 | 0x2 | 0x1: /* cc != 0 */
921 c
->u
.s32
.b
= tcg_const_i32(0);
923 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
926 c
->u
.s32
.a
= tcg_temp_new_i32();
927 c
->u
.s32
.b
= tcg_const_i32(0);
928 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
930 case 0x4: /* cc == 1 */
932 c
->u
.s32
.b
= tcg_const_i32(1);
934 case 0x2 | 0x1: /* cc > 1 */
936 c
->u
.s32
.b
= tcg_const_i32(1);
938 case 0x2: /* cc == 2 */
940 c
->u
.s32
.b
= tcg_const_i32(2);
942 case 0x1: /* cc == 3 */
944 c
->u
.s32
.b
= tcg_const_i32(3);
947 /* CC is masked by something else: (8 >> cc) & mask. */
950 c
->u
.s32
.a
= tcg_const_i32(8);
951 c
->u
.s32
.b
= tcg_const_i32(0);
952 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
953 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
964 static void free_compare(DisasCompare
*c
)
968 tcg_temp_free_i64(c
->u
.s64
.a
);
970 tcg_temp_free_i32(c
->u
.s32
.a
);
975 tcg_temp_free_i64(c
->u
.s64
.b
);
977 tcg_temp_free_i32(c
->u
.s32
.b
);
982 /* ====================================================================== */
983 /* Define the insn format enumeration. */
984 #define F0(N) FMT_##N,
985 #define F1(N, X1) F0(N)
986 #define F2(N, X1, X2) F0(N)
987 #define F3(N, X1, X2, X3) F0(N)
988 #define F4(N, X1, X2, X3, X4) F0(N)
989 #define F5(N, X1, X2, X3, X4, X5) F0(N)
992 #include "insn-format.def"
1002 /* Define a structure to hold the decoded fields. We'll store each inside
1003 an array indexed by an enum. In order to conserve memory, we'll arrange
1004 for fields that do not exist at the same time to overlap, thus the "C"
1005 for compact. For checking purposes there is an "O" for original index
1006 as well that will be applied to availability bitmaps. */
1008 enum DisasFieldIndexO
{
1031 enum DisasFieldIndexC
{
1062 struct DisasFields
{
1066 unsigned presentC
:16;
1067 unsigned int presentO
;
1071 /* This is the way fields are to be accessed out of DisasFields. */
1072 #define have_field(S, F) have_field1((S), FLD_O_##F)
1073 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1075 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1077 return (f
->presentO
>> c
) & 1;
1080 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1081 enum DisasFieldIndexC c
)
1083 assert(have_field1(f
, o
));
1087 /* Describe the layout of each field in each format. */
1088 typedef struct DisasField
{
1090 unsigned int size
:8;
1091 unsigned int type
:2;
1092 unsigned int indexC
:6;
1093 enum DisasFieldIndexO indexO
:8;
1096 typedef struct DisasFormatInfo
{
1097 DisasField op
[NUM_C_FIELD
];
1100 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1101 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1102 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1103 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1104 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1105 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1106 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1107 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1108 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1109 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1110 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1111 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1112 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1113 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1115 #define F0(N) { { } },
1116 #define F1(N, X1) { { X1 } },
1117 #define F2(N, X1, X2) { { X1, X2 } },
1118 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1119 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1120 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1122 static const DisasFormatInfo format_info
[] = {
1123 #include "insn-format.def"
1141 /* Generally, we'll extract operands into this structures, operate upon
1142 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1143 of routines below for more details. */
1145 bool g_out
, g_out2
, g_in1
, g_in2
;
1146 TCGv_i64 out
, out2
, in1
, in2
;
1150 /* Instructions can place constraints on their operands, raising specification
1151 exceptions if they are violated. To make this easy to automate, each "in1",
1152 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1153 of the following, or 0. To make this easy to document, we'll put the
1154 SPEC_<name> defines next to <name>. */
1156 #define SPEC_r1_even 1
1157 #define SPEC_r2_even 2
1158 #define SPEC_r3_even 4
1159 #define SPEC_r1_f128 8
1160 #define SPEC_r2_f128 16
1162 /* Return values from translate_one, indicating the state of the TB. */
1164 /* Continue the TB. */
1166 /* We have emitted one or more goto_tb. No fixup required. */
1168 /* We are not using a goto_tb (for whatever reason), but have updated
1169 the PC (for whatever reason), so there's no need to do it again on
1172 /* We have updated the PC and CC values. */
1174 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1175 updated the PC for the next instruction to be executed. */
1177 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1178 No following code will be executed. */
1182 typedef enum DisasFacility
{
1183 FAC_Z
, /* zarch (default) */
1184 FAC_CASS
, /* compare and swap and store */
1185 FAC_CASS2
, /* compare and swap and store 2*/
1186 FAC_DFP
, /* decimal floating point */
1187 FAC_DFPR
, /* decimal floating point rounding */
1188 FAC_DO
, /* distinct operands */
1189 FAC_EE
, /* execute extensions */
1190 FAC_EI
, /* extended immediate */
1191 FAC_FPE
, /* floating point extension */
1192 FAC_FPSSH
, /* floating point support sign handling */
1193 FAC_FPRGR
, /* FPR-GR transfer */
1194 FAC_GIE
, /* general instructions extension */
1195 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1196 FAC_HW
, /* high-word */
1197 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1198 FAC_MIE
, /* miscellaneous-instruction-extensions */
1199 FAC_LAT
, /* load-and-trap */
1200 FAC_LOC
, /* load/store on condition */
1201 FAC_LD
, /* long displacement */
1202 FAC_PC
, /* population count */
1203 FAC_SCF
, /* store clock fast */
1204 FAC_SFLE
, /* store facility list extended */
1205 FAC_ILA
, /* interlocked access facility 1 */
1206 FAC_LPP
, /* load-program-parameter */
1207 FAC_DAT_ENH
, /* DAT-enhancement */
1213 DisasFacility fac
:8;
1218 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1219 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1220 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1221 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1222 void (*help_cout
)(DisasContext
*, DisasOps
*);
1223 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1228 /* ====================================================================== */
1229 /* Miscellaneous helpers, used by several operations. */
1231 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1232 DisasOps
*o
, int mask
)
1234 int b2
= get_field(f
, b2
);
1235 int d2
= get_field(f
, d2
);
1238 o
->in2
= tcg_const_i64(d2
& mask
);
1240 o
->in2
= get_address(s
, 0, b2
, d2
);
1241 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1245 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1247 if (dest
== s
->next_pc
) {
1248 per_branch(s
, true);
1251 if (use_goto_tb(s
, dest
)) {
1253 per_breaking_event(s
);
1255 tcg_gen_movi_i64(psw_addr
, dest
);
1256 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1257 return EXIT_GOTO_TB
;
1259 tcg_gen_movi_i64(psw_addr
, dest
);
1260 per_branch(s
, false);
1261 return EXIT_PC_UPDATED
;
1265 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1266 bool is_imm
, int imm
, TCGv_i64 cdest
)
1269 uint64_t dest
= s
->pc
+ 2 * imm
;
1272 /* Take care of the special cases first. */
1273 if (c
->cond
== TCG_COND_NEVER
) {
1278 if (dest
== s
->next_pc
) {
1279 /* Branch to next. */
1280 per_branch(s
, true);
1284 if (c
->cond
== TCG_COND_ALWAYS
) {
1285 ret
= help_goto_direct(s
, dest
);
1289 if (TCGV_IS_UNUSED_I64(cdest
)) {
1290 /* E.g. bcr %r0 -> no branch. */
1294 if (c
->cond
== TCG_COND_ALWAYS
) {
1295 tcg_gen_mov_i64(psw_addr
, cdest
);
1296 per_branch(s
, false);
1297 ret
= EXIT_PC_UPDATED
;
1302 if (use_goto_tb(s
, s
->next_pc
)) {
1303 if (is_imm
&& use_goto_tb(s
, dest
)) {
1304 /* Both exits can use goto_tb. */
1307 lab
= gen_new_label();
1309 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1311 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1314 /* Branch not taken. */
1316 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1317 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1321 per_breaking_event(s
);
1323 tcg_gen_movi_i64(psw_addr
, dest
);
1324 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1328 /* Fallthru can use goto_tb, but taken branch cannot. */
1329 /* Store taken branch destination before the brcond. This
1330 avoids having to allocate a new local temp to hold it.
1331 We'll overwrite this in the not taken case anyway. */
1333 tcg_gen_mov_i64(psw_addr
, cdest
);
1336 lab
= gen_new_label();
1338 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1340 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1343 /* Branch not taken. */
1346 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1347 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1351 tcg_gen_movi_i64(psw_addr
, dest
);
1353 per_breaking_event(s
);
1354 ret
= EXIT_PC_UPDATED
;
1357 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1358 Most commonly we're single-stepping or some other condition that
1359 disables all use of goto_tb. Just update the PC and exit. */
1361 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1363 cdest
= tcg_const_i64(dest
);
1367 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1369 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1371 TCGv_i32 t0
= tcg_temp_new_i32();
1372 TCGv_i64 t1
= tcg_temp_new_i64();
1373 TCGv_i64 z
= tcg_const_i64(0);
1374 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1375 tcg_gen_extu_i32_i64(t1
, t0
);
1376 tcg_temp_free_i32(t0
);
1377 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1378 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1379 tcg_temp_free_i64(t1
);
1380 tcg_temp_free_i64(z
);
1384 tcg_temp_free_i64(cdest
);
1386 tcg_temp_free_i64(next
);
1388 ret
= EXIT_PC_UPDATED
;
1396 /* ====================================================================== */
1397 /* The operations. These perform the bulk of the work for any insn,
1398 usually after the operands have been loaded and output initialized. */
1400 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1403 z
= tcg_const_i64(0);
1404 n
= tcg_temp_new_i64();
1405 tcg_gen_neg_i64(n
, o
->in2
);
1406 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1407 tcg_temp_free_i64(n
);
1408 tcg_temp_free_i64(z
);
1412 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1414 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1418 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1420 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1424 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1426 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1427 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1431 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1433 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1437 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1442 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1444 /* The carry flag is the msb of CC, therefore the branch mask that would
1445 create that comparison is 3. Feeding the generated comparison to
1446 setcond produces the carry flag that we desire. */
1447 disas_jcc(s
, &cmp
, 3);
1448 carry
= tcg_temp_new_i64();
1450 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1452 TCGv_i32 t
= tcg_temp_new_i32();
1453 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1454 tcg_gen_extu_i32_i64(carry
, t
);
1455 tcg_temp_free_i32(t
);
1459 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1460 tcg_temp_free_i64(carry
);
1464 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1466 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1470 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1472 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1476 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1478 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1479 return_low128(o
->out2
);
1483 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1485 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1489 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1491 int shift
= s
->insn
->data
& 0xff;
1492 int size
= s
->insn
->data
>> 8;
1493 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1496 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1497 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1498 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1500 /* Produce the CC from only the bits manipulated. */
1501 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1502 set_cc_nz_u64(s
, cc_dst
);
1506 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1508 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1509 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1510 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1511 per_branch(s
, false);
1512 return EXIT_PC_UPDATED
;
1518 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1520 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1521 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1524 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1526 int m1
= get_field(s
->fields
, m1
);
1527 bool is_imm
= have_field(s
->fields
, i2
);
1528 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1531 /* BCR with R2 = 0 causes no branching */
1532 if (have_field(s
->fields
, r2
) && get_field(s
->fields
, r2
) == 0) {
1534 /* Perform serialization */
1535 /* FIXME: check for fast-BCR-serialization facility */
1536 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1539 /* Perform serialization */
1540 /* FIXME: perform checkpoint-synchronisation */
1541 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1546 disas_jcc(s
, &c
, m1
);
1547 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1550 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1552 int r1
= get_field(s
->fields
, r1
);
1553 bool is_imm
= have_field(s
->fields
, i2
);
1554 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1558 c
.cond
= TCG_COND_NE
;
1563 t
= tcg_temp_new_i64();
1564 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1565 store_reg32_i64(r1
, t
);
1566 c
.u
.s32
.a
= tcg_temp_new_i32();
1567 c
.u
.s32
.b
= tcg_const_i32(0);
1568 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1569 tcg_temp_free_i64(t
);
1571 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1574 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1576 int r1
= get_field(s
->fields
, r1
);
1577 int imm
= get_field(s
->fields
, i2
);
1581 c
.cond
= TCG_COND_NE
;
1586 t
= tcg_temp_new_i64();
1587 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1588 tcg_gen_subi_i64(t
, t
, 1);
1589 store_reg32h_i64(r1
, t
);
1590 c
.u
.s32
.a
= tcg_temp_new_i32();
1591 c
.u
.s32
.b
= tcg_const_i32(0);
1592 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1593 tcg_temp_free_i64(t
);
1595 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1598 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1600 int r1
= get_field(s
->fields
, r1
);
1601 bool is_imm
= have_field(s
->fields
, i2
);
1602 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1605 c
.cond
= TCG_COND_NE
;
1610 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1611 c
.u
.s64
.a
= regs
[r1
];
1612 c
.u
.s64
.b
= tcg_const_i64(0);
1614 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1617 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1619 int r1
= get_field(s
->fields
, r1
);
1620 int r3
= get_field(s
->fields
, r3
);
1621 bool is_imm
= have_field(s
->fields
, i2
);
1622 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1626 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1631 t
= tcg_temp_new_i64();
1632 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1633 c
.u
.s32
.a
= tcg_temp_new_i32();
1634 c
.u
.s32
.b
= tcg_temp_new_i32();
1635 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1636 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1637 store_reg32_i64(r1
, t
);
1638 tcg_temp_free_i64(t
);
1640 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1643 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1645 int r1
= get_field(s
->fields
, r1
);
1646 int r3
= get_field(s
->fields
, r3
);
1647 bool is_imm
= have_field(s
->fields
, i2
);
1648 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1651 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1654 if (r1
== (r3
| 1)) {
1655 c
.u
.s64
.b
= load_reg(r3
| 1);
1658 c
.u
.s64
.b
= regs
[r3
| 1];
1662 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1663 c
.u
.s64
.a
= regs
[r1
];
1666 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1669 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1671 int imm
, m3
= get_field(s
->fields
, m3
);
1675 c
.cond
= ltgt_cond
[m3
];
1676 if (s
->insn
->data
) {
1677 c
.cond
= tcg_unsigned_cond(c
.cond
);
1679 c
.is_64
= c
.g1
= c
.g2
= true;
1683 is_imm
= have_field(s
->fields
, i4
);
1685 imm
= get_field(s
->fields
, i4
);
1688 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1689 get_field(s
->fields
, d4
));
1692 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1695 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1697 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1702 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1704 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1709 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1711 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1716 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1718 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1719 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1720 tcg_temp_free_i32(m3
);
1721 gen_set_cc_nz_f32(s
, o
->in2
);
1725 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1727 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1728 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1729 tcg_temp_free_i32(m3
);
1730 gen_set_cc_nz_f64(s
, o
->in2
);
1734 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1736 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1737 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1738 tcg_temp_free_i32(m3
);
1739 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1743 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1745 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1746 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1747 tcg_temp_free_i32(m3
);
1748 gen_set_cc_nz_f32(s
, o
->in2
);
1752 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1754 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1755 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1756 tcg_temp_free_i32(m3
);
1757 gen_set_cc_nz_f64(s
, o
->in2
);
1761 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1763 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1764 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1765 tcg_temp_free_i32(m3
);
1766 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1770 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1772 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1773 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1774 tcg_temp_free_i32(m3
);
1775 gen_set_cc_nz_f32(s
, o
->in2
);
1779 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1781 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1782 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1783 tcg_temp_free_i32(m3
);
1784 gen_set_cc_nz_f64(s
, o
->in2
);
1788 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1790 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1791 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1792 tcg_temp_free_i32(m3
);
1793 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1797 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1799 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1800 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1801 tcg_temp_free_i32(m3
);
1802 gen_set_cc_nz_f32(s
, o
->in2
);
1806 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1808 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1809 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1810 tcg_temp_free_i32(m3
);
1811 gen_set_cc_nz_f64(s
, o
->in2
);
1815 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1817 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1818 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1819 tcg_temp_free_i32(m3
);
1820 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1824 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1826 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1827 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1828 tcg_temp_free_i32(m3
);
1832 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1834 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1835 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1836 tcg_temp_free_i32(m3
);
1840 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1842 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1843 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1844 tcg_temp_free_i32(m3
);
1845 return_low128(o
->out2
);
1849 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1851 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1852 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1853 tcg_temp_free_i32(m3
);
1857 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1859 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1860 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1861 tcg_temp_free_i32(m3
);
1865 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1867 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1868 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1869 tcg_temp_free_i32(m3
);
1870 return_low128(o
->out2
);
1874 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1876 int r2
= get_field(s
->fields
, r2
);
1877 TCGv_i64 len
= tcg_temp_new_i64();
1879 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1881 return_low128(o
->out
);
1883 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1884 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1885 tcg_temp_free_i64(len
);
1890 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1892 int l
= get_field(s
->fields
, l1
);
1897 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1898 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1901 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1902 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1905 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1906 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1909 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1910 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1913 vl
= tcg_const_i32(l
);
1914 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1915 tcg_temp_free_i32(vl
);
1919 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1923 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1925 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1926 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1927 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1928 tcg_temp_free_i32(r1
);
1929 tcg_temp_free_i32(r3
);
1934 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1936 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1937 TCGv_i32 t1
= tcg_temp_new_i32();
1938 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1939 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1941 tcg_temp_free_i32(t1
);
1942 tcg_temp_free_i32(m3
);
1946 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1948 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1950 return_low128(o
->in2
);
1954 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1956 TCGv_i64 t
= tcg_temp_new_i64();
1957 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1958 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1959 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1960 tcg_temp_free_i64(t
);
1964 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1966 int d2
= get_field(s
->fields
, d2
);
1967 int b2
= get_field(s
->fields
, b2
);
1970 /* Note that in1 = R3 (new value) and
1971 in2 = (zero-extended) R1 (expected value). */
1973 addr
= get_address(s
, 0, b2
, d2
);
1974 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
1975 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
1976 tcg_temp_free_i64(addr
);
1978 /* Are the memory and expected values (un)equal? Note that this setcond
1979 produces the output CC value, thus the NE sense of the test. */
1980 cc
= tcg_temp_new_i64();
1981 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1982 tcg_gen_extrl_i64_i32(cc_op
, cc
);
1983 tcg_temp_free_i64(cc
);
1989 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1991 int r1
= get_field(s
->fields
, r1
);
1992 int r3
= get_field(s
->fields
, r3
);
1993 int d2
= get_field(s
->fields
, d2
);
1994 int b2
= get_field(s
->fields
, b2
);
1996 TCGv_i32 t_r1
, t_r3
;
1998 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1999 addr
= get_address(s
, 0, b2
, d2
);
2000 t_r1
= tcg_const_i32(r1
);
2001 t_r3
= tcg_const_i32(r3
);
2002 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
2003 tcg_temp_free_i64(addr
);
2004 tcg_temp_free_i32(t_r1
);
2005 tcg_temp_free_i32(t_r3
);
2011 #ifndef CONFIG_USER_ONLY
2012 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
2014 TCGMemOp mop
= s
->insn
->data
;
2015 TCGv_i64 addr
, old
, cc
;
2016 TCGLabel
*lab
= gen_new_label();
2018 /* Note that in1 = R1 (zero-extended expected value),
2019 out = R1 (original reg), out2 = R1+1 (new value). */
2021 check_privileged(s
);
2022 addr
= tcg_temp_new_i64();
2023 old
= tcg_temp_new_i64();
2024 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2025 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2026 get_mem_index(s
), mop
| MO_ALIGN
);
2027 tcg_temp_free_i64(addr
);
2029 /* Are the memory and expected values (un)equal? */
2030 cc
= tcg_temp_new_i64();
2031 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2032 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2034 /* Write back the output now, so that it happens before the
2035 following branch, so that we don't need local temps. */
2036 if ((mop
& MO_SIZE
) == MO_32
) {
2037 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2039 tcg_gen_mov_i64(o
->out
, old
);
2041 tcg_temp_free_i64(old
);
2043 /* If the comparison was equal, and the LSB of R2 was set,
2044 then we need to flush the TLB (for all cpus). */
2045 tcg_gen_xori_i64(cc
, cc
, 1);
2046 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2047 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2048 tcg_temp_free_i64(cc
);
2050 gen_helper_purge(cpu_env
);
2057 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2059 TCGv_i64 t1
= tcg_temp_new_i64();
2060 TCGv_i32 t2
= tcg_temp_new_i32();
2061 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2062 gen_helper_cvd(t1
, t2
);
2063 tcg_temp_free_i32(t2
);
2064 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2065 tcg_temp_free_i64(t1
);
2069 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2071 int m3
= get_field(s
->fields
, m3
);
2072 TCGLabel
*lab
= gen_new_label();
2075 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2076 if (s
->insn
->data
) {
2077 c
= tcg_unsigned_cond(c
);
2079 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2088 #ifndef CONFIG_USER_ONLY
2089 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2091 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2092 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2093 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2095 check_privileged(s
);
2099 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2101 tcg_temp_free_i32(func_code
);
2102 tcg_temp_free_i32(r3
);
2103 tcg_temp_free_i32(r1
);
2108 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2110 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2111 return_low128(o
->out
);
2115 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2117 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2118 return_low128(o
->out
);
2122 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2124 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2125 return_low128(o
->out
);
2129 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2131 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2132 return_low128(o
->out
);
2136 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2138 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2142 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2144 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2148 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2150 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2151 return_low128(o
->out2
);
2155 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2157 int r2
= get_field(s
->fields
, r2
);
2158 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2162 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2164 /* No cache information provided. */
2165 tcg_gen_movi_i64(o
->out
, -1);
2169 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2171 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2175 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2177 int r1
= get_field(s
->fields
, r1
);
2178 int r2
= get_field(s
->fields
, r2
);
2179 TCGv_i64 t
= tcg_temp_new_i64();
2181 /* Note the "subsequently" in the PoO, which implies a defined result
2182 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2183 tcg_gen_shri_i64(t
, psw_mask
, 32);
2184 store_reg32_i64(r1
, t
);
2186 store_reg32_i64(r2
, psw_mask
);
2189 tcg_temp_free_i64(t
);
2193 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2195 int r1
= get_field(s
->fields
, r1
);
2199 /* Nested EXECUTE is not allowed. */
2200 if (unlikely(s
->ex_value
)) {
2201 gen_program_exception(s
, PGM_EXECUTE
);
2202 return EXIT_NORETURN
;
2209 v1
= tcg_const_i64(0);
2214 ilen
= tcg_const_i32(s
->ilen
);
2215 gen_helper_ex(cpu_env
, ilen
, v1
, o
->in2
);
2216 tcg_temp_free_i32(ilen
);
2219 tcg_temp_free_i64(v1
);
2222 return EXIT_PC_CC_UPDATED
;
2225 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2227 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2228 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2229 tcg_temp_free_i32(m3
);
2233 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2235 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2236 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2237 tcg_temp_free_i32(m3
);
2241 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2243 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2244 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2245 return_low128(o
->out2
);
2246 tcg_temp_free_i32(m3
);
2250 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2252 /* We'll use the original input for cc computation, since we get to
2253 compare that against 0, which ought to be better than comparing
2254 the real output against 64. It also lets cc_dst be a convenient
2255 temporary during our computation. */
2256 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2258 /* R1 = IN ? CLZ(IN) : 64. */
2259 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2261 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2262 value by 64, which is undefined. But since the shift is 64 iff the
2263 input is zero, we still get the correct result after and'ing. */
2264 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2265 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2266 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2270 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2272 int m3
= get_field(s
->fields
, m3
);
2273 int pos
, len
, base
= s
->insn
->data
;
2274 TCGv_i64 tmp
= tcg_temp_new_i64();
2279 /* Effectively a 32-bit load. */
2280 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2287 /* Effectively a 16-bit load. */
2288 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2296 /* Effectively an 8-bit load. */
2297 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2302 pos
= base
+ ctz32(m3
) * 8;
2303 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2304 ccm
= ((1ull << len
) - 1) << pos
;
2308 /* This is going to be a sequence of loads and inserts. */
2309 pos
= base
+ 32 - 8;
2313 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2314 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2315 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2318 m3
= (m3
<< 1) & 0xf;
2324 tcg_gen_movi_i64(tmp
, ccm
);
2325 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2326 tcg_temp_free_i64(tmp
);
2330 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2332 int shift
= s
->insn
->data
& 0xff;
2333 int size
= s
->insn
->data
>> 8;
2334 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2338 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2343 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2345 t1
= tcg_temp_new_i64();
2346 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2347 tcg_gen_shri_i64(t1
, t1
, 36);
2348 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2350 tcg_gen_extu_i32_i64(t1
, cc_op
);
2351 tcg_gen_shli_i64(t1
, t1
, 28);
2352 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2353 tcg_temp_free_i64(t1
);
2357 #ifndef CONFIG_USER_ONLY
2358 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2362 check_privileged(s
);
2363 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2364 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
, m4
);
2365 tcg_temp_free_i32(m4
);
2369 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2371 check_privileged(s
);
2372 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2377 static ExitStatus
op_keb(DisasContext
*s
, DisasOps
*o
)
2379 gen_helper_keb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2384 static ExitStatus
op_kdb(DisasContext
*s
, DisasOps
*o
)
2386 gen_helper_kdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2391 static ExitStatus
op_kxb(DisasContext
*s
, DisasOps
*o
)
2393 gen_helper_kxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2398 static ExitStatus
op_laa(DisasContext
*s
, DisasOps
*o
)
2400 /* The real output is indeed the original value in memory;
2401 recompute the addition for the computation of CC. */
2402 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2403 s
->insn
->data
| MO_ALIGN
);
2404 /* However, we need to recompute the addition for setting CC. */
2405 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2409 static ExitStatus
op_lan(DisasContext
*s
, DisasOps
*o
)
2411 /* The real output is indeed the original value in memory;
2412 recompute the addition for the computation of CC. */
2413 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2414 s
->insn
->data
| MO_ALIGN
);
2415 /* However, we need to recompute the operation for setting CC. */
2416 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2420 static ExitStatus
op_lao(DisasContext
*s
, DisasOps
*o
)
2422 /* The real output is indeed the original value in memory;
2423 recompute the addition for the computation of CC. */
2424 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2425 s
->insn
->data
| MO_ALIGN
);
2426 /* However, we need to recompute the operation for setting CC. */
2427 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2431 static ExitStatus
op_lax(DisasContext
*s
, DisasOps
*o
)
2433 /* The real output is indeed the original value in memory;
2434 recompute the addition for the computation of CC. */
2435 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2436 s
->insn
->data
| MO_ALIGN
);
2437 /* However, we need to recompute the operation for setting CC. */
2438 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2442 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2444 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2448 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2450 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2454 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2456 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2460 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2462 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2466 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2468 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2469 return_low128(o
->out2
);
2473 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2475 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2476 return_low128(o
->out2
);
2480 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2482 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2486 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2488 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2492 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2494 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2498 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2500 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2504 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2506 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2510 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2512 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2516 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2518 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2522 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2524 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2528 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2530 TCGLabel
*lab
= gen_new_label();
2531 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2532 /* The value is stored even in case of trap. */
2533 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2539 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2541 TCGLabel
*lab
= gen_new_label();
2542 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2543 /* The value is stored even in case of trap. */
2544 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2550 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2552 TCGLabel
*lab
= gen_new_label();
2553 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2554 /* The value is stored even in case of trap. */
2555 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2561 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2563 TCGLabel
*lab
= gen_new_label();
2564 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2565 /* The value is stored even in case of trap. */
2566 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2572 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2574 TCGLabel
*lab
= gen_new_label();
2575 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2576 /* The value is stored even in case of trap. */
2577 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2583 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2587 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2590 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2594 TCGv_i32 t32
= tcg_temp_new_i32();
2597 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2600 t
= tcg_temp_new_i64();
2601 tcg_gen_extu_i32_i64(t
, t32
);
2602 tcg_temp_free_i32(t32
);
2604 z
= tcg_const_i64(0);
2605 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2606 tcg_temp_free_i64(t
);
2607 tcg_temp_free_i64(z
);
2613 #ifndef CONFIG_USER_ONLY
2614 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2616 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2617 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2618 check_privileged(s
);
2619 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2620 tcg_temp_free_i32(r1
);
2621 tcg_temp_free_i32(r3
);
2625 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2627 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2628 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2629 check_privileged(s
);
2630 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2631 tcg_temp_free_i32(r1
);
2632 tcg_temp_free_i32(r3
);
2636 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2638 check_privileged(s
);
2639 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2644 static ExitStatus
op_lpp(DisasContext
*s
, DisasOps
*o
)
2646 check_privileged(s
);
2648 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
2652 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2656 check_privileged(s
);
2657 per_breaking_event(s
);
2659 t1
= tcg_temp_new_i64();
2660 t2
= tcg_temp_new_i64();
2661 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2662 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2663 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2664 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2665 tcg_gen_shli_i64(t1
, t1
, 32);
2666 gen_helper_load_psw(cpu_env
, t1
, t2
);
2667 tcg_temp_free_i64(t1
);
2668 tcg_temp_free_i64(t2
);
2669 return EXIT_NORETURN
;
2672 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2676 check_privileged(s
);
2677 per_breaking_event(s
);
2679 t1
= tcg_temp_new_i64();
2680 t2
= tcg_temp_new_i64();
2681 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2682 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2683 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2684 gen_helper_load_psw(cpu_env
, t1
, t2
);
2685 tcg_temp_free_i64(t1
);
2686 tcg_temp_free_i64(t2
);
2687 return EXIT_NORETURN
;
2691 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2693 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2694 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2695 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2696 tcg_temp_free_i32(r1
);
2697 tcg_temp_free_i32(r3
);
2701 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2703 int r1
= get_field(s
->fields
, r1
);
2704 int r3
= get_field(s
->fields
, r3
);
2707 /* Only one register to read. */
2708 t1
= tcg_temp_new_i64();
2709 if (unlikely(r1
== r3
)) {
2710 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2711 store_reg32_i64(r1
, t1
);
2716 /* First load the values of the first and last registers to trigger
2717 possible page faults. */
2718 t2
= tcg_temp_new_i64();
2719 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2720 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2721 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2722 store_reg32_i64(r1
, t1
);
2723 store_reg32_i64(r3
, t2
);
2725 /* Only two registers to read. */
2726 if (((r1
+ 1) & 15) == r3
) {
2732 /* Then load the remaining registers. Page fault can't occur. */
2734 tcg_gen_movi_i64(t2
, 4);
2737 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2738 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2739 store_reg32_i64(r1
, t1
);
2747 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2749 int r1
= get_field(s
->fields
, r1
);
2750 int r3
= get_field(s
->fields
, r3
);
2753 /* Only one register to read. */
2754 t1
= tcg_temp_new_i64();
2755 if (unlikely(r1
== r3
)) {
2756 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2757 store_reg32h_i64(r1
, t1
);
2762 /* First load the values of the first and last registers to trigger
2763 possible page faults. */
2764 t2
= tcg_temp_new_i64();
2765 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2766 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2767 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2768 store_reg32h_i64(r1
, t1
);
2769 store_reg32h_i64(r3
, t2
);
2771 /* Only two registers to read. */
2772 if (((r1
+ 1) & 15) == r3
) {
2778 /* Then load the remaining registers. Page fault can't occur. */
2780 tcg_gen_movi_i64(t2
, 4);
2783 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2784 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2785 store_reg32h_i64(r1
, t1
);
2793 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2795 int r1
= get_field(s
->fields
, r1
);
2796 int r3
= get_field(s
->fields
, r3
);
2799 /* Only one register to read. */
2800 if (unlikely(r1
== r3
)) {
2801 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2805 /* First load the values of the first and last registers to trigger
2806 possible page faults. */
2807 t1
= tcg_temp_new_i64();
2808 t2
= tcg_temp_new_i64();
2809 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2810 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2811 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2812 tcg_gen_mov_i64(regs
[r1
], t1
);
2815 /* Only two registers to read. */
2816 if (((r1
+ 1) & 15) == r3
) {
2821 /* Then load the remaining registers. Page fault can't occur. */
2823 tcg_gen_movi_i64(t1
, 8);
2826 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2827 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2834 static ExitStatus
op_lpd(DisasContext
*s
, DisasOps
*o
)
2837 TCGMemOp mop
= s
->insn
->data
;
2839 /* In a parallel context, stop the world and single step. */
2840 if (parallel_cpus
) {
2841 potential_page_fault(s
);
2842 gen_exception(EXCP_ATOMIC
);
2843 return EXIT_NORETURN
;
2846 /* In a serial context, perform the two loads ... */
2847 a1
= get_address(s
, 0, get_field(s
->fields
, b1
), get_field(s
->fields
, d1
));
2848 a2
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
2849 tcg_gen_qemu_ld_i64(o
->out
, a1
, get_mem_index(s
), mop
| MO_ALIGN
);
2850 tcg_gen_qemu_ld_i64(o
->out2
, a2
, get_mem_index(s
), mop
| MO_ALIGN
);
2851 tcg_temp_free_i64(a1
);
2852 tcg_temp_free_i64(a2
);
2854 /* ... and indicate that we performed them while interlocked. */
2855 gen_op_movi_cc(s
, 0);
2859 #ifndef CONFIG_USER_ONLY
2860 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2862 check_privileged(s
);
2863 potential_page_fault(s
);
2864 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2868 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2870 check_privileged(s
);
2871 potential_page_fault(s
);
2872 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2877 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2880 o
->g_out
= o
->g_in2
;
2881 TCGV_UNUSED_I64(o
->in2
);
2886 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
2888 int b2
= get_field(s
->fields
, b2
);
2889 TCGv ar1
= tcg_temp_new_i64();
2892 o
->g_out
= o
->g_in2
;
2893 TCGV_UNUSED_I64(o
->in2
);
2896 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
2897 case PSW_ASC_PRIMARY
>> 32:
2898 tcg_gen_movi_i64(ar1
, 0);
2900 case PSW_ASC_ACCREG
>> 32:
2901 tcg_gen_movi_i64(ar1
, 1);
2903 case PSW_ASC_SECONDARY
>> 32:
2905 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
2907 tcg_gen_movi_i64(ar1
, 0);
2910 case PSW_ASC_HOME
>> 32:
2911 tcg_gen_movi_i64(ar1
, 2);
2915 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
2916 tcg_temp_free_i64(ar1
);
2921 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2925 o
->g_out
= o
->g_in1
;
2926 o
->g_out2
= o
->g_in2
;
2927 TCGV_UNUSED_I64(o
->in1
);
2928 TCGV_UNUSED_I64(o
->in2
);
2929 o
->g_in1
= o
->g_in2
= false;
2933 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2935 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2936 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2937 tcg_temp_free_i32(l
);
2941 static ExitStatus
op_mvcin(DisasContext
*s
, DisasOps
*o
)
2943 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2944 gen_helper_mvcin(cpu_env
, l
, o
->addr1
, o
->in2
);
2945 tcg_temp_free_i32(l
);
2949 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2951 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2952 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2953 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2954 tcg_temp_free_i32(r1
);
2955 tcg_temp_free_i32(r2
);
2960 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2962 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2963 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2964 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2965 tcg_temp_free_i32(r1
);
2966 tcg_temp_free_i32(r3
);
2971 #ifndef CONFIG_USER_ONLY
2972 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2974 int r1
= get_field(s
->fields
, l1
);
2975 check_privileged(s
);
2976 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2981 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2983 int r1
= get_field(s
->fields
, l1
);
2984 check_privileged(s
);
2985 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2991 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2993 gen_helper_mvpg(cc_op
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2998 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
3000 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3002 return_low128(o
->in2
);
3006 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
3008 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3012 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
3014 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
3018 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
3020 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3024 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
3026 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3030 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
3032 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3036 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
3038 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3039 return_low128(o
->out2
);
3043 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
3045 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3046 return_low128(o
->out2
);
3050 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
3052 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3053 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3054 tcg_temp_free_i64(r3
);
3058 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
3060 int r3
= get_field(s
->fields
, r3
);
3061 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3065 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
3067 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3068 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3069 tcg_temp_free_i64(r3
);
3073 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
3075 int r3
= get_field(s
->fields
, r3
);
3076 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3080 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
3083 z
= tcg_const_i64(0);
3084 n
= tcg_temp_new_i64();
3085 tcg_gen_neg_i64(n
, o
->in2
);
3086 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
3087 tcg_temp_free_i64(n
);
3088 tcg_temp_free_i64(z
);
3092 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
3094 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3098 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
3100 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3104 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
3106 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3107 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3111 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
3113 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3114 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3115 tcg_temp_free_i32(l
);
3120 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3122 tcg_gen_neg_i64(o
->out
, o
->in2
);
3126 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
3128 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3132 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
3134 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3138 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
3140 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3141 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3145 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3147 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3148 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3149 tcg_temp_free_i32(l
);
3154 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3156 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3160 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3162 int shift
= s
->insn
->data
& 0xff;
3163 int size
= s
->insn
->data
>> 8;
3164 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3167 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3168 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3170 /* Produce the CC from only the bits manipulated. */
3171 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3172 set_cc_nz_u64(s
, cc_dst
);
3176 static ExitStatus
op_pack(DisasContext
*s
, DisasOps
*o
)
3178 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3179 gen_helper_pack(cpu_env
, l
, o
->addr1
, o
->in2
);
3180 tcg_temp_free_i32(l
);
3184 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3186 gen_helper_popcnt(o
->out
, o
->in2
);
3190 #ifndef CONFIG_USER_ONLY
3191 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3193 check_privileged(s
);
3194 gen_helper_ptlb(cpu_env
);
3199 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3201 int i3
= get_field(s
->fields
, i3
);
3202 int i4
= get_field(s
->fields
, i4
);
3203 int i5
= get_field(s
->fields
, i5
);
3204 int do_zero
= i4
& 0x80;
3205 uint64_t mask
, imask
, pmask
;
3208 /* Adjust the arguments for the specific insn. */
3209 switch (s
->fields
->op2
) {
3210 case 0x55: /* risbg */
3215 case 0x5d: /* risbhg */
3218 pmask
= 0xffffffff00000000ull
;
3220 case 0x51: /* risblg */
3223 pmask
= 0x00000000ffffffffull
;
3229 /* MASK is the set of bits to be inserted from R2.
3230 Take care for I3/I4 wraparound. */
3233 mask
^= pmask
>> i4
>> 1;
3235 mask
|= ~(pmask
>> i4
>> 1);
3239 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3240 insns, we need to keep the other half of the register. */
3241 imask
= ~mask
| ~pmask
;
3243 if (s
->fields
->op2
== 0x55) {
3253 if (s
->fields
->op2
== 0x5d) {
3257 /* In some cases we can implement this with extract. */
3258 if (imask
== 0 && pos
== 0 && len
> 0 && rot
+ len
<= 64) {
3259 tcg_gen_extract_i64(o
->out
, o
->in2
, rot
, len
);
3263 /* In some cases we can implement this with deposit. */
3264 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3265 /* Note that we rotate the bits to be inserted to the lsb, not to
3266 the position as described in the PoO. */
3267 rot
= (rot
- pos
) & 63;
3272 /* Rotate the input as necessary. */
3273 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3275 /* Insert the selected bits into the output. */
3278 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3280 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3282 } else if (imask
== 0) {
3283 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3285 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3286 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3287 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3292 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3294 int i3
= get_field(s
->fields
, i3
);
3295 int i4
= get_field(s
->fields
, i4
);
3296 int i5
= get_field(s
->fields
, i5
);
3299 /* If this is a test-only form, arrange to discard the result. */
3301 o
->out
= tcg_temp_new_i64();
3309 /* MASK is the set of bits to be operated on from R2.
3310 Take care for I3/I4 wraparound. */
3313 mask
^= ~0ull >> i4
>> 1;
3315 mask
|= ~(~0ull >> i4
>> 1);
3318 /* Rotate the input as necessary. */
3319 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3322 switch (s
->fields
->op2
) {
3323 case 0x55: /* AND */
3324 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3325 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3328 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3329 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3331 case 0x57: /* XOR */
3332 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3333 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3340 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3341 set_cc_nz_u64(s
, cc_dst
);
3345 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
3347 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3351 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
3353 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3357 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
3359 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3363 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3365 TCGv_i32 t1
= tcg_temp_new_i32();
3366 TCGv_i32 t2
= tcg_temp_new_i32();
3367 TCGv_i32 to
= tcg_temp_new_i32();
3368 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3369 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3370 tcg_gen_rotl_i32(to
, t1
, t2
);
3371 tcg_gen_extu_i32_i64(o
->out
, to
);
3372 tcg_temp_free_i32(t1
);
3373 tcg_temp_free_i32(t2
);
3374 tcg_temp_free_i32(to
);
3378 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3380 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3384 #ifndef CONFIG_USER_ONLY
3385 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3387 check_privileged(s
);
3388 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3393 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
3395 check_privileged(s
);
3396 gen_helper_sacf(cpu_env
, o
->in2
);
3397 /* Addressing mode has changed, so end the block. */
3398 return EXIT_PC_STALE
;
3402 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3404 int sam
= s
->insn
->data
;
3420 /* Bizarre but true, we check the address of the current insn for the
3421 specification exception, not the next to be executed. Thus the PoO
3422 documents that Bad Things Happen two bytes before the end. */
3423 if (s
->pc
& ~mask
) {
3424 gen_program_exception(s
, PGM_SPECIFICATION
);
3425 return EXIT_NORETURN
;
3429 tsam
= tcg_const_i64(sam
);
3430 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3431 tcg_temp_free_i64(tsam
);
3433 /* Always exit the TB, since we (may have) changed execution mode. */
3434 return EXIT_PC_STALE
;
3437 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3439 int r1
= get_field(s
->fields
, r1
);
3440 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3444 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3446 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3450 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3452 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3456 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3458 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3459 return_low128(o
->out2
);
3463 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3465 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3469 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3471 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3475 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3477 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3478 return_low128(o
->out2
);
3482 #ifndef CONFIG_USER_ONLY
3483 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3485 check_privileged(s
);
3486 potential_page_fault(s
);
3487 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3492 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3494 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3495 check_privileged(s
);
3496 potential_page_fault(s
);
3497 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3499 tcg_temp_free_i32(r1
);
3504 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3511 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3513 /* We want to store when the condition is fulfilled, so branch
3514 out when it's not */
3515 c
.cond
= tcg_invert_cond(c
.cond
);
3517 lab
= gen_new_label();
3519 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3521 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3525 r1
= get_field(s
->fields
, r1
);
3526 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3527 if (s
->insn
->data
) {
3528 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3530 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3532 tcg_temp_free_i64(a
);
3538 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3540 uint64_t sign
= 1ull << s
->insn
->data
;
3541 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3542 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3543 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3544 /* The arithmetic left shift is curious in that it does not affect
3545 the sign bit. Copy that over from the source unchanged. */
3546 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3547 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3548 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3552 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3554 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3558 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3560 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3564 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3566 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3570 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3572 gen_helper_sfpc(cpu_env
, o
->in2
);
3576 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3578 gen_helper_sfas(cpu_env
, o
->in2
);
3582 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3584 int b2
= get_field(s
->fields
, b2
);
3585 int d2
= get_field(s
->fields
, d2
);
3586 TCGv_i64 t1
= tcg_temp_new_i64();
3587 TCGv_i64 t2
= tcg_temp_new_i64();
3590 switch (s
->fields
->op2
) {
3591 case 0x99: /* SRNM */
3594 case 0xb8: /* SRNMB */
3597 case 0xb9: /* SRNMT */
3603 mask
= (1 << len
) - 1;
3605 /* Insert the value into the appropriate field of the FPC. */
3607 tcg_gen_movi_i64(t1
, d2
& mask
);
3609 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3610 tcg_gen_andi_i64(t1
, t1
, mask
);
3612 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3613 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3614 tcg_temp_free_i64(t1
);
3616 /* Then install the new FPC to set the rounding mode in fpu_status. */
3617 gen_helper_sfpc(cpu_env
, t2
);
3618 tcg_temp_free_i64(t2
);
3622 #ifndef CONFIG_USER_ONLY
3623 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3625 check_privileged(s
);
3626 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3627 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
3631 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3633 check_privileged(s
);
3634 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3638 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3640 check_privileged(s
);
3641 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3645 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3647 check_privileged(s
);
3648 /* ??? Surely cpu address != cpu number. In any case the previous
3649 version of this stored more than the required half-word, so it
3650 is unlikely this has ever been tested. */
3651 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3655 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3657 gen_helper_stck(o
->out
, cpu_env
);
3658 /* ??? We don't implement clock states. */
3659 gen_op_movi_cc(s
, 0);
3663 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3665 TCGv_i64 c1
= tcg_temp_new_i64();
3666 TCGv_i64 c2
= tcg_temp_new_i64();
3667 gen_helper_stck(c1
, cpu_env
);
3668 /* Shift the 64-bit value into its place as a zero-extended
3669 104-bit value. Note that "bit positions 64-103 are always
3670 non-zero so that they compare differently to STCK"; we set
3671 the least significant bit to 1. */
3672 tcg_gen_shli_i64(c2
, c1
, 56);
3673 tcg_gen_shri_i64(c1
, c1
, 8);
3674 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3675 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3676 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3677 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3678 tcg_temp_free_i64(c1
);
3679 tcg_temp_free_i64(c2
);
3680 /* ??? We don't implement clock states. */
3681 gen_op_movi_cc(s
, 0);
3685 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3687 check_privileged(s
);
3688 gen_helper_sckc(cpu_env
, o
->in2
);
3692 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3694 check_privileged(s
);
3695 gen_helper_stckc(o
->out
, cpu_env
);
3699 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3701 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3702 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3703 check_privileged(s
);
3704 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3705 tcg_temp_free_i32(r1
);
3706 tcg_temp_free_i32(r3
);
3710 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3712 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3713 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3714 check_privileged(s
);
3715 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3716 tcg_temp_free_i32(r1
);
3717 tcg_temp_free_i32(r3
);
3721 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3723 TCGv_i64 t1
= tcg_temp_new_i64();
3725 check_privileged(s
);
3726 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3727 tcg_gen_ld32u_i64(t1
, cpu_env
, offsetof(CPUS390XState
, machine_type
));
3728 tcg_gen_deposit_i64(o
->out
, o
->out
, t1
, 32, 32);
3729 tcg_temp_free_i64(t1
);
3734 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3736 check_privileged(s
);
3737 gen_helper_spt(cpu_env
, o
->in2
);
3741 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3743 check_privileged(s
);
3744 gen_helper_stfl(cpu_env
);
3748 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3750 check_privileged(s
);
3751 gen_helper_stpt(o
->out
, cpu_env
);
3755 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3757 check_privileged(s
);
3758 potential_page_fault(s
);
3759 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3764 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3766 check_privileged(s
);
3767 gen_helper_spx(cpu_env
, o
->in2
);
3771 static ExitStatus
op_xsch(DisasContext
*s
, DisasOps
*o
)
3773 check_privileged(s
);
3774 potential_page_fault(s
);
3775 gen_helper_xsch(cpu_env
, regs
[1]);
3780 static ExitStatus
op_csch(DisasContext
*s
, DisasOps
*o
)
3782 check_privileged(s
);
3783 potential_page_fault(s
);
3784 gen_helper_csch(cpu_env
, regs
[1]);
3789 static ExitStatus
op_hsch(DisasContext
*s
, DisasOps
*o
)
3791 check_privileged(s
);
3792 potential_page_fault(s
);
3793 gen_helper_hsch(cpu_env
, regs
[1]);
3798 static ExitStatus
op_msch(DisasContext
*s
, DisasOps
*o
)
3800 check_privileged(s
);
3801 potential_page_fault(s
);
3802 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
3807 static ExitStatus
op_rchp(DisasContext
*s
, DisasOps
*o
)
3809 check_privileged(s
);
3810 potential_page_fault(s
);
3811 gen_helper_rchp(cpu_env
, regs
[1]);
3816 static ExitStatus
op_rsch(DisasContext
*s
, DisasOps
*o
)
3818 check_privileged(s
);
3819 potential_page_fault(s
);
3820 gen_helper_rsch(cpu_env
, regs
[1]);
3825 static ExitStatus
op_ssch(DisasContext
*s
, DisasOps
*o
)
3827 check_privileged(s
);
3828 potential_page_fault(s
);
3829 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
3834 static ExitStatus
op_stsch(DisasContext
*s
, DisasOps
*o
)
3836 check_privileged(s
);
3837 potential_page_fault(s
);
3838 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
3843 static ExitStatus
op_tsch(DisasContext
*s
, DisasOps
*o
)
3845 check_privileged(s
);
3846 potential_page_fault(s
);
3847 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
3852 static ExitStatus
op_chsc(DisasContext
*s
, DisasOps
*o
)
3854 check_privileged(s
);
3855 potential_page_fault(s
);
3856 gen_helper_chsc(cpu_env
, o
->in2
);
3861 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
3863 check_privileged(s
);
3864 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
3865 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
3869 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3871 uint64_t i2
= get_field(s
->fields
, i2
);
3874 check_privileged(s
);
3876 /* It is important to do what the instruction name says: STORE THEN.
3877 If we let the output hook perform the store then if we fault and
3878 restart, we'll have the wrong SYSTEM MASK in place. */
3879 t
= tcg_temp_new_i64();
3880 tcg_gen_shri_i64(t
, psw_mask
, 56);
3881 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3882 tcg_temp_free_i64(t
);
3884 if (s
->fields
->op
== 0xac) {
3885 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3886 (i2
<< 56) | 0x00ffffffffffffffull
);
3888 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3893 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
3895 check_privileged(s
);
3896 potential_page_fault(s
);
3897 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
3901 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
3903 check_privileged(s
);
3904 potential_page_fault(s
);
3905 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
3910 static ExitStatus
op_stfle(DisasContext
*s
, DisasOps
*o
)
3912 potential_page_fault(s
);
3913 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
3918 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3920 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3924 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3926 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3930 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3932 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3936 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3938 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3942 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3944 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3945 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3946 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3947 tcg_temp_free_i32(r1
);
3948 tcg_temp_free_i32(r3
);
3952 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3954 int m3
= get_field(s
->fields
, m3
);
3955 int pos
, base
= s
->insn
->data
;
3956 TCGv_i64 tmp
= tcg_temp_new_i64();
3958 pos
= base
+ ctz32(m3
) * 8;
3961 /* Effectively a 32-bit store. */
3962 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3963 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3969 /* Effectively a 16-bit store. */
3970 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3971 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3978 /* Effectively an 8-bit store. */
3979 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3980 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3984 /* This is going to be a sequence of shifts and stores. */
3985 pos
= base
+ 32 - 8;
3988 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3989 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3990 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3992 m3
= (m3
<< 1) & 0xf;
3997 tcg_temp_free_i64(tmp
);
4001 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
4003 int r1
= get_field(s
->fields
, r1
);
4004 int r3
= get_field(s
->fields
, r3
);
4005 int size
= s
->insn
->data
;
4006 TCGv_i64 tsize
= tcg_const_i64(size
);
4010 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
4012 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
4017 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
4021 tcg_temp_free_i64(tsize
);
4025 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
4027 int r1
= get_field(s
->fields
, r1
);
4028 int r3
= get_field(s
->fields
, r3
);
4029 TCGv_i64 t
= tcg_temp_new_i64();
4030 TCGv_i64 t4
= tcg_const_i64(4);
4031 TCGv_i64 t32
= tcg_const_i64(32);
4034 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
4035 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
4039 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
4043 tcg_temp_free_i64(t
);
4044 tcg_temp_free_i64(t4
);
4045 tcg_temp_free_i64(t32
);
4049 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
4051 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
4053 return_low128(o
->in2
);
4057 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
4059 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4063 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
4068 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4070 /* The !borrow flag is the msb of CC. Since we want the inverse of
4071 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4072 disas_jcc(s
, &cmp
, 8 | 4);
4073 borrow
= tcg_temp_new_i64();
4075 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
4077 TCGv_i32 t
= tcg_temp_new_i32();
4078 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
4079 tcg_gen_extu_i32_i64(borrow
, t
);
4080 tcg_temp_free_i32(t
);
4084 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
4085 tcg_temp_free_i64(borrow
);
4089 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
4096 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
4097 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4098 tcg_temp_free_i32(t
);
4100 t
= tcg_const_i32(s
->ilen
);
4101 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4102 tcg_temp_free_i32(t
);
4104 gen_exception(EXCP_SVC
);
4105 return EXIT_NORETURN
;
4108 static ExitStatus
op_tam(DisasContext
*s
, DisasOps
*o
)
4112 cc
|= (s
->tb
->flags
& FLAG_MASK_64
) ? 2 : 0;
4113 cc
|= (s
->tb
->flags
& FLAG_MASK_32
) ? 1 : 0;
4114 gen_op_movi_cc(s
, cc
);
4118 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
4120 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4125 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4127 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4132 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4134 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4139 #ifndef CONFIG_USER_ONLY
4141 static ExitStatus
op_testblock(DisasContext
*s
, DisasOps
*o
)
4143 check_privileged(s
);
4144 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4149 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
4151 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
4158 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
4160 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4161 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4162 tcg_temp_free_i32(l
);
4167 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
4169 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4170 return_low128(o
->out2
);
4175 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
4177 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4178 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4179 tcg_temp_free_i32(l
);
4184 static ExitStatus
op_ts(DisasContext
*s
, DisasOps
*o
)
4186 TCGv_i32 t1
= tcg_const_i32(0xff);
4187 tcg_gen_atomic_xchg_i32(t1
, o
->in2
, t1
, get_mem_index(s
), MO_UB
);
4188 tcg_gen_extract_i32(cc_op
, t1
, 7, 1);
4189 tcg_temp_free_i32(t1
);
4194 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
4196 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4197 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4198 tcg_temp_free_i32(l
);
4202 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
4204 int d1
= get_field(s
->fields
, d1
);
4205 int d2
= get_field(s
->fields
, d2
);
4206 int b1
= get_field(s
->fields
, b1
);
4207 int b2
= get_field(s
->fields
, b2
);
4208 int l
= get_field(s
->fields
, l1
);
4211 o
->addr1
= get_address(s
, 0, b1
, d1
);
4213 /* If the addresses are identical, this is a store/memset of zero. */
4214 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4215 o
->in2
= tcg_const_i64(0);
4219 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4222 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4226 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4229 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4233 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4236 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4240 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4242 gen_op_movi_cc(s
, 0);
4246 /* But in general we'll defer to a helper. */
4247 o
->in2
= get_address(s
, 0, b2
, d2
);
4248 t32
= tcg_const_i32(l
);
4249 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4250 tcg_temp_free_i32(t32
);
4255 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
4257 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4261 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
4263 int shift
= s
->insn
->data
& 0xff;
4264 int size
= s
->insn
->data
>> 8;
4265 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4268 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4269 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4271 /* Produce the CC from only the bits manipulated. */
4272 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4273 set_cc_nz_u64(s
, cc_dst
);
4277 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
4279 o
->out
= tcg_const_i64(0);
4283 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
4285 o
->out
= tcg_const_i64(0);
4291 /* ====================================================================== */
4292 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4293 the original inputs), update the various cc data structures in order to
4294 be able to compute the new condition code. */
4296 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4298 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4301 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4303 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
/* CC for signed 32-bit add: keep both inputs and the result so the
   lazy CC code can derive overflow as well as sign/zero state.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}
4311 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4313 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4316 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4318 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4321 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4323 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4326 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4328 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4331 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4333 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
/* CC for signed 32-bit compare: only the two inputs are needed.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}
4341 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4343 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4346 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4348 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4351 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4353 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4356 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4358 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4361 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4363 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4366 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4368 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4371 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4373 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4376 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4378 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4381 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4383 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4386 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4388 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
/* CC for 32-bit non-zero: truncate the result to 32 bits first so the
   generic CC_OP_NZ test ignores the high half of the i64.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}
4397 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
4399 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
4402 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
4404 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
4407 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
4409 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
4412 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
4414 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
4417 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
4419 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
4422 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
4424 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
4427 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
4429 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
4432 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
4434 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
4437 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
4439 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
4442 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
4444 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
4447 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
4449 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
4452 /* ====================================================================== */
4453 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4454 with the TCG register to which we will write. Used in combination with
4455 the "wout" generators, in some cases we need a new temporary, and in
4456 some cases we can write to a TCG global. */
4458 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4460 o
->out
= tcg_temp_new_i64();
4462 #define SPEC_prep_new 0
/* Allocate fresh temporaries for a two-part (register pair) output.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
4469 #define SPEC_prep_new_P 0
4471 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4473 o
->out
= regs
[get_field(f
, r1
)];
4476 #define SPEC_prep_r1 0
4478 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4480 int r1
= get_field(f
, r1
);
4482 o
->out2
= regs
[r1
+ 1];
4483 o
->g_out
= o
->g_out2
= true;
4485 #define SPEC_prep_r1_P SPEC_r1_even
4487 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4489 o
->out
= fregs
[get_field(f
, r1
)];
4492 #define SPEC_prep_f1 0
4494 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4496 int r1
= get_field(f
, r1
);
4498 o
->out2
= fregs
[r1
+ 2];
4499 o
->g_out
= o
->g_out2
= true;
4501 #define SPEC_prep_x1 SPEC_r1_f128
4503 /* ====================================================================== */
4504 /* The "Write OUTput" generators. These generally perform some non-trivial
4505 copy of data to TCG globals, or to main memory. The trivial cases are
4506 generally handled by having a "prep" generator install the TCG global
4507 as the destination of the operation. */
/* Store the full 64-bit result into general register r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
4513 #define SPEC_wout_r1 0
4515 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4517 int r1
= get_field(f
, r1
);
4518 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
4520 #define SPEC_wout_r1_8 0
4522 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4524 int r1
= get_field(f
, r1
);
4525 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
4527 #define SPEC_wout_r1_16 0
4529 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4531 store_reg32_i64(get_field(f
, r1
), o
->out
);
4533 #define SPEC_wout_r1_32 0
4535 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4537 store_reg32h_i64(get_field(f
, r1
), o
->out
);
4539 #define SPEC_wout_r1_32h 0
4541 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4543 int r1
= get_field(f
, r1
);
4544 store_reg32_i64(r1
, o
->out
);
4545 store_reg32_i64(r1
+ 1, o
->out2
);
4547 #define SPEC_wout_r1_P32 SPEC_r1_even
/* Split a 64-bit result across the even/odd pair r1:r1+1 — low word
   to r1+1, high word to r1.  Note this clobbers o->out in place.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
4556 #define SPEC_wout_r1_D32 SPEC_r1_even
4558 static void wout_r3_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4560 int r3
= get_field(f
, r3
);
4561 store_reg32_i64(r3
, o
->out
);
4562 store_reg32_i64(r3
+ 1, o
->out2
);
4564 #define SPEC_wout_r3_P32 SPEC_r3_even
4566 static void wout_r3_P64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4568 int r3
= get_field(f
, r3
);
4569 store_reg(r3
, o
->out
);
4570 store_reg(r3
+ 1, o
->out2
);
4572 #define SPEC_wout_r3_P64 SPEC_r3_even
4574 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4576 store_freg32_i64(get_field(f
, r1
), o
->out
);
4578 #define SPEC_wout_e1 0
4580 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4582 store_freg(get_field(f
, r1
), o
->out
);
4584 #define SPEC_wout_f1 0
/* Store a 128-bit FP result into the register pair f1/f1+2.  */
static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
4592 #define SPEC_wout_x1 SPEC_r1_f128
/* Store the 32-bit result to r1 only when r1 and r2 name different
   registers — presumably for insns where r1 == r2 makes the store a
   no-op; verify against the insn table.  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
4600 #define SPEC_wout_cond_r1r2_32 0
4602 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4604 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4605 store_freg32_i64(get_field(f
, r1
), o
->out
);
4608 #define SPEC_wout_cond_e1e2 0
4610 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4612 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4614 #define SPEC_wout_m1_8 0
4616 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4618 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4620 #define SPEC_wout_m1_16 0
/* Store the low 32 bits of the result to memory at the first-operand
   address previously computed into o->addr1.  */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
4626 #define SPEC_wout_m1_32 0
4628 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4630 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4632 #define SPEC_wout_m1_64 0
4634 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4636 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4638 #define SPEC_wout_m2_32 0
4640 static void wout_in2_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4642 store_reg(get_field(f
, r1
), o
->in2
);
4644 #define SPEC_wout_in2_r1 0
4646 static void wout_in2_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4648 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4650 #define SPEC_wout_in2_r1_32 0
4652 /* ====================================================================== */
4653 /* The "INput 1" generators. These load the first operand to an insn. */
/* Operand 1 = a fresh copy of general register r1.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
4659 #define SPEC_in1_r1 0
4661 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4663 o
->in1
= regs
[get_field(f
, r1
)];
4666 #define SPEC_in1_r1_o 0
/* Operand 1 = sign-extended low 32 bits of general register r1.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
4673 #define SPEC_in1_r1_32s 0
4675 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4677 o
->in1
= tcg_temp_new_i64();
4678 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4680 #define SPEC_in1_r1_32u 0
4682 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4684 o
->in1
= tcg_temp_new_i64();
4685 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
4687 #define SPEC_in1_r1_sr32 0
4689 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4691 o
->in1
= load_reg(get_field(f
, r1
) + 1);
4693 #define SPEC_in1_r1p1 SPEC_r1_even
4695 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4697 o
->in1
= tcg_temp_new_i64();
4698 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4700 #define SPEC_in1_r1p1_32s SPEC_r1_even
4702 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4704 o
->in1
= tcg_temp_new_i64();
4705 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4707 #define SPEC_in1_r1p1_32u SPEC_r1_even
4709 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4711 int r1
= get_field(f
, r1
);
4712 o
->in1
= tcg_temp_new_i64();
4713 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
4715 #define SPEC_in1_r1_D32 SPEC_r1_even
4717 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4719 o
->in1
= load_reg(get_field(f
, r2
));
4721 #define SPEC_in1_r2 0
4723 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4725 o
->in1
= tcg_temp_new_i64();
4726 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
4728 #define SPEC_in1_r2_sr32 0
4730 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4732 o
->in1
= load_reg(get_field(f
, r3
));
4734 #define SPEC_in1_r3 0
4736 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4738 o
->in1
= regs
[get_field(f
, r3
)];
4741 #define SPEC_in1_r3_o 0
4743 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4745 o
->in1
= tcg_temp_new_i64();
4746 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4748 #define SPEC_in1_r3_32s 0
4750 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4752 o
->in1
= tcg_temp_new_i64();
4753 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4755 #define SPEC_in1_r3_32u 0
4757 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4759 int r3
= get_field(f
, r3
);
4760 o
->in1
= tcg_temp_new_i64();
4761 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
4763 #define SPEC_in1_r3_D32 SPEC_r3_even
4765 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4767 o
->in1
= load_freg32_i64(get_field(f
, r1
));
4769 #define SPEC_in1_e1 0
4771 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4773 o
->in1
= fregs
[get_field(f
, r1
)];
4776 #define SPEC_in1_f1_o 0
4778 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4780 int r1
= get_field(f
, r1
);
4782 o
->out2
= fregs
[r1
+ 2];
4783 o
->g_out
= o
->g_out2
= true;
4785 #define SPEC_in1_x1_o SPEC_r1_f128
4787 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4789 o
->in1
= fregs
[get_field(f
, r3
)];
4792 #define SPEC_in1_f3_o 0
4794 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4796 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
4798 #define SPEC_in1_la1 0
4800 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4802 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4803 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4805 #define SPEC_in1_la2 0
4807 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4810 o
->in1
= tcg_temp_new_i64();
4811 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4813 #define SPEC_in1_m1_8u 0
4815 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4818 o
->in1
= tcg_temp_new_i64();
4819 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4821 #define SPEC_in1_m1_16s 0
4823 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4826 o
->in1
= tcg_temp_new_i64();
4827 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4829 #define SPEC_in1_m1_16u 0
4831 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4834 o
->in1
= tcg_temp_new_i64();
4835 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4837 #define SPEC_in1_m1_32s 0
4839 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4842 o
->in1
= tcg_temp_new_i64();
4843 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4845 #define SPEC_in1_m1_32u 0
4847 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4850 o
->in1
= tcg_temp_new_i64();
4851 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
4853 #define SPEC_in1_m1_64 0
4855 /* ====================================================================== */
4856 /* The "INput 2" generators. These load the second operand to an insn. */
4858 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4860 o
->in2
= regs
[get_field(f
, r1
)];
4863 #define SPEC_in2_r1_o 0
4865 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4867 o
->in2
= tcg_temp_new_i64();
4868 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4870 #define SPEC_in2_r1_16u 0
4872 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4874 o
->in2
= tcg_temp_new_i64();
4875 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4877 #define SPEC_in2_r1_32u 0
4879 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4881 int r1
= get_field(f
, r1
);
4882 o
->in2
= tcg_temp_new_i64();
4883 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
4885 #define SPEC_in2_r1_D32 SPEC_r1_even
4887 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4889 o
->in2
= load_reg(get_field(f
, r2
));
4891 #define SPEC_in2_r2 0
4893 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4895 o
->in2
= regs
[get_field(f
, r2
)];
4898 #define SPEC_in2_r2_o 0
4900 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4902 int r2
= get_field(f
, r2
);
4904 o
->in2
= load_reg(r2
);
4907 #define SPEC_in2_r2_nz 0
4909 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4911 o
->in2
= tcg_temp_new_i64();
4912 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4914 #define SPEC_in2_r2_8s 0
4916 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4918 o
->in2
= tcg_temp_new_i64();
4919 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4921 #define SPEC_in2_r2_8u 0
4923 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4925 o
->in2
= tcg_temp_new_i64();
4926 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4928 #define SPEC_in2_r2_16s 0
4930 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4932 o
->in2
= tcg_temp_new_i64();
4933 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4935 #define SPEC_in2_r2_16u 0
4937 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4939 o
->in2
= load_reg(get_field(f
, r3
));
4941 #define SPEC_in2_r3 0
4943 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4945 o
->in2
= tcg_temp_new_i64();
4946 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
4948 #define SPEC_in2_r3_sr32 0
4950 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4952 o
->in2
= tcg_temp_new_i64();
4953 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4955 #define SPEC_in2_r2_32s 0
4957 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4959 o
->in2
= tcg_temp_new_i64();
4960 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4962 #define SPEC_in2_r2_32u 0
4964 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4966 o
->in2
= tcg_temp_new_i64();
4967 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
4969 #define SPEC_in2_r2_sr32 0
4971 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4973 o
->in2
= load_freg32_i64(get_field(f
, r2
));
4975 #define SPEC_in2_e2 0
4977 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4979 o
->in2
= fregs
[get_field(f
, r2
)];
4982 #define SPEC_in2_f2_o 0
4984 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4986 int r2
= get_field(f
, r2
);
4988 o
->in2
= fregs
[r2
+ 2];
4989 o
->g_in1
= o
->g_in2
= true;
4991 #define SPEC_in2_x2_o SPEC_r2_f128
4993 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4995 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
4997 #define SPEC_in2_ra2 0
/* Operand 2 = the effective address formed from the x2/b2/d2 fields
   (index register x2 is optional and treated as 0 when absent).  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
5004 #define SPEC_in2_a2 0
/* Operand 2 = PC-relative address: pc + 2 * signed immediate i2
   (i2 counts halfwords).  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
5010 #define SPEC_in2_ri2 0
5012 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5014 help_l2_shift(s
, f
, o
, 31);
5016 #define SPEC_in2_sh32 0
5018 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5020 help_l2_shift(s
, f
, o
, 63);
5022 #define SPEC_in2_sh64 0
5024 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5027 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
5029 #define SPEC_in2_m2_8u 0
5031 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5034 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
5036 #define SPEC_in2_m2_16s 0
5038 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5041 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5043 #define SPEC_in2_m2_16u 0
5045 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5048 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5050 #define SPEC_in2_m2_32s 0
5052 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5055 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5057 #define SPEC_in2_m2_32u 0
5059 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5062 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5064 #define SPEC_in2_m2_64 0
5066 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5069 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5071 #define SPEC_in2_mri2_16u 0
5073 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5076 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5078 #define SPEC_in2_mri2_32s 0
5080 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5083 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5085 #define SPEC_in2_mri2_32u 0
5087 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5090 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5092 #define SPEC_in2_mri2_64 0
/* Operand 2 = the immediate field i2 as a constant.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
5098 #define SPEC_in2_i2 0
5100 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5102 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
5104 #define SPEC_in2_i2_8u 0
5106 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5108 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
5110 #define SPEC_in2_i2_16u 0
5112 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5114 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
5116 #define SPEC_in2_i2_32u 0
5118 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5120 uint64_t i2
= (uint16_t)get_field(f
, i2
);
5121 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5123 #define SPEC_in2_i2_16u_shl 0
5125 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5127 uint64_t i2
= (uint32_t)get_field(f
, i2
);
5128 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5130 #define SPEC_in2_i2_32u_shl 0
5132 #ifndef CONFIG_USER_ONLY
5133 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5135 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
5137 #define SPEC_in2_insn 0
5140 /* ====================================================================== */
5142 /* Find opc within the table of insns. This is formulated as a switch
5143 statement so that (1) we get compile-time notice of cut-paste errors
5144 for duplicated opcodes, and (2) the compiler generates the binary
5145 search tree, rather than us having to post-process the table. */
5147 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5148 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5150 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5152 enum DisasInsnEnum
{
5153 #include "insn-data.def"
5157 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5161 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5163 .help_in1 = in1_##I1, \
5164 .help_in2 = in2_##I2, \
5165 .help_prep = prep_##P, \
5166 .help_wout = wout_##W, \
5167 .help_cout = cout_##CC, \
5168 .help_op = op_##OP, \
5172 /* Allow 0 to be used for NULL in the table below. */
5180 #define SPEC_in1_0 0
5181 #define SPEC_in2_0 0
5182 #define SPEC_prep_0 0
5183 #define SPEC_wout_0 0
5185 static const DisasInsn insn_info
[] = {
5186 #include "insn-data.def"
5190 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5191 case OPC: return &insn_info[insn_ ## NM];
5193 static const DisasInsn
*lookup_opc(uint16_t opc
)
5196 #include "insn-data.def"
5205 /* Extract a field from the insn. The INSN should be left-aligned in
5206 the uint64_t so that we can more easily utilize the big-bit-endian
5207 definitions we extract from the Principals of Operation. */
5209 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5217 /* Zero extract the field from the insn. */
5218 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5220 /* Sign-extend, or un-swap the field as necessary. */
5222 case 0: /* unsigned */
5224 case 1: /* signed */
5225 assert(f
->size
<= 32);
5226 m
= 1u << (f
->size
- 1);
5229 case 2: /* dl+dh split, signed 20 bit. */
5230 r
= ((int8_t)r
<< 12) | (r
>> 8);
5236 /* Validate that the "compressed" encoding we selected above is valid.
5237 I.e. we havn't make two different original fields overlap. */
5238 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5239 o
->presentC
|= 1 << f
->indexC
;
5240 o
->presentO
|= 1 << f
->indexO
;
5242 o
->c
[f
->indexC
] = r
;
5245 /* Lookup the insn at the current PC, extracting the operands into O and
5246 returning the info struct for the insn. Returns NULL for invalid insn. */
5248 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5251 uint64_t insn
, pc
= s
->pc
;
5253 const DisasInsn
*info
;
5255 if (unlikely(s
->ex_value
)) {
5256 /* Drop the EX data now, so that it's clear on exception paths. */
5257 TCGv_i64 zero
= tcg_const_i64(0);
5258 tcg_gen_st_i64(zero
, cpu_env
, offsetof(CPUS390XState
, ex_value
));
5259 tcg_temp_free_i64(zero
);
5261 /* Extract the values saved by EXECUTE. */
5262 insn
= s
->ex_value
& 0xffffffffffff0000ull
;
5263 ilen
= s
->ex_value
& 0xf;
5266 insn
= ld_code2(env
, pc
);
5267 op
= (insn
>> 8) & 0xff;
5268 ilen
= get_ilen(op
);
5274 insn
= ld_code4(env
, pc
) << 32;
5277 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
5280 g_assert_not_reached();
5283 s
->next_pc
= s
->pc
+ ilen
;
5286 /* We can't actually determine the insn format until we've looked up
5287 the full insn opcode. Which we can't do without locating the
5288 secondary opcode. Assume by default that OP2 is at bit 40; for
5289 those smaller insns that don't actually have a secondary opcode
5290 this will correctly result in OP2 = 0. */
5296 case 0xb2: /* S, RRF, RRE */
5297 case 0xb3: /* RRE, RRD, RRF */
5298 case 0xb9: /* RRE, RRF */
5299 case 0xe5: /* SSE, SIL */
5300 op2
= (insn
<< 8) >> 56;
5304 case 0xc0: /* RIL */
5305 case 0xc2: /* RIL */
5306 case 0xc4: /* RIL */
5307 case 0xc6: /* RIL */
5308 case 0xc8: /* SSF */
5309 case 0xcc: /* RIL */
5310 op2
= (insn
<< 12) >> 60;
5312 case 0xd0 ... 0xdf: /* SS */
5318 case 0xee ... 0xf3: /* SS */
5319 case 0xf8 ... 0xfd: /* SS */
5323 op2
= (insn
<< 40) >> 56;
5327 memset(f
, 0, sizeof(*f
));
5332 /* Lookup the instruction. */
5333 info
= lookup_opc(op
<< 8 | op2
);
5335 /* If we found it, extract the operands. */
5337 DisasFormat fmt
= info
->fmt
;
5340 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5341 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
5347 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5349 const DisasInsn
*insn
;
5350 ExitStatus ret
= NO_EXIT
;
5354 /* Search for the insn in the table. */
5355 insn
= extract_insn(env
, s
, &f
);
5357 /* Not found means unimplemented/illegal opcode. */
5359 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
5361 gen_illegal_opcode(s
);
5362 return EXIT_NORETURN
;
5365 #ifndef CONFIG_USER_ONLY
5366 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5367 TCGv_i64 addr
= tcg_const_i64(s
->pc
);
5368 gen_helper_per_ifetch(cpu_env
, addr
);
5369 tcg_temp_free_i64(addr
);
5373 /* Check for insn specification exceptions. */
5375 int spec
= insn
->spec
, excp
= 0, r
;
5377 if (spec
& SPEC_r1_even
) {
5378 r
= get_field(&f
, r1
);
5380 excp
= PGM_SPECIFICATION
;
5383 if (spec
& SPEC_r2_even
) {
5384 r
= get_field(&f
, r2
);
5386 excp
= PGM_SPECIFICATION
;
5389 if (spec
& SPEC_r3_even
) {
5390 r
= get_field(&f
, r3
);
5392 excp
= PGM_SPECIFICATION
;
5395 if (spec
& SPEC_r1_f128
) {
5396 r
= get_field(&f
, r1
);
5398 excp
= PGM_SPECIFICATION
;
5401 if (spec
& SPEC_r2_f128
) {
5402 r
= get_field(&f
, r2
);
5404 excp
= PGM_SPECIFICATION
;
5408 gen_program_exception(s
, excp
);
5409 return EXIT_NORETURN
;
5413 /* Set up the strutures we use to communicate with the helpers. */
5416 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5417 TCGV_UNUSED_I64(o
.out
);
5418 TCGV_UNUSED_I64(o
.out2
);
5419 TCGV_UNUSED_I64(o
.in1
);
5420 TCGV_UNUSED_I64(o
.in2
);
5421 TCGV_UNUSED_I64(o
.addr1
);
5423 /* Implement the instruction. */
5424 if (insn
->help_in1
) {
5425 insn
->help_in1(s
, &f
, &o
);
5427 if (insn
->help_in2
) {
5428 insn
->help_in2(s
, &f
, &o
);
5430 if (insn
->help_prep
) {
5431 insn
->help_prep(s
, &f
, &o
);
5433 if (insn
->help_op
) {
5434 ret
= insn
->help_op(s
, &o
);
5436 if (insn
->help_wout
) {
5437 insn
->help_wout(s
, &f
, &o
);
5439 if (insn
->help_cout
) {
5440 insn
->help_cout(s
, &o
);
5443 /* Free any temporaries created by the helpers. */
5444 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5445 tcg_temp_free_i64(o
.out
);
5447 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5448 tcg_temp_free_i64(o
.out2
);
5450 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5451 tcg_temp_free_i64(o
.in1
);
5453 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5454 tcg_temp_free_i64(o
.in2
);
5456 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5457 tcg_temp_free_i64(o
.addr1
);
5460 #ifndef CONFIG_USER_ONLY
5461 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5462 /* An exception might be triggered, save PSW if not already done. */
5463 if (ret
== NO_EXIT
|| ret
== EXIT_PC_STALE
) {
5464 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
5470 /* Call the helper to check for a possible PER exception. */
5471 gen_helper_per_check_exception(cpu_env
);
5475 /* Advance to the next instruction. */
5480 void gen_intermediate_code(CPUS390XState
*env
, struct TranslationBlock
*tb
)
5482 S390CPU
*cpu
= s390_env_get_cpu(env
);
5483 CPUState
*cs
= CPU(cpu
);
5485 target_ulong pc_start
;
5486 uint64_t next_page_start
;
5487 int num_insns
, max_insns
;
5494 if (!(tb
->flags
& FLAG_MASK_64
)) {
5495 pc_start
&= 0x7fffffff;
5500 dc
.cc_op
= CC_OP_DYNAMIC
;
5501 dc
.ex_value
= tb
->cs_base
;
5502 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
5504 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
5507 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5508 if (max_insns
== 0) {
5509 max_insns
= CF_COUNT_MASK
;
5511 if (max_insns
> TCG_MAX_INSNS
) {
5512 max_insns
= TCG_MAX_INSNS
;
5518 tcg_gen_insn_start(dc
.pc
, dc
.cc_op
);
5521 if (unlikely(cpu_breakpoint_test(cs
, dc
.pc
, BP_ANY
))) {
5522 status
= EXIT_PC_STALE
;
5524 /* The address covered by the breakpoint must be included in
5525 [tb->pc, tb->pc + tb->size) in order to for it to be
5526 properly cleared -- thus we increment the PC here so that
5527 the logic setting tb->size below does the right thing. */
5532 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
5536 status
= translate_one(env
, &dc
);
5538 /* If we reach a page boundary, are single stepping,
5539 or exhaust instruction count, stop generation. */
5540 if (status
== NO_EXIT
5541 && (dc
.pc
>= next_page_start
5542 || tcg_op_buf_full()
5543 || num_insns
>= max_insns
5545 || cs
->singlestep_enabled
5547 status
= EXIT_PC_STALE
;
5549 } while (status
== NO_EXIT
);
5551 if (tb
->cflags
& CF_LAST_IO
) {
5560 update_psw_addr(&dc
);
5562 case EXIT_PC_UPDATED
:
5563 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5564 cc op type is in env */
5567 case EXIT_PC_CC_UPDATED
:
5568 /* Exit the TB, either by raising a debug exception or by return. */
5570 gen_exception(EXCP_DEBUG
);
5571 } else if (use_exit_tb(&dc
)) {
5574 tcg_gen_lookup_and_goto_ptr(psw_addr
);
5581 gen_tb_end(tb
, num_insns
);
5583 tb
->size
= dc
.pc
- pc_start
;
5584 tb
->icount
= num_insns
;
5586 #if defined(S390X_DEBUG_DISAS)
5587 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
5588 && qemu_log_in_addr_range(pc_start
)) {
5590 if (unlikely(dc
.ex_value
)) {
5591 /* ??? Unfortunately log_target_disas can't use host memory. */
5592 qemu_log("IN: EXECUTE %016" PRIx64
"\n", dc
.ex_value
);
5594 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5595 log_target_disas(cs
, pc_start
, dc
.pc
- pc_start
, 1);
5603 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
5606 int cc_op
= data
[1];
5607 env
->psw
.addr
= data
[0];
5608 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {