4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env
;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext
;
53 typedef struct DisasInsn DisasInsn
;
54 typedef struct DisasFields DisasFields
;
57 struct TranslationBlock
*tb
;
58 const DisasInsn
*insn
;
64 bool singlestep_enabled
;
67 /* Information carried about a condition to be evaluated. */
74 struct { TCGv_i64 a
, b
; } s64
;
75 struct { TCGv_i32 a
, b
; } s32
;
81 #ifdef DEBUG_INLINE_BRANCHES
82 static uint64_t inline_branch_hit
[CC_OP_MAX
];
83 static uint64_t inline_branch_miss
[CC_OP_MAX
];
86 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
88 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
89 if (s
->tb
->flags
& FLAG_MASK_32
) {
90 return pc
| 0x80000000;
96 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
99 S390CPU
*cpu
= S390_CPU(cs
);
100 CPUS390XState
*env
= &cpu
->env
;
103 if (env
->cc_op
> 3) {
104 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
105 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
107 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
108 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
111 for (i
= 0; i
< 16; i
++) {
112 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
114 cpu_fprintf(f
, "\n");
120 for (i
= 0; i
< 16; i
++) {
121 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, get_freg(env
, i
)->ll
);
123 cpu_fprintf(f
, "\n");
129 for (i
= 0; i
< 32; i
++) {
130 cpu_fprintf(f
, "V%02d=%016" PRIx64
"%016" PRIx64
, i
,
131 env
->vregs
[i
][0].ll
, env
->vregs
[i
][1].ll
);
132 cpu_fprintf(f
, (i
% 2) ? "\n" : " ");
135 #ifndef CONFIG_USER_ONLY
136 for (i
= 0; i
< 16; i
++) {
137 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
139 cpu_fprintf(f
, "\n");
146 #ifdef DEBUG_INLINE_BRANCHES
147 for (i
= 0; i
< CC_OP_MAX
; i
++) {
148 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
149 inline_branch_miss
[i
], inline_branch_hit
[i
]);
153 cpu_fprintf(f
, "\n");
156 static TCGv_i64 psw_addr
;
157 static TCGv_i64 psw_mask
;
158 static TCGv_i64 gbea
;
160 static TCGv_i32 cc_op
;
161 static TCGv_i64 cc_src
;
162 static TCGv_i64 cc_dst
;
163 static TCGv_i64 cc_vr
;
165 static char cpu_reg_names
[32][4];
166 static TCGv_i64 regs
[16];
167 static TCGv_i64 fregs
[16];
169 void s390x_translate_init(void)
173 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
174 tcg_ctx
.tcg_env
= cpu_env
;
175 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
176 offsetof(CPUS390XState
, psw
.addr
),
178 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
179 offsetof(CPUS390XState
, psw
.mask
),
181 gbea
= tcg_global_mem_new_i64(cpu_env
,
182 offsetof(CPUS390XState
, gbea
),
185 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
187 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
189 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
191 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
194 for (i
= 0; i
< 16; i
++) {
195 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
196 regs
[i
] = tcg_global_mem_new(cpu_env
,
197 offsetof(CPUS390XState
, regs
[i
]),
201 for (i
= 0; i
< 16; i
++) {
202 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
203 fregs
[i
] = tcg_global_mem_new(cpu_env
,
204 offsetof(CPUS390XState
, vregs
[i
][0].d
),
205 cpu_reg_names
[i
+ 16]);
209 static TCGv_i64
load_reg(int reg
)
211 TCGv_i64 r
= tcg_temp_new_i64();
212 tcg_gen_mov_i64(r
, regs
[reg
]);
216 static TCGv_i64
load_freg32_i64(int reg
)
218 TCGv_i64 r
= tcg_temp_new_i64();
219 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
223 static void store_reg(int reg
, TCGv_i64 v
)
225 tcg_gen_mov_i64(regs
[reg
], v
);
228 static void store_freg(int reg
, TCGv_i64 v
)
230 tcg_gen_mov_i64(fregs
[reg
], v
);
233 static void store_reg32_i64(int reg
, TCGv_i64 v
)
235 /* 32 bit register writes keep the upper half */
236 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
239 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
241 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
244 static void store_freg32_i64(int reg
, TCGv_i64 v
)
246 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
249 static void return_low128(TCGv_i64 dest
)
251 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
254 static void update_psw_addr(DisasContext
*s
)
257 tcg_gen_movi_i64(psw_addr
, s
->pc
);
260 static void per_branch(DisasContext
*s
, bool to_next
)
262 #ifndef CONFIG_USER_ONLY
263 tcg_gen_movi_i64(gbea
, s
->pc
);
265 if (s
->tb
->flags
& FLAG_MASK_PER
) {
266 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->next_pc
) : psw_addr
;
267 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
269 tcg_temp_free_i64(next_pc
);
275 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
276 TCGv_i64 arg1
, TCGv_i64 arg2
)
278 #ifndef CONFIG_USER_ONLY
279 if (s
->tb
->flags
& FLAG_MASK_PER
) {
280 TCGLabel
*lab
= gen_new_label();
281 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
283 tcg_gen_movi_i64(gbea
, s
->pc
);
284 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
288 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
289 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
290 tcg_temp_free_i64(pc
);
295 static void per_breaking_event(DisasContext
*s
)
297 tcg_gen_movi_i64(gbea
, s
->pc
);
300 static void update_cc_op(DisasContext
*s
)
302 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
303 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
307 static void potential_page_fault(DisasContext
*s
)
313 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
315 return (uint64_t)cpu_lduw_code(env
, pc
);
318 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
320 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
323 static int get_mem_index(DisasContext
*s
)
325 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
326 case PSW_ASC_PRIMARY
>> 32:
328 case PSW_ASC_SECONDARY
>> 32:
330 case PSW_ASC_HOME
>> 32:
338 static void gen_exception(int excp
)
340 TCGv_i32 tmp
= tcg_const_i32(excp
);
341 gen_helper_exception(cpu_env
, tmp
);
342 tcg_temp_free_i32(tmp
);
345 static void gen_program_exception(DisasContext
*s
, int code
)
349 /* Remember what pgm exception this was. */
350 tmp
= tcg_const_i32(code
);
351 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
352 tcg_temp_free_i32(tmp
);
354 tmp
= tcg_const_i32(s
->ilen
);
355 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
356 tcg_temp_free_i32(tmp
);
364 /* Trigger exception. */
365 gen_exception(EXCP_PGM
);
368 static inline void gen_illegal_opcode(DisasContext
*s
)
370 gen_program_exception(s
, PGM_OPERATION
);
373 static inline void gen_trap(DisasContext
*s
)
377 /* Set DXC to 0xff. */
378 t
= tcg_temp_new_i32();
379 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
380 tcg_gen_ori_i32(t
, t
, 0xff00);
381 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
382 tcg_temp_free_i32(t
);
384 gen_program_exception(s
, PGM_DATA
);
387 #ifndef CONFIG_USER_ONLY
388 static void check_privileged(DisasContext
*s
)
390 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
391 gen_program_exception(s
, PGM_PRIVILEGED
);
396 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
398 TCGv_i64 tmp
= tcg_temp_new_i64();
399 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
401 /* Note that d2 is limited to 20 bits, signed. If we crop negative
402 displacements early we create larger immediate addends. */
404 /* Note that addi optimizes the imm==0 case. */
406 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
407 tcg_gen_addi_i64(tmp
, tmp
, d2
);
409 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
411 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
417 tcg_gen_movi_i64(tmp
, d2
);
420 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
426 static inline bool live_cc_data(DisasContext
*s
)
428 return (s
->cc_op
!= CC_OP_DYNAMIC
429 && s
->cc_op
!= CC_OP_STATIC
433 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
435 if (live_cc_data(s
)) {
436 tcg_gen_discard_i64(cc_src
);
437 tcg_gen_discard_i64(cc_dst
);
438 tcg_gen_discard_i64(cc_vr
);
440 s
->cc_op
= CC_OP_CONST0
+ val
;
443 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
445 if (live_cc_data(s
)) {
446 tcg_gen_discard_i64(cc_src
);
447 tcg_gen_discard_i64(cc_vr
);
449 tcg_gen_mov_i64(cc_dst
, dst
);
453 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
456 if (live_cc_data(s
)) {
457 tcg_gen_discard_i64(cc_vr
);
459 tcg_gen_mov_i64(cc_src
, src
);
460 tcg_gen_mov_i64(cc_dst
, dst
);
464 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
465 TCGv_i64 dst
, TCGv_i64 vr
)
467 tcg_gen_mov_i64(cc_src
, src
);
468 tcg_gen_mov_i64(cc_dst
, dst
);
469 tcg_gen_mov_i64(cc_vr
, vr
);
473 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
475 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
478 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
480 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
483 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
485 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
488 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
490 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
493 /* CC value is in env->cc_op */
494 static void set_cc_static(DisasContext
*s
)
496 if (live_cc_data(s
)) {
497 tcg_gen_discard_i64(cc_src
);
498 tcg_gen_discard_i64(cc_dst
);
499 tcg_gen_discard_i64(cc_vr
);
501 s
->cc_op
= CC_OP_STATIC
;
504 /* calculates cc into cc_op */
505 static void gen_op_calc_cc(DisasContext
*s
)
507 TCGv_i32 local_cc_op
;
510 TCGV_UNUSED_I32(local_cc_op
);
511 TCGV_UNUSED_I64(dummy
);
514 dummy
= tcg_const_i64(0);
528 local_cc_op
= tcg_const_i32(s
->cc_op
);
544 /* s->cc_op is the cc value */
545 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
548 /* env->cc_op already is the cc value */
563 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
568 case CC_OP_LTUGTU_32
:
569 case CC_OP_LTUGTU_64
:
576 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
591 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
594 /* unknown operation - assume 3 arguments and cc_op in env */
595 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
601 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
602 tcg_temp_free_i32(local_cc_op
);
604 if (!TCGV_IS_UNUSED_I64(dummy
)) {
605 tcg_temp_free_i64(dummy
);
608 /* We now have cc in cc_op as constant */
612 static bool use_exit_tb(DisasContext
*s
)
614 return (s
->singlestep_enabled
||
615 (s
->tb
->cflags
& CF_LAST_IO
) ||
616 (s
->tb
->flags
& FLAG_MASK_PER
));
619 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
621 if (unlikely(use_exit_tb(s
))) {
624 #ifndef CONFIG_USER_ONLY
625 return (dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
626 (dest
& TARGET_PAGE_MASK
) == (s
->pc
& TARGET_PAGE_MASK
);
632 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
634 #ifdef DEBUG_INLINE_BRANCHES
635 inline_branch_miss
[cc_op
]++;
639 static void account_inline_branch(DisasContext
*s
, int cc_op
)
641 #ifdef DEBUG_INLINE_BRANCHES
642 inline_branch_hit
[cc_op
]++;
646 /* Table of mask values to comparison codes, given a comparison as input.
647 For such, CC=3 should not be possible. */
648 static const TCGCond ltgt_cond
[16] = {
649 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
650 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
651 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
652 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
653 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
654 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
655 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
656 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
659 /* Table of mask values to comparison codes, given a logic op as input.
660 For such, only CC=0 and CC=1 should be possible. */
661 static const TCGCond nz_cond
[16] = {
662 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
663 TCG_COND_NEVER
, TCG_COND_NEVER
,
664 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
665 TCG_COND_NE
, TCG_COND_NE
,
666 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
667 TCG_COND_EQ
, TCG_COND_EQ
,
668 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
669 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
672 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
673 details required to generate a TCG comparison. */
674 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
677 enum cc_op old_cc_op
= s
->cc_op
;
679 if (mask
== 15 || mask
== 0) {
680 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
683 c
->g1
= c
->g2
= true;
688 /* Find the TCG condition for the mask + cc op. */
694 cond
= ltgt_cond
[mask
];
695 if (cond
== TCG_COND_NEVER
) {
698 account_inline_branch(s
, old_cc_op
);
701 case CC_OP_LTUGTU_32
:
702 case CC_OP_LTUGTU_64
:
703 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
704 if (cond
== TCG_COND_NEVER
) {
707 account_inline_branch(s
, old_cc_op
);
711 cond
= nz_cond
[mask
];
712 if (cond
== TCG_COND_NEVER
) {
715 account_inline_branch(s
, old_cc_op
);
730 account_inline_branch(s
, old_cc_op
);
745 account_inline_branch(s
, old_cc_op
);
749 switch (mask
& 0xa) {
750 case 8: /* src == 0 -> no one bit found */
753 case 2: /* src != 0 -> one bit found */
759 account_inline_branch(s
, old_cc_op
);
765 case 8 | 2: /* vr == 0 */
768 case 4 | 1: /* vr != 0 */
771 case 8 | 4: /* no carry -> vr >= src */
774 case 2 | 1: /* carry -> vr < src */
780 account_inline_branch(s
, old_cc_op
);
785 /* Note that CC=0 is impossible; treat it as don't-care. */
787 case 2: /* zero -> op1 == op2 */
790 case 4 | 1: /* !zero -> op1 != op2 */
793 case 4: /* borrow (!carry) -> op1 < op2 */
796 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
802 account_inline_branch(s
, old_cc_op
);
807 /* Calculate cc value. */
812 /* Jump based on CC. We'll load up the real cond below;
813 the assignment here merely avoids a compiler warning. */
814 account_noninline_branch(s
, old_cc_op
);
815 old_cc_op
= CC_OP_STATIC
;
816 cond
= TCG_COND_NEVER
;
820 /* Load up the arguments of the comparison. */
822 c
->g1
= c
->g2
= false;
826 c
->u
.s32
.a
= tcg_temp_new_i32();
827 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
828 c
->u
.s32
.b
= tcg_const_i32(0);
831 case CC_OP_LTUGTU_32
:
834 c
->u
.s32
.a
= tcg_temp_new_i32();
835 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
836 c
->u
.s32
.b
= tcg_temp_new_i32();
837 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
844 c
->u
.s64
.b
= tcg_const_i64(0);
848 case CC_OP_LTUGTU_64
:
852 c
->g1
= c
->g2
= true;
858 c
->u
.s64
.a
= tcg_temp_new_i64();
859 c
->u
.s64
.b
= tcg_const_i64(0);
860 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
865 c
->u
.s32
.a
= tcg_temp_new_i32();
866 c
->u
.s32
.b
= tcg_temp_new_i32();
867 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
868 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
869 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
871 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
878 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
879 c
->u
.s64
.b
= tcg_const_i64(0);
891 case 0x8 | 0x4 | 0x2: /* cc != 3 */
893 c
->u
.s32
.b
= tcg_const_i32(3);
895 case 0x8 | 0x4 | 0x1: /* cc != 2 */
897 c
->u
.s32
.b
= tcg_const_i32(2);
899 case 0x8 | 0x2 | 0x1: /* cc != 1 */
901 c
->u
.s32
.b
= tcg_const_i32(1);
903 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
906 c
->u
.s32
.a
= tcg_temp_new_i32();
907 c
->u
.s32
.b
= tcg_const_i32(0);
908 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
910 case 0x8 | 0x4: /* cc < 2 */
912 c
->u
.s32
.b
= tcg_const_i32(2);
914 case 0x8: /* cc == 0 */
916 c
->u
.s32
.b
= tcg_const_i32(0);
918 case 0x4 | 0x2 | 0x1: /* cc != 0 */
920 c
->u
.s32
.b
= tcg_const_i32(0);
922 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
925 c
->u
.s32
.a
= tcg_temp_new_i32();
926 c
->u
.s32
.b
= tcg_const_i32(0);
927 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
929 case 0x4: /* cc == 1 */
931 c
->u
.s32
.b
= tcg_const_i32(1);
933 case 0x2 | 0x1: /* cc > 1 */
935 c
->u
.s32
.b
= tcg_const_i32(1);
937 case 0x2: /* cc == 2 */
939 c
->u
.s32
.b
= tcg_const_i32(2);
941 case 0x1: /* cc == 3 */
943 c
->u
.s32
.b
= tcg_const_i32(3);
946 /* CC is masked by something else: (8 >> cc) & mask. */
949 c
->u
.s32
.a
= tcg_const_i32(8);
950 c
->u
.s32
.b
= tcg_const_i32(0);
951 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
952 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
963 static void free_compare(DisasCompare
*c
)
967 tcg_temp_free_i64(c
->u
.s64
.a
);
969 tcg_temp_free_i32(c
->u
.s32
.a
);
974 tcg_temp_free_i64(c
->u
.s64
.b
);
976 tcg_temp_free_i32(c
->u
.s32
.b
);
981 /* ====================================================================== */
982 /* Define the insn format enumeration. */
983 #define F0(N) FMT_##N,
984 #define F1(N, X1) F0(N)
985 #define F2(N, X1, X2) F0(N)
986 #define F3(N, X1, X2, X3) F0(N)
987 #define F4(N, X1, X2, X3, X4) F0(N)
988 #define F5(N, X1, X2, X3, X4, X5) F0(N)
991 #include "insn-format.def"
1001 /* Define a structure to hold the decoded fields. We'll store each inside
1002 an array indexed by an enum. In order to conserve memory, we'll arrange
1003 for fields that do not exist at the same time to overlap, thus the "C"
1004 for compact. For checking purposes there is an "O" for original index
1005 as well that will be applied to availability bitmaps. */
1007 enum DisasFieldIndexO
{
1030 enum DisasFieldIndexC
{
1061 struct DisasFields
{
1065 unsigned presentC
:16;
1066 unsigned int presentO
;
1070 /* This is the way fields are to be accessed out of DisasFields. */
1071 #define have_field(S, F) have_field1((S), FLD_O_##F)
1072 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1074 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1076 return (f
->presentO
>> c
) & 1;
1079 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1080 enum DisasFieldIndexC c
)
1082 assert(have_field1(f
, o
));
1086 /* Describe the layout of each field in each format. */
1087 typedef struct DisasField
{
1089 unsigned int size
:8;
1090 unsigned int type
:2;
1091 unsigned int indexC
:6;
1092 enum DisasFieldIndexO indexO
:8;
1095 typedef struct DisasFormatInfo
{
1096 DisasField op
[NUM_C_FIELD
];
1099 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1100 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1101 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1102 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1103 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1104 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1105 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1106 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1107 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1108 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1109 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1110 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1111 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1112 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1114 #define F0(N) { { } },
1115 #define F1(N, X1) { { X1 } },
1116 #define F2(N, X1, X2) { { X1, X2 } },
1117 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1118 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1119 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1121 static const DisasFormatInfo format_info
[] = {
1122 #include "insn-format.def"
1140 /* Generally, we'll extract operands into these structures, operate upon
1141 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1142 of routines below for more details. */
1144 bool g_out
, g_out2
, g_in1
, g_in2
;
1145 TCGv_i64 out
, out2
, in1
, in2
;
1149 /* Instructions can place constraints on their operands, raising specification
1150 exceptions if they are violated. To make this easy to automate, each "in1",
1151 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1152 of the following, or 0. To make this easy to document, we'll put the
1153 SPEC_<name> defines next to <name>. */
1155 #define SPEC_r1_even 1
1156 #define SPEC_r2_even 2
1157 #define SPEC_r3_even 4
1158 #define SPEC_r1_f128 8
1159 #define SPEC_r2_f128 16
1161 /* Return values from translate_one, indicating the state of the TB. */
1163 /* Continue the TB. */
1165 /* We have emitted one or more goto_tb. No fixup required. */
1167 /* We are not using a goto_tb (for whatever reason), but have updated
1168 the PC (for whatever reason), so there's no need to do it again on
1171 /* We have updated the PC and CC values. */
1173 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1174 updated the PC for the next instruction to be executed. */
1176 /* We are exiting the TB to the main loop. */
1177 EXIT_PC_STALE_NOCHAIN
,
1178 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1179 No following code will be executed. */
1191 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1192 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1193 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1194 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1195 void (*help_cout
)(DisasContext
*, DisasOps
*);
1196 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1201 /* ====================================================================== */
1202 /* Miscellaneous helpers, used by several operations. */
1204 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1205 DisasOps
*o
, int mask
)
1207 int b2
= get_field(f
, b2
);
1208 int d2
= get_field(f
, d2
);
1211 o
->in2
= tcg_const_i64(d2
& mask
);
1213 o
->in2
= get_address(s
, 0, b2
, d2
);
1214 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1218 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1220 if (dest
== s
->next_pc
) {
1221 per_branch(s
, true);
1224 if (use_goto_tb(s
, dest
)) {
1226 per_breaking_event(s
);
1228 tcg_gen_movi_i64(psw_addr
, dest
);
1229 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1230 return EXIT_GOTO_TB
;
1232 tcg_gen_movi_i64(psw_addr
, dest
);
1233 per_branch(s
, false);
1234 return EXIT_PC_UPDATED
;
1238 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1239 bool is_imm
, int imm
, TCGv_i64 cdest
)
1242 uint64_t dest
= s
->pc
+ 2 * imm
;
1245 /* Take care of the special cases first. */
1246 if (c
->cond
== TCG_COND_NEVER
) {
1251 if (dest
== s
->next_pc
) {
1252 /* Branch to next. */
1253 per_branch(s
, true);
1257 if (c
->cond
== TCG_COND_ALWAYS
) {
1258 ret
= help_goto_direct(s
, dest
);
1262 if (TCGV_IS_UNUSED_I64(cdest
)) {
1263 /* E.g. bcr %r0 -> no branch. */
1267 if (c
->cond
== TCG_COND_ALWAYS
) {
1268 tcg_gen_mov_i64(psw_addr
, cdest
);
1269 per_branch(s
, false);
1270 ret
= EXIT_PC_UPDATED
;
1275 if (use_goto_tb(s
, s
->next_pc
)) {
1276 if (is_imm
&& use_goto_tb(s
, dest
)) {
1277 /* Both exits can use goto_tb. */
1280 lab
= gen_new_label();
1282 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1284 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1287 /* Branch not taken. */
1289 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1290 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1294 per_breaking_event(s
);
1296 tcg_gen_movi_i64(psw_addr
, dest
);
1297 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1301 /* Fallthru can use goto_tb, but taken branch cannot. */
1302 /* Store taken branch destination before the brcond. This
1303 avoids having to allocate a new local temp to hold it.
1304 We'll overwrite this in the not taken case anyway. */
1306 tcg_gen_mov_i64(psw_addr
, cdest
);
1309 lab
= gen_new_label();
1311 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1313 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1316 /* Branch not taken. */
1319 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1320 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1324 tcg_gen_movi_i64(psw_addr
, dest
);
1326 per_breaking_event(s
);
1327 ret
= EXIT_PC_UPDATED
;
1330 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1331 Most commonly we're single-stepping or some other condition that
1332 disables all use of goto_tb. Just update the PC and exit. */
1334 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1336 cdest
= tcg_const_i64(dest
);
1340 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1342 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1344 TCGv_i32 t0
= tcg_temp_new_i32();
1345 TCGv_i64 t1
= tcg_temp_new_i64();
1346 TCGv_i64 z
= tcg_const_i64(0);
1347 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1348 tcg_gen_extu_i32_i64(t1
, t0
);
1349 tcg_temp_free_i32(t0
);
1350 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1351 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1352 tcg_temp_free_i64(t1
);
1353 tcg_temp_free_i64(z
);
1357 tcg_temp_free_i64(cdest
);
1359 tcg_temp_free_i64(next
);
1361 ret
= EXIT_PC_UPDATED
;
1369 /* ====================================================================== */
1370 /* The operations. These perform the bulk of the work for any insn,
1371 usually after the operands have been loaded and output initialized. */
1373 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1376 z
= tcg_const_i64(0);
1377 n
= tcg_temp_new_i64();
1378 tcg_gen_neg_i64(n
, o
->in2
);
1379 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1380 tcg_temp_free_i64(n
);
1381 tcg_temp_free_i64(z
);
1385 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1387 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1391 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1393 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1397 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1399 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1400 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1404 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1406 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1410 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1415 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1417 /* The carry flag is the msb of CC, therefore the branch mask that would
1418 create that comparison is 3. Feeding the generated comparison to
1419 setcond produces the carry flag that we desire. */
1420 disas_jcc(s
, &cmp
, 3);
1421 carry
= tcg_temp_new_i64();
1423 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1425 TCGv_i32 t
= tcg_temp_new_i32();
1426 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1427 tcg_gen_extu_i32_i64(carry
, t
);
1428 tcg_temp_free_i32(t
);
1432 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1433 tcg_temp_free_i64(carry
);
1437 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1439 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1443 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1445 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1449 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1451 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1452 return_low128(o
->out2
);
1456 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1458 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1462 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1464 int shift
= s
->insn
->data
& 0xff;
1465 int size
= s
->insn
->data
>> 8;
1466 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1469 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1470 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1471 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1473 /* Produce the CC from only the bits manipulated. */
1474 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1475 set_cc_nz_u64(s
, cc_dst
);
1479 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1481 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1482 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1483 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1484 per_branch(s
, false);
1485 return EXIT_PC_UPDATED
;
1491 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1493 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1494 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1497 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1499 int m1
= get_field(s
->fields
, m1
);
1500 bool is_imm
= have_field(s
->fields
, i2
);
1501 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1504 /* BCR with R2 = 0 causes no branching */
1505 if (have_field(s
->fields
, r2
) && get_field(s
->fields
, r2
) == 0) {
1507 /* Perform serialization */
1508 /* FIXME: check for fast-BCR-serialization facility */
1509 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1512 /* Perform serialization */
1513 /* FIXME: perform checkpoint-synchronisation */
1514 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1519 disas_jcc(s
, &c
, m1
);
1520 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1523 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1525 int r1
= get_field(s
->fields
, r1
);
1526 bool is_imm
= have_field(s
->fields
, i2
);
1527 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1531 c
.cond
= TCG_COND_NE
;
1536 t
= tcg_temp_new_i64();
1537 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1538 store_reg32_i64(r1
, t
);
1539 c
.u
.s32
.a
= tcg_temp_new_i32();
1540 c
.u
.s32
.b
= tcg_const_i32(0);
1541 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1542 tcg_temp_free_i64(t
);
1544 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1547 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1549 int r1
= get_field(s
->fields
, r1
);
1550 int imm
= get_field(s
->fields
, i2
);
1554 c
.cond
= TCG_COND_NE
;
1559 t
= tcg_temp_new_i64();
1560 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1561 tcg_gen_subi_i64(t
, t
, 1);
1562 store_reg32h_i64(r1
, t
);
1563 c
.u
.s32
.a
= tcg_temp_new_i32();
1564 c
.u
.s32
.b
= tcg_const_i32(0);
1565 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1566 tcg_temp_free_i64(t
);
1568 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1571 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1573 int r1
= get_field(s
->fields
, r1
);
1574 bool is_imm
= have_field(s
->fields
, i2
);
1575 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1578 c
.cond
= TCG_COND_NE
;
1583 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1584 c
.u
.s64
.a
= regs
[r1
];
1585 c
.u
.s64
.b
= tcg_const_i64(0);
1587 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1590 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1592 int r1
= get_field(s
->fields
, r1
);
1593 int r3
= get_field(s
->fields
, r3
);
1594 bool is_imm
= have_field(s
->fields
, i2
);
1595 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1599 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1604 t
= tcg_temp_new_i64();
1605 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1606 c
.u
.s32
.a
= tcg_temp_new_i32();
1607 c
.u
.s32
.b
= tcg_temp_new_i32();
1608 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1609 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1610 store_reg32_i64(r1
, t
);
1611 tcg_temp_free_i64(t
);
1613 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1616 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1618 int r1
= get_field(s
->fields
, r1
);
1619 int r3
= get_field(s
->fields
, r3
);
1620 bool is_imm
= have_field(s
->fields
, i2
);
1621 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1624 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1627 if (r1
== (r3
| 1)) {
1628 c
.u
.s64
.b
= load_reg(r3
| 1);
1631 c
.u
.s64
.b
= regs
[r3
| 1];
1635 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1636 c
.u
.s64
.a
= regs
[r1
];
1639 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1642 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1644 int imm
, m3
= get_field(s
->fields
, m3
);
1648 c
.cond
= ltgt_cond
[m3
];
1649 if (s
->insn
->data
) {
1650 c
.cond
= tcg_unsigned_cond(c
.cond
);
1652 c
.is_64
= c
.g1
= c
.g2
= true;
1656 is_imm
= have_field(s
->fields
, i4
);
1658 imm
= get_field(s
->fields
, i4
);
1661 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1662 get_field(s
->fields
, d4
));
1665 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
/* CEB: compare two 32-bit BFP operands; the helper computes and writes
   the condition code into cc_op from o->in1 and o->in2.
   NOTE(review): extraction dropped surrounding lines (braces,
   set_cc_static/return) — visible code kept byte-identical. */
1668 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1670 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
/* CDB: compare two 64-bit BFP operands; helper writes the condition
   code into cc_op from o->in1 and o->in2.
   NOTE(review): braces/return lines lost in extraction; code below is
   byte-identical to the visible source. */
1675 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1677 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
/* CXB: compare two 128-bit (extended) BFP operands.  Each operand is a
   register pair, hence four value arguments (out:out2 and in1:in2); the
   helper writes the condition code into cc_op.
   NOTE(review): braces/return lines lost in extraction. */
1682 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1684 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
/* CFEB: convert 32-bit BFP (o->in2) to 32-bit fixed, rounding mode
   taken from the m3 field.  The m3 field is passed to the helper as an
   i32 constant temp and freed afterwards; CC is then set from the
   source value via gen_set_cc_nz_f32.
   NOTE(review): braces/return lines lost in extraction. */
1689 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1691 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1692 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1693 tcg_temp_free_i32(m3
);
1694 gen_set_cc_nz_f32(s
, o
->in2
);
1698 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1700 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1701 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1702 tcg_temp_free_i32(m3
);
1703 gen_set_cc_nz_f64(s
, o
->in2
);
1707 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1709 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1710 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1711 tcg_temp_free_i32(m3
);
1712 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1716 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1718 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1719 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1720 tcg_temp_free_i32(m3
);
1721 gen_set_cc_nz_f32(s
, o
->in2
);
1725 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1727 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1728 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1729 tcg_temp_free_i32(m3
);
1730 gen_set_cc_nz_f64(s
, o
->in2
);
1734 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1736 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1737 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1738 tcg_temp_free_i32(m3
);
1739 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1743 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1745 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1746 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1747 tcg_temp_free_i32(m3
);
1748 gen_set_cc_nz_f32(s
, o
->in2
);
1752 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1754 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1755 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1756 tcg_temp_free_i32(m3
);
1757 gen_set_cc_nz_f64(s
, o
->in2
);
1761 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1763 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1764 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1765 tcg_temp_free_i32(m3
);
1766 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1770 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1772 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1773 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1774 tcg_temp_free_i32(m3
);
1775 gen_set_cc_nz_f32(s
, o
->in2
);
1779 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1781 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1782 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1783 tcg_temp_free_i32(m3
);
1784 gen_set_cc_nz_f64(s
, o
->in2
);
1788 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1790 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1791 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1792 tcg_temp_free_i32(m3
);
1793 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1797 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1799 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1800 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1801 tcg_temp_free_i32(m3
);
1805 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1807 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1808 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1809 tcg_temp_free_i32(m3
);
1813 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1815 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1816 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1817 tcg_temp_free_i32(m3
);
1818 return_low128(o
->out2
);
1822 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1824 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1825 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1826 tcg_temp_free_i32(m3
);
1830 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1832 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1833 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1834 tcg_temp_free_i32(m3
);
1838 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1840 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1841 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1842 tcg_temp_free_i32(m3
);
1843 return_low128(o
->out2
);
1847 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1849 int r2
= get_field(s
->fields
, r2
);
1850 TCGv_i64 len
= tcg_temp_new_i64();
1852 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1854 return_low128(o
->out
);
1856 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1857 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1858 tcg_temp_free_i64(len
);
1863 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1865 int l
= get_field(s
->fields
, l1
);
1870 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1871 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1874 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1875 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1878 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1879 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1882 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1883 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1886 vl
= tcg_const_i32(l
);
1887 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1888 tcg_temp_free_i32(vl
);
1892 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1896 static ExitStatus
op_clcl(DisasContext
*s
, DisasOps
*o
)
1898 int r1
= get_field(s
->fields
, r1
);
1899 int r2
= get_field(s
->fields
, r2
);
1902 /* r1 and r2 must be even. */
1903 if (r1
& 1 || r2
& 1) {
1904 gen_program_exception(s
, PGM_SPECIFICATION
);
1905 return EXIT_NORETURN
;
1908 t1
= tcg_const_i32(r1
);
1909 t2
= tcg_const_i32(r2
);
1910 gen_helper_clcl(cc_op
, cpu_env
, t1
, t2
);
1911 tcg_temp_free_i32(t1
);
1912 tcg_temp_free_i32(t2
);
1917 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1919 int r1
= get_field(s
->fields
, r1
);
1920 int r3
= get_field(s
->fields
, r3
);
1923 /* r1 and r3 must be even. */
1924 if (r1
& 1 || r3
& 1) {
1925 gen_program_exception(s
, PGM_SPECIFICATION
);
1926 return EXIT_NORETURN
;
1929 t1
= tcg_const_i32(r1
);
1930 t3
= tcg_const_i32(r3
);
1931 gen_helper_clcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1932 tcg_temp_free_i32(t1
);
1933 tcg_temp_free_i32(t3
);
1938 static ExitStatus
op_clclu(DisasContext
*s
, DisasOps
*o
)
1940 int r1
= get_field(s
->fields
, r1
);
1941 int r3
= get_field(s
->fields
, r3
);
1944 /* r1 and r3 must be even. */
1945 if (r1
& 1 || r3
& 1) {
1946 gen_program_exception(s
, PGM_SPECIFICATION
);
1947 return EXIT_NORETURN
;
1950 t1
= tcg_const_i32(r1
);
1951 t3
= tcg_const_i32(r3
);
1952 gen_helper_clclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1953 tcg_temp_free_i32(t1
);
1954 tcg_temp_free_i32(t3
);
1959 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1961 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1962 TCGv_i32 t1
= tcg_temp_new_i32();
1963 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1964 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1966 tcg_temp_free_i32(t1
);
1967 tcg_temp_free_i32(m3
);
1971 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1973 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1975 return_low128(o
->in2
);
1979 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1981 TCGv_i64 t
= tcg_temp_new_i64();
1982 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1983 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1984 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1985 tcg_temp_free_i64(t
);
1989 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1991 int d2
= get_field(s
->fields
, d2
);
1992 int b2
= get_field(s
->fields
, b2
);
1995 /* Note that in1 = R3 (new value) and
1996 in2 = (zero-extended) R1 (expected value). */
1998 addr
= get_address(s
, 0, b2
, d2
);
1999 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
2000 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
2001 tcg_temp_free_i64(addr
);
2003 /* Are the memory and expected values (un)equal? Note that this setcond
2004 produces the output CC value, thus the NE sense of the test. */
2005 cc
= tcg_temp_new_i64();
2006 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
2007 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2008 tcg_temp_free_i64(cc
);
2014 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
2016 int r1
= get_field(s
->fields
, r1
);
2017 int r3
= get_field(s
->fields
, r3
);
2018 int d2
= get_field(s
->fields
, d2
);
2019 int b2
= get_field(s
->fields
, b2
);
2021 TCGv_i32 t_r1
, t_r3
;
2023 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2024 addr
= get_address(s
, 0, b2
, d2
);
2025 t_r1
= tcg_const_i32(r1
);
2026 t_r3
= tcg_const_i32(r3
);
2027 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
2028 tcg_temp_free_i64(addr
);
2029 tcg_temp_free_i32(t_r1
);
2030 tcg_temp_free_i32(t_r3
);
2036 #ifndef CONFIG_USER_ONLY
2037 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
2039 TCGMemOp mop
= s
->insn
->data
;
2040 TCGv_i64 addr
, old
, cc
;
2041 TCGLabel
*lab
= gen_new_label();
2043 /* Note that in1 = R1 (zero-extended expected value),
2044 out = R1 (original reg), out2 = R1+1 (new value). */
2046 check_privileged(s
);
2047 addr
= tcg_temp_new_i64();
2048 old
= tcg_temp_new_i64();
2049 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2050 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2051 get_mem_index(s
), mop
| MO_ALIGN
);
2052 tcg_temp_free_i64(addr
);
2054 /* Are the memory and expected values (un)equal? */
2055 cc
= tcg_temp_new_i64();
2056 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2057 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2059 /* Write back the output now, so that it happens before the
2060 following branch, so that we don't need local temps. */
2061 if ((mop
& MO_SIZE
) == MO_32
) {
2062 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2064 tcg_gen_mov_i64(o
->out
, old
);
2066 tcg_temp_free_i64(old
);
2068 /* If the comparison was equal, and the LSB of R2 was set,
2069 then we need to flush the TLB (for all cpus). */
2070 tcg_gen_xori_i64(cc
, cc
, 1);
2071 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2072 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2073 tcg_temp_free_i64(cc
);
2075 gen_helper_purge(cpu_env
);
2082 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2084 TCGv_i64 t1
= tcg_temp_new_i64();
2085 TCGv_i32 t2
= tcg_temp_new_i32();
2086 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2087 gen_helper_cvd(t1
, t2
);
2088 tcg_temp_free_i32(t2
);
2089 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2090 tcg_temp_free_i64(t1
);
2094 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2096 int m3
= get_field(s
->fields
, m3
);
2097 TCGLabel
*lab
= gen_new_label();
2100 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2101 if (s
->insn
->data
) {
2102 c
= tcg_unsigned_cond(c
);
2104 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2113 #ifndef CONFIG_USER_ONLY
2114 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2116 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2117 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2118 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2120 check_privileged(s
);
2124 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2126 tcg_temp_free_i32(func_code
);
2127 tcg_temp_free_i32(r3
);
2128 tcg_temp_free_i32(r1
);
/* Signed 32-bit divide.  The helper returns one half of the
   quotient/remainder pair in o->out2; the other half comes back through
   the helper's low-128 return path, retrieved with return_low128 into
   o->out.
   NOTE(review): braces/return lines lost in extraction. */
2133 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2135 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2136 return_low128(o
->out
);
/* Unsigned 32-bit divide — same result plumbing as op_divs32:
   helper result into o->out2, second half via return_low128 into o->out.
   NOTE(review): braces/return lines lost in extraction. */
2140 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2142 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2143 return_low128(o
->out
);
/* Signed 64-bit divide; helper result into o->out2, second half of the
   quotient/remainder pair via return_low128 into o->out.
   NOTE(review): braces/return lines lost in extraction. */
2147 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2149 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2150 return_low128(o
->out
);
/* Unsigned 64-bit divide.  Unlike the 32-bit forms, the 128-bit
   dividend is passed as the register pair o->out:o->out2 (high:low),
   with the divisor in o->in2; second result half retrieved with
   return_low128 into o->out.
   NOTE(review): braces/return lines lost in extraction. */
2154 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2156 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2157 return_low128(o
->out
);
/* DEB: 32-bit BFP divide, o->out = in1 / in2 via helper.
   NOTE(review): braces/return lines lost in extraction. */
2161 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2163 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
/* DDB: 64-bit BFP divide, o->out = in1 / in2 via helper.
   NOTE(review): braces/return lines lost in extraction. */
2167 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2169 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
/* DXB: 128-bit (extended) BFP divide.  Both 128-bit operands are
   register pairs (out:out2 dividend, in1:in2 divisor); the high half of
   the result lands in o->out and the low half is fetched with
   return_low128 into o->out2.
   NOTE(review): braces/return lines lost in extraction. */
2173 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2175 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2176 return_low128(o
->out2
);
/* EAR: extract access register r2 — 32-bit load from
   CPUS390XState.aregs[r2] into the output, zero-extended to 64 bits.
   NOTE(review): braces/return lines lost in extraction. */
2180 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2182 int r2
= get_field(s
->fields
, r2
);
2183 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
/* ECAG: extract cache attribute.  This implementation reports no cache
   information at all — the output is simply set to -1 (all ones).
   NOTE(review): braces/return lines lost in extraction. */
2187 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2189 /* No cache information provided. */
2190 tcg_gen_movi_i64(o
->out
, -1);
/* EFPC: extract the floating-point control register — 32-bit load of
   CPUS390XState.fpc into the output, zero-extended.
   NOTE(review): braces/return lines lost in extraction. */
2194 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2196 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2200 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2202 int r1
= get_field(s
->fields
, r1
);
2203 int r2
= get_field(s
->fields
, r2
);
2204 TCGv_i64 t
= tcg_temp_new_i64();
2206 /* Note the "subsequently" in the PoO, which implies a defined result
2207 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2208 tcg_gen_shri_i64(t
, psw_mask
, 32);
2209 store_reg32_i64(r1
, t
);
2211 store_reg32_i64(r2
, psw_mask
);
2214 tcg_temp_free_i64(t
);
2218 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2220 int r1
= get_field(s
->fields
, r1
);
2224 /* Nested EXECUTE is not allowed. */
2225 if (unlikely(s
->ex_value
)) {
2226 gen_program_exception(s
, PGM_EXECUTE
);
2227 return EXIT_NORETURN
;
2234 v1
= tcg_const_i64(0);
2239 ilen
= tcg_const_i32(s
->ilen
);
2240 gen_helper_ex(cpu_env
, ilen
, v1
, o
->in2
);
2241 tcg_temp_free_i32(ilen
);
2244 tcg_temp_free_i64(v1
);
2247 return EXIT_PC_CC_UPDATED
;
2250 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2252 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2253 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2254 tcg_temp_free_i32(m3
);
2258 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2260 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2261 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2262 tcg_temp_free_i32(m3
);
2266 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2268 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2269 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2270 return_low128(o
->out2
);
2271 tcg_temp_free_i32(m3
);
2275 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2277 /* We'll use the original input for cc computation, since we get to
2278 compare that against 0, which ought to be better than comparing
2279 the real output against 64. It also lets cc_dst be a convenient
2280 temporary during our computation. */
2281 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2283 /* R1 = IN ? CLZ(IN) : 64. */
2284 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2286 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2287 value by 64, which is undefined. But since the shift is 64 iff the
2288 input is zero, we still get the correct result after and'ing. */
2289 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2290 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2291 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2295 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2297 int m3
= get_field(s
->fields
, m3
);
2298 int pos
, len
, base
= s
->insn
->data
;
2299 TCGv_i64 tmp
= tcg_temp_new_i64();
2304 /* Effectively a 32-bit load. */
2305 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2312 /* Effectively a 16-bit load. */
2313 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2321 /* Effectively an 8-bit load. */
2322 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2327 pos
= base
+ ctz32(m3
) * 8;
2328 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2329 ccm
= ((1ull << len
) - 1) << pos
;
2333 /* This is going to be a sequence of loads and inserts. */
2334 pos
= base
+ 32 - 8;
2338 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2339 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2340 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2343 m3
= (m3
<< 1) & 0xf;
2349 tcg_gen_movi_i64(tmp
, ccm
);
2350 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2351 tcg_temp_free_i64(tmp
);
/* Insert immediate: deposit o->in2 into o->in1 at a bit position and
   width packed into insn->data (low byte = shift, remaining bits =
   field size), result in o->out.
   NOTE(review): braces/return lines lost in extraction. */
2355 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2357 int shift
= s
->insn
->data
& 0xff;
2358 int size
= s
->insn
->data
>> 8;
2359 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2363 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2368 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2370 t1
= tcg_temp_new_i64();
2371 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2372 tcg_gen_shri_i64(t1
, t1
, 36);
2373 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2375 tcg_gen_extu_i32_i64(t1
, cc_op
);
2376 tcg_gen_shli_i64(t1
, t1
, 28);
2377 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2378 tcg_temp_free_i64(t1
);
2382 #ifndef CONFIG_USER_ONLY
2383 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2387 check_privileged(s
);
2388 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2389 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
, m4
);
2390 tcg_temp_free_i32(m4
);
2394 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2396 check_privileged(s
);
2397 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
/* KEB: 32-bit BFP compare-and-signal (signaling compare); helper
   writes the condition code into cc_op from o->in1 and o->in2.
   NOTE(review): braces/return lines lost in extraction. */
2402 static ExitStatus
op_keb(DisasContext
*s
, DisasOps
*o
)
2404 gen_helper_keb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
/* KDB: 64-bit BFP compare-and-signal; helper writes the condition code
   into cc_op from o->in1 and o->in2.
   NOTE(review): braces/return lines lost in extraction. */
2409 static ExitStatus
op_kdb(DisasContext
*s
, DisasOps
*o
)
2411 gen_helper_kdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
/* KXB: 128-bit BFP compare-and-signal; both operands are register
   pairs (out:out2 and in1:in2), condition code into cc_op.
   NOTE(review): braces/return lines lost in extraction. */
2416 static ExitStatus
op_kxb(DisasContext
*s
, DisasOps
*o
)
2418 gen_helper_kxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2423 static ExitStatus
op_laa(DisasContext
*s
, DisasOps
*o
)
2425 /* The real output is indeed the original value in memory;
2426 recompute the addition for the computation of CC. */
2427 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2428 s
->insn
->data
| MO_ALIGN
);
2429 /* However, we need to recompute the addition for setting CC. */
2430 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2434 static ExitStatus
op_lan(DisasContext
*s
, DisasOps
*o
)
2436 /* The real output is indeed the original value in memory;
2437 recompute the addition for the computation of CC. */
2438 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2439 s
->insn
->data
| MO_ALIGN
);
2440 /* However, we need to recompute the operation for setting CC. */
2441 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2445 static ExitStatus
op_lao(DisasContext
*s
, DisasOps
*o
)
2447 /* The real output is indeed the original value in memory;
2448 recompute the addition for the computation of CC. */
2449 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2450 s
->insn
->data
| MO_ALIGN
);
2451 /* However, we need to recompute the operation for setting CC. */
2452 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2456 static ExitStatus
op_lax(DisasContext
*s
, DisasOps
*o
)
2458 /* The real output is indeed the original value in memory;
2459 recompute the addition for the computation of CC. */
2460 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2461 s
->insn
->data
| MO_ALIGN
);
2462 /* However, we need to recompute the operation for setting CC. */
2463 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
/* LDEB: lengthen 32-bit BFP (o->in2) to 64-bit BFP (o->out) via helper.
   NOTE(review): braces/return lines lost in extraction. */
2467 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2469 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
/* LEDB: round 64-bit BFP (o->in2) down to 32-bit BFP (o->out) via
   helper.
   NOTE(review): braces/return lines lost in extraction. */
2473 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2475 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2479 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2481 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2485 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2487 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
/* LXDB: lengthen 64-bit BFP to 128-bit BFP.  High half of the result
   in o->out, low half via return_low128 into o->out2.
   NOTE(review): braces/return lines lost in extraction. */
2491 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2493 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2494 return_low128(o
->out2
);
/* LXEB: lengthen 32-bit BFP to 128-bit BFP.  High half in o->out, low
   half via return_low128 into o->out2.
   NOTE(review): braces/return lines lost in extraction. */
2498 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2500 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2501 return_low128(o
->out2
);
/* LLGT: load 31-bit value — mask the source down to its low 31 bits
   (0x7fffffff), clearing bit 32 and everything above.
   NOTE(review): braces/return lines lost in extraction. */
2505 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2507 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
/* Sign-extending 8-bit load from the address in o->in2 into o->out,
   using the current memory index (address space) of the context.
   NOTE(review): braces/return lines lost in extraction. */
2511 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2513 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2517 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2519 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2523 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2525 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2529 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2531 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2535 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2537 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2541 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2543 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
/* 64-bit load from the address in o->in2 into o->out, using the
   current memory index of the context.
   NOTE(review): braces/return lines lost in extraction. */
2547 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2549 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2553 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2555 TCGLabel
*lab
= gen_new_label();
2556 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2557 /* The value is stored even in case of trap. */
2558 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2564 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2566 TCGLabel
*lab
= gen_new_label();
2567 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2568 /* The value is stored even in case of trap. */
2569 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2575 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2577 TCGLabel
*lab
= gen_new_label();
2578 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2579 /* The value is stored even in case of trap. */
2580 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2586 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2588 TCGLabel
*lab
= gen_new_label();
2589 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2590 /* The value is stored even in case of trap. */
2591 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2597 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2599 TCGLabel
*lab
= gen_new_label();
2600 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2601 /* The value is stored even in case of trap. */
2602 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2608 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2612 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2615 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2619 TCGv_i32 t32
= tcg_temp_new_i32();
2622 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2625 t
= tcg_temp_new_i64();
2626 tcg_gen_extu_i32_i64(t
, t32
);
2627 tcg_temp_free_i32(t32
);
2629 z
= tcg_const_i64(0);
2630 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2631 tcg_temp_free_i64(t
);
2632 tcg_temp_free_i64(z
);
2638 #ifndef CONFIG_USER_ONLY
2639 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2641 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2642 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2643 check_privileged(s
);
2644 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2645 tcg_temp_free_i32(r1
);
2646 tcg_temp_free_i32(r3
);
2650 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2652 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2653 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2654 check_privileged(s
);
2655 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2656 tcg_temp_free_i32(r1
);
2657 tcg_temp_free_i32(r3
);
2661 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2663 check_privileged(s
);
2664 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2669 static ExitStatus
op_lpp(DisasContext
*s
, DisasOps
*o
)
2671 check_privileged(s
);
2673 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
2677 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2681 check_privileged(s
);
2682 per_breaking_event(s
);
2684 t1
= tcg_temp_new_i64();
2685 t2
= tcg_temp_new_i64();
2686 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2687 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2688 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2689 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2690 tcg_gen_shli_i64(t1
, t1
, 32);
2691 gen_helper_load_psw(cpu_env
, t1
, t2
);
2692 tcg_temp_free_i64(t1
);
2693 tcg_temp_free_i64(t2
);
2694 return EXIT_NORETURN
;
2697 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2701 check_privileged(s
);
2702 per_breaking_event(s
);
2704 t1
= tcg_temp_new_i64();
2705 t2
= tcg_temp_new_i64();
2706 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2707 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2708 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2709 gen_helper_load_psw(cpu_env
, t1
, t2
);
2710 tcg_temp_free_i64(t1
);
2711 tcg_temp_free_i64(t2
);
2712 return EXIT_NORETURN
;
2716 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2718 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2719 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2720 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2721 tcg_temp_free_i32(r1
);
2722 tcg_temp_free_i32(r3
);
2726 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2728 int r1
= get_field(s
->fields
, r1
);
2729 int r3
= get_field(s
->fields
, r3
);
2732 /* Only one register to read. */
2733 t1
= tcg_temp_new_i64();
2734 if (unlikely(r1
== r3
)) {
2735 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2736 store_reg32_i64(r1
, t1
);
2741 /* First load the values of the first and last registers to trigger
2742 possible page faults. */
2743 t2
= tcg_temp_new_i64();
2744 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2745 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2746 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2747 store_reg32_i64(r1
, t1
);
2748 store_reg32_i64(r3
, t2
);
2750 /* Only two registers to read. */
2751 if (((r1
+ 1) & 15) == r3
) {
2757 /* Then load the remaining registers. Page fault can't occur. */
2759 tcg_gen_movi_i64(t2
, 4);
2762 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2763 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2764 store_reg32_i64(r1
, t1
);
2772 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2774 int r1
= get_field(s
->fields
, r1
);
2775 int r3
= get_field(s
->fields
, r3
);
2778 /* Only one register to read. */
2779 t1
= tcg_temp_new_i64();
2780 if (unlikely(r1
== r3
)) {
2781 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2782 store_reg32h_i64(r1
, t1
);
2787 /* First load the values of the first and last registers to trigger
2788 possible page faults. */
2789 t2
= tcg_temp_new_i64();
2790 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2791 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2792 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2793 store_reg32h_i64(r1
, t1
);
2794 store_reg32h_i64(r3
, t2
);
2796 /* Only two registers to read. */
2797 if (((r1
+ 1) & 15) == r3
) {
2803 /* Then load the remaining registers. Page fault can't occur. */
2805 tcg_gen_movi_i64(t2
, 4);
2808 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2809 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2810 store_reg32h_i64(r1
, t1
);
2818 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2820 int r1
= get_field(s
->fields
, r1
);
2821 int r3
= get_field(s
->fields
, r3
);
2824 /* Only one register to read. */
2825 if (unlikely(r1
== r3
)) {
2826 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2830 /* First load the values of the first and last registers to trigger
2831 possible page faults. */
2832 t1
= tcg_temp_new_i64();
2833 t2
= tcg_temp_new_i64();
2834 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2835 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2836 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2837 tcg_gen_mov_i64(regs
[r1
], t1
);
2840 /* Only two registers to read. */
2841 if (((r1
+ 1) & 15) == r3
) {
2846 /* Then load the remaining registers. Page fault can't occur. */
2848 tcg_gen_movi_i64(t1
, 8);
2851 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2852 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2859 static ExitStatus
op_lpd(DisasContext
*s
, DisasOps
*o
)
2862 TCGMemOp mop
= s
->insn
->data
;
2864 /* In a parallel context, stop the world and single step. */
2865 if (parallel_cpus
) {
2866 potential_page_fault(s
);
2867 gen_exception(EXCP_ATOMIC
);
2868 return EXIT_NORETURN
;
2871 /* In a serial context, perform the two loads ... */
2872 a1
= get_address(s
, 0, get_field(s
->fields
, b1
), get_field(s
->fields
, d1
));
2873 a2
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
2874 tcg_gen_qemu_ld_i64(o
->out
, a1
, get_mem_index(s
), mop
| MO_ALIGN
);
2875 tcg_gen_qemu_ld_i64(o
->out2
, a2
, get_mem_index(s
), mop
| MO_ALIGN
);
2876 tcg_temp_free_i64(a1
);
2877 tcg_temp_free_i64(a2
);
2879 /* ... and indicate that we performed them while interlocked. */
2880 gen_op_movi_cc(s
, 0);
/* LPQ: load pair quadword.  Helper returns the high 64 bits in o->out;
   the low 64 bits are fetched with return_low128 into o->out2.
   NOTE(review): braces/return lines lost in extraction. */
2884 static ExitStatus
op_lpq(DisasContext
*s
, DisasOps
*o
)
2886 gen_helper_lpq(o
->out
, cpu_env
, o
->in2
);
2887 return_low128(o
->out2
);
2891 #ifndef CONFIG_USER_ONLY
/* LURA: load using real address (privileged).  Checks privilege, marks
   a potential page fault, then loads via helper from the real address
   in o->in2 into o->out.
   NOTE(review): braces/return lines lost in extraction. */
2892 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2894 check_privileged(s
);
2895 potential_page_fault(s
);
2896 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2900 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2902 check_privileged(s
);
2903 potential_page_fault(s
);
2904 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2909 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2912 o
->g_out
= o
->g_in2
;
2913 TCGV_UNUSED_I64(o
->in2
);
2918 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
2920 int b2
= get_field(s
->fields
, b2
);
2921 TCGv ar1
= tcg_temp_new_i64();
2924 o
->g_out
= o
->g_in2
;
2925 TCGV_UNUSED_I64(o
->in2
);
2928 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
2929 case PSW_ASC_PRIMARY
>> 32:
2930 tcg_gen_movi_i64(ar1
, 0);
2932 case PSW_ASC_ACCREG
>> 32:
2933 tcg_gen_movi_i64(ar1
, 1);
2935 case PSW_ASC_SECONDARY
>> 32:
2937 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
2939 tcg_gen_movi_i64(ar1
, 0);
2942 case PSW_ASC_HOME
>> 32:
2943 tcg_gen_movi_i64(ar1
, 2);
2947 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
2948 tcg_temp_free_i64(ar1
);
2953 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2957 o
->g_out
= o
->g_in1
;
2958 o
->g_out2
= o
->g_in2
;
2959 TCGV_UNUSED_I64(o
->in1
);
2960 TCGV_UNUSED_I64(o
->in2
);
2961 o
->g_in1
= o
->g_in2
= false;
2965 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2967 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2968 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2969 tcg_temp_free_i32(l
);
2973 static ExitStatus
op_mvcin(DisasContext
*s
, DisasOps
*o
)
2975 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2976 gen_helper_mvcin(cpu_env
, l
, o
->addr1
, o
->in2
);
2977 tcg_temp_free_i32(l
);
2981 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2983 int r1
= get_field(s
->fields
, r1
);
2984 int r2
= get_field(s
->fields
, r2
);
2987 /* r1 and r2 must be even. */
2988 if (r1
& 1 || r2
& 1) {
2989 gen_program_exception(s
, PGM_SPECIFICATION
);
2990 return EXIT_NORETURN
;
2993 t1
= tcg_const_i32(r1
);
2994 t2
= tcg_const_i32(r2
);
2995 gen_helper_mvcl(cc_op
, cpu_env
, t1
, t2
);
2996 tcg_temp_free_i32(t1
);
2997 tcg_temp_free_i32(t2
);
3002 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
3004 int r1
= get_field(s
->fields
, r1
);
3005 int r3
= get_field(s
->fields
, r3
);
3008 /* r1 and r3 must be even. */
3009 if (r1
& 1 || r3
& 1) {
3010 gen_program_exception(s
, PGM_SPECIFICATION
);
3011 return EXIT_NORETURN
;
3014 t1
= tcg_const_i32(r1
);
3015 t3
= tcg_const_i32(r3
);
3016 gen_helper_mvcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3017 tcg_temp_free_i32(t1
);
3018 tcg_temp_free_i32(t3
);
3023 static ExitStatus
op_mvclu(DisasContext
*s
, DisasOps
*o
)
3025 int r1
= get_field(s
->fields
, r1
);
3026 int r3
= get_field(s
->fields
, r3
);
3029 /* r1 and r3 must be even. */
3030 if (r1
& 1 || r3
& 1) {
3031 gen_program_exception(s
, PGM_SPECIFICATION
);
3032 return EXIT_NORETURN
;
3035 t1
= tcg_const_i32(r1
);
3036 t3
= tcg_const_i32(r3
);
3037 gen_helper_mvclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3038 tcg_temp_free_i32(t1
);
3039 tcg_temp_free_i32(t3
);
3044 #ifndef CONFIG_USER_ONLY
3045 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
3047 int r1
= get_field(s
->fields
, l1
);
3048 check_privileged(s
);
3049 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3054 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
3056 int r1
= get_field(s
->fields
, l1
);
3057 check_privileged(s
);
3058 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3064 static ExitStatus
op_mvn(DisasContext
*s
, DisasOps
*o
)
3066 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3067 gen_helper_mvn(cpu_env
, l
, o
->addr1
, o
->in2
);
3068 tcg_temp_free_i32(l
);
3072 static ExitStatus
op_mvo(DisasContext
*s
, DisasOps
*o
)
3074 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3075 gen_helper_mvo(cpu_env
, l
, o
->addr1
, o
->in2
);
3076 tcg_temp_free_i32(l
);
3080 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
3082 gen_helper_mvpg(cc_op
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3087 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
3089 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3091 return_low128(o
->in2
);
3095 static ExitStatus
op_mvz(DisasContext
*s
, DisasOps
*o
)
3097 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3098 gen_helper_mvz(cpu_env
, l
, o
->addr1
, o
->in2
);
3099 tcg_temp_free_i32(l
);
3103 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
3105 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3109 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
3111 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
3115 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
3117 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3121 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
3123 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3127 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
3129 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3133 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
3135 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3136 return_low128(o
->out2
);
3140 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
3142 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3143 return_low128(o
->out2
);
3147 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
3149 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3150 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3151 tcg_temp_free_i64(r3
);
3155 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
3157 int r3
= get_field(s
->fields
, r3
);
3158 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3162 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
3164 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3165 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3166 tcg_temp_free_i64(r3
);
3170 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
3172 int r3
= get_field(s
->fields
, r3
);
3173 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3177 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
3180 z
= tcg_const_i64(0);
3181 n
= tcg_temp_new_i64();
3182 tcg_gen_neg_i64(n
, o
->in2
);
3183 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
3184 tcg_temp_free_i64(n
);
3185 tcg_temp_free_i64(z
);
3189 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
3191 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3195 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
3197 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3201 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
3203 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3204 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3208 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
3210 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3211 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3212 tcg_temp_free_i32(l
);
3217 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3219 tcg_gen_neg_i64(o
->out
, o
->in2
);
3223 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
3225 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3229 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
3231 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3235 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
3237 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3238 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3242 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3244 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3245 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3246 tcg_temp_free_i32(l
);
3251 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3253 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3257 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3259 int shift
= s
->insn
->data
& 0xff;
3260 int size
= s
->insn
->data
>> 8;
3261 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3264 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3265 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3267 /* Produce the CC from only the bits manipulated. */
3268 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3269 set_cc_nz_u64(s
, cc_dst
);
3273 static ExitStatus
op_pack(DisasContext
*s
, DisasOps
*o
)
3275 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3276 gen_helper_pack(cpu_env
, l
, o
->addr1
, o
->in2
);
3277 tcg_temp_free_i32(l
);
3281 static ExitStatus
op_pka(DisasContext
*s
, DisasOps
*o
)
3283 int l2
= get_field(s
->fields
, l2
) + 1;
3286 /* The length must not exceed 32 bytes. */
3288 gen_program_exception(s
, PGM_SPECIFICATION
);
3289 return EXIT_NORETURN
;
3291 l
= tcg_const_i32(l2
);
3292 gen_helper_pka(cpu_env
, o
->addr1
, o
->in2
, l
);
3293 tcg_temp_free_i32(l
);
3297 static ExitStatus
op_pku(DisasContext
*s
, DisasOps
*o
)
3299 int l2
= get_field(s
->fields
, l2
) + 1;
3302 /* The length must be even and should not exceed 64 bytes. */
3303 if ((l2
& 1) || (l2
> 64)) {
3304 gen_program_exception(s
, PGM_SPECIFICATION
);
3305 return EXIT_NORETURN
;
3307 l
= tcg_const_i32(l2
);
3308 gen_helper_pku(cpu_env
, o
->addr1
, o
->in2
, l
);
3309 tcg_temp_free_i32(l
);
3313 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3315 gen_helper_popcnt(o
->out
, o
->in2
);
3319 #ifndef CONFIG_USER_ONLY
3320 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3322 check_privileged(s
);
3323 gen_helper_ptlb(cpu_env
);
3328 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3330 int i3
= get_field(s
->fields
, i3
);
3331 int i4
= get_field(s
->fields
, i4
);
3332 int i5
= get_field(s
->fields
, i5
);
3333 int do_zero
= i4
& 0x80;
3334 uint64_t mask
, imask
, pmask
;
3337 /* Adjust the arguments for the specific insn. */
3338 switch (s
->fields
->op2
) {
3339 case 0x55: /* risbg */
3344 case 0x5d: /* risbhg */
3347 pmask
= 0xffffffff00000000ull
;
3349 case 0x51: /* risblg */
3352 pmask
= 0x00000000ffffffffull
;
3358 /* MASK is the set of bits to be inserted from R2.
3359 Take care for I3/I4 wraparound. */
3362 mask
^= pmask
>> i4
>> 1;
3364 mask
|= ~(pmask
>> i4
>> 1);
3368 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3369 insns, we need to keep the other half of the register. */
3370 imask
= ~mask
| ~pmask
;
3372 if (s
->fields
->op2
== 0x55) {
3382 if (s
->fields
->op2
== 0x5d) {
3386 /* In some cases we can implement this with extract. */
3387 if (imask
== 0 && pos
== 0 && len
> 0 && rot
+ len
<= 64) {
3388 tcg_gen_extract_i64(o
->out
, o
->in2
, rot
, len
);
3392 /* In some cases we can implement this with deposit. */
3393 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3394 /* Note that we rotate the bits to be inserted to the lsb, not to
3395 the position as described in the PoO. */
3396 rot
= (rot
- pos
) & 63;
3401 /* Rotate the input as necessary. */
3402 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3404 /* Insert the selected bits into the output. */
3407 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3409 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3411 } else if (imask
== 0) {
3412 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3414 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3415 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3416 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3421 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3423 int i3
= get_field(s
->fields
, i3
);
3424 int i4
= get_field(s
->fields
, i4
);
3425 int i5
= get_field(s
->fields
, i5
);
3428 /* If this is a test-only form, arrange to discard the result. */
3430 o
->out
= tcg_temp_new_i64();
3438 /* MASK is the set of bits to be operated on from R2.
3439 Take care for I3/I4 wraparound. */
3442 mask
^= ~0ull >> i4
>> 1;
3444 mask
|= ~(~0ull >> i4
>> 1);
3447 /* Rotate the input as necessary. */
3448 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3451 switch (s
->fields
->op2
) {
3452 case 0x55: /* AND */
3453 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3454 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3457 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3458 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3460 case 0x57: /* XOR */
3461 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3462 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3469 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3470 set_cc_nz_u64(s
, cc_dst
);
3474 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
3476 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3480 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
3482 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3486 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
3488 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3492 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3494 TCGv_i32 t1
= tcg_temp_new_i32();
3495 TCGv_i32 t2
= tcg_temp_new_i32();
3496 TCGv_i32 to
= tcg_temp_new_i32();
3497 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3498 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3499 tcg_gen_rotl_i32(to
, t1
, t2
);
3500 tcg_gen_extu_i32_i64(o
->out
, to
);
3501 tcg_temp_free_i32(t1
);
3502 tcg_temp_free_i32(t2
);
3503 tcg_temp_free_i32(to
);
3507 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3509 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3513 #ifndef CONFIG_USER_ONLY
3514 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3516 check_privileged(s
);
3517 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3522 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
3524 check_privileged(s
);
3525 gen_helper_sacf(cpu_env
, o
->in2
);
3526 /* Addressing mode has changed, so end the block. */
3527 return EXIT_PC_STALE
;
3531 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3533 int sam
= s
->insn
->data
;
3549 /* Bizarre but true, we check the address of the current insn for the
3550 specification exception, not the next to be executed. Thus the PoO
3551 documents that Bad Things Happen two bytes before the end. */
3552 if (s
->pc
& ~mask
) {
3553 gen_program_exception(s
, PGM_SPECIFICATION
);
3554 return EXIT_NORETURN
;
3558 tsam
= tcg_const_i64(sam
);
3559 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3560 tcg_temp_free_i64(tsam
);
3562 /* Always exit the TB, since we (may have) changed execution mode. */
3563 return EXIT_PC_STALE
;
3566 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3568 int r1
= get_field(s
->fields
, r1
);
3569 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3573 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3575 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3579 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3581 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3585 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3587 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3588 return_low128(o
->out2
);
3592 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3594 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3598 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3600 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3604 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3606 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3607 return_low128(o
->out2
);
3611 #ifndef CONFIG_USER_ONLY
3612 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3614 check_privileged(s
);
3615 potential_page_fault(s
);
3616 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3621 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3623 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3624 check_privileged(s
);
3625 potential_page_fault(s
);
3626 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3628 tcg_temp_free_i32(r1
);
3633 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3640 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3642 /* We want to store when the condition is fulfilled, so branch
3643 out when it's not */
3644 c
.cond
= tcg_invert_cond(c
.cond
);
3646 lab
= gen_new_label();
3648 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3650 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3654 r1
= get_field(s
->fields
, r1
);
3655 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3656 if (s
->insn
->data
) {
3657 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3659 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3661 tcg_temp_free_i64(a
);
3667 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3669 uint64_t sign
= 1ull << s
->insn
->data
;
3670 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3671 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3672 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3673 /* The arithmetic left shift is curious in that it does not affect
3674 the sign bit. Copy that over from the source unchanged. */
3675 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3676 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3677 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3681 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3683 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3687 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3689 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3693 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3695 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3699 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3701 gen_helper_sfpc(cpu_env
, o
->in2
);
3705 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3707 gen_helper_sfas(cpu_env
, o
->in2
);
3711 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3713 int b2
= get_field(s
->fields
, b2
);
3714 int d2
= get_field(s
->fields
, d2
);
3715 TCGv_i64 t1
= tcg_temp_new_i64();
3716 TCGv_i64 t2
= tcg_temp_new_i64();
3719 switch (s
->fields
->op2
) {
3720 case 0x99: /* SRNM */
3723 case 0xb8: /* SRNMB */
3726 case 0xb9: /* SRNMT */
3732 mask
= (1 << len
) - 1;
3734 /* Insert the value into the appropriate field of the FPC. */
3736 tcg_gen_movi_i64(t1
, d2
& mask
);
3738 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3739 tcg_gen_andi_i64(t1
, t1
, mask
);
3741 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3742 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3743 tcg_temp_free_i64(t1
);
3745 /* Then install the new FPC to set the rounding mode in fpu_status. */
3746 gen_helper_sfpc(cpu_env
, t2
);
3747 tcg_temp_free_i64(t2
);
3751 #ifndef CONFIG_USER_ONLY
3752 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3754 check_privileged(s
);
3755 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3756 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
, 4);
3760 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3762 check_privileged(s
);
3763 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3767 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3769 check_privileged(s
);
3770 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3771 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3772 return EXIT_PC_STALE_NOCHAIN
;
3775 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3777 check_privileged(s
);
3778 /* ??? Surely cpu address != cpu number. In any case the previous
3779 version of this stored more than the required half-word, so it
3780 is unlikely this has ever been tested. */
3781 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3785 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3787 gen_helper_stck(o
->out
, cpu_env
);
3788 /* ??? We don't implement clock states. */
3789 gen_op_movi_cc(s
, 0);
3793 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3795 TCGv_i64 c1
= tcg_temp_new_i64();
3796 TCGv_i64 c2
= tcg_temp_new_i64();
3797 gen_helper_stck(c1
, cpu_env
);
3798 /* Shift the 64-bit value into its place as a zero-extended
3799 104-bit value. Note that "bit positions 64-103 are always
3800 non-zero so that they compare differently to STCK"; we set
3801 the least significant bit to 1. */
3802 tcg_gen_shli_i64(c2
, c1
, 56);
3803 tcg_gen_shri_i64(c1
, c1
, 8);
3804 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3805 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3806 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3807 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3808 tcg_temp_free_i64(c1
);
3809 tcg_temp_free_i64(c2
);
3810 /* ??? We don't implement clock states. */
3811 gen_op_movi_cc(s
, 0);
3815 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3817 check_privileged(s
);
3818 gen_helper_sckc(cpu_env
, o
->in2
);
3822 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3824 check_privileged(s
);
3825 gen_helper_stckc(o
->out
, cpu_env
);
3829 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3831 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3832 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3833 check_privileged(s
);
3834 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3835 tcg_temp_free_i32(r1
);
3836 tcg_temp_free_i32(r3
);
3840 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3842 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3843 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3844 check_privileged(s
);
3845 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3846 tcg_temp_free_i32(r1
);
3847 tcg_temp_free_i32(r3
);
3851 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3853 check_privileged(s
);
3854 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpuid
));
3855 tcg_gen_qemu_st_i64(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
3859 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3861 check_privileged(s
);
3862 gen_helper_spt(cpu_env
, o
->in2
);
3866 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3868 check_privileged(s
);
3869 gen_helper_stfl(cpu_env
);
3873 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3875 check_privileged(s
);
3876 gen_helper_stpt(o
->out
, cpu_env
);
3880 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3882 check_privileged(s
);
3883 potential_page_fault(s
);
3884 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3889 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3891 check_privileged(s
);
3892 gen_helper_spx(cpu_env
, o
->in2
);
3896 static ExitStatus
op_xsch(DisasContext
*s
, DisasOps
*o
)
3898 check_privileged(s
);
3899 potential_page_fault(s
);
3900 gen_helper_xsch(cpu_env
, regs
[1]);
3905 static ExitStatus
op_csch(DisasContext
*s
, DisasOps
*o
)
3907 check_privileged(s
);
3908 potential_page_fault(s
);
3909 gen_helper_csch(cpu_env
, regs
[1]);
3914 static ExitStatus
op_hsch(DisasContext
*s
, DisasOps
*o
)
3916 check_privileged(s
);
3917 potential_page_fault(s
);
3918 gen_helper_hsch(cpu_env
, regs
[1]);
3923 static ExitStatus
op_msch(DisasContext
*s
, DisasOps
*o
)
3925 check_privileged(s
);
3926 potential_page_fault(s
);
3927 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
3932 static ExitStatus
op_rchp(DisasContext
*s
, DisasOps
*o
)
3934 check_privileged(s
);
3935 potential_page_fault(s
);
3936 gen_helper_rchp(cpu_env
, regs
[1]);
3941 static ExitStatus
op_rsch(DisasContext
*s
, DisasOps
*o
)
3943 check_privileged(s
);
3944 potential_page_fault(s
);
3945 gen_helper_rsch(cpu_env
, regs
[1]);
3950 static ExitStatus
op_ssch(DisasContext
*s
, DisasOps
*o
)
3952 check_privileged(s
);
3953 potential_page_fault(s
);
3954 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
3959 static ExitStatus
op_stsch(DisasContext
*s
, DisasOps
*o
)
3961 check_privileged(s
);
3962 potential_page_fault(s
);
3963 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
3968 static ExitStatus
op_tsch(DisasContext
*s
, DisasOps
*o
)
3970 check_privileged(s
);
3971 potential_page_fault(s
);
3972 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
3977 static ExitStatus
op_chsc(DisasContext
*s
, DisasOps
*o
)
3979 check_privileged(s
);
3980 potential_page_fault(s
);
3981 gen_helper_chsc(cpu_env
, o
->in2
);
3986 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
3988 check_privileged(s
);
3989 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
3990 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
3994 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3996 uint64_t i2
= get_field(s
->fields
, i2
);
3999 check_privileged(s
);
4001 /* It is important to do what the instruction name says: STORE THEN.
4002 If we let the output hook perform the store then if we fault and
4003 restart, we'll have the wrong SYSTEM MASK in place. */
4004 t
= tcg_temp_new_i64();
4005 tcg_gen_shri_i64(t
, psw_mask
, 56);
4006 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
4007 tcg_temp_free_i64(t
);
4009 if (s
->fields
->op
== 0xac) {
4010 tcg_gen_andi_i64(psw_mask
, psw_mask
,
4011 (i2
<< 56) | 0x00ffffffffffffffull
);
4013 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
4016 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4017 return EXIT_PC_STALE_NOCHAIN
;
4020 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
4022 check_privileged(s
);
4023 potential_page_fault(s
);
4024 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
4028 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
4030 check_privileged(s
);
4031 potential_page_fault(s
);
4032 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
4037 static ExitStatus
op_stfle(DisasContext
*s
, DisasOps
*o
)
4039 potential_page_fault(s
);
4040 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
4045 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
4047 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
4051 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
4053 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
4057 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
4059 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
4063 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
4065 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
4069 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
4071 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4072 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4073 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
4074 tcg_temp_free_i32(r1
);
4075 tcg_temp_free_i32(r3
);
4079 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
4081 int m3
= get_field(s
->fields
, m3
);
4082 int pos
, base
= s
->insn
->data
;
4083 TCGv_i64 tmp
= tcg_temp_new_i64();
4085 pos
= base
+ ctz32(m3
) * 8;
4088 /* Effectively a 32-bit store. */
4089 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4090 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
4096 /* Effectively a 16-bit store. */
4097 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4098 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
4105 /* Effectively an 8-bit store. */
4106 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4107 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4111 /* This is going to be a sequence of shifts and stores. */
4112 pos
= base
+ 32 - 8;
4115 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4116 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4117 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
4119 m3
= (m3
<< 1) & 0xf;
4124 tcg_temp_free_i64(tmp
);
4128 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
4130 int r1
= get_field(s
->fields
, r1
);
4131 int r3
= get_field(s
->fields
, r3
);
4132 int size
= s
->insn
->data
;
4133 TCGv_i64 tsize
= tcg_const_i64(size
);
4137 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
4139 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
4144 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
4148 tcg_temp_free_i64(tsize
);
4152 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
4154 int r1
= get_field(s
->fields
, r1
);
4155 int r3
= get_field(s
->fields
, r3
);
4156 TCGv_i64 t
= tcg_temp_new_i64();
4157 TCGv_i64 t4
= tcg_const_i64(4);
4158 TCGv_i64 t32
= tcg_const_i64(32);
4161 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
4162 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
4166 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
4170 tcg_temp_free_i64(t
);
4171 tcg_temp_free_i64(t4
);
4172 tcg_temp_free_i64(t32
);
4176 static ExitStatus
op_stpq(DisasContext
*s
, DisasOps
*o
)
4178 gen_helper_stpq(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4182 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
4184 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
4186 return_low128(o
->in2
);
4190 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
4192 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4196 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
4201 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4203 /* The !borrow flag is the msb of CC. Since we want the inverse of
4204 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4205 disas_jcc(s
, &cmp
, 8 | 4);
4206 borrow
= tcg_temp_new_i64();
4208 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
4210 TCGv_i32 t
= tcg_temp_new_i32();
4211 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
4212 tcg_gen_extu_i32_i64(borrow
, t
);
4213 tcg_temp_free_i32(t
);
4217 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
4218 tcg_temp_free_i64(borrow
);
4222 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
4229 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
4230 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4231 tcg_temp_free_i32(t
);
4233 t
= tcg_const_i32(s
->ilen
);
4234 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4235 tcg_temp_free_i32(t
);
4237 gen_exception(EXCP_SVC
);
4238 return EXIT_NORETURN
;
4241 static ExitStatus
op_tam(DisasContext
*s
, DisasOps
*o
)
4245 cc
|= (s
->tb
->flags
& FLAG_MASK_64
) ? 2 : 0;
4246 cc
|= (s
->tb
->flags
& FLAG_MASK_32
) ? 1 : 0;
4247 gen_op_movi_cc(s
, cc
);
4251 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
4253 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4258 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4260 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4265 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4267 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4272 #ifndef CONFIG_USER_ONLY
4274 static ExitStatus
op_testblock(DisasContext
*s
, DisasOps
*o
)
4276 check_privileged(s
);
4277 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4282 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
4284 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
4291 static ExitStatus
op_tp(DisasContext
*s
, DisasOps
*o
)
4293 TCGv_i32 l1
= tcg_const_i32(get_field(s
->fields
, l1
) + 1);
4294 gen_helper_tp(cc_op
, cpu_env
, o
->addr1
, l1
);
4295 tcg_temp_free_i32(l1
);
4300 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
4302 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4303 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4304 tcg_temp_free_i32(l
);
4309 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
4311 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4312 return_low128(o
->out2
);
4317 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
4319 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4320 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4321 tcg_temp_free_i32(l
);
4326 static ExitStatus
op_trXX(DisasContext
*s
, DisasOps
*o
)
4328 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4329 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4330 TCGv_i32 sizes
= tcg_const_i32(s
->insn
->opc
& 3);
4331 TCGv_i32 tst
= tcg_temp_new_i32();
4332 int m3
= get_field(s
->fields
, m3
);
4334 /* XXX: the C bit in M3 should be considered as 0 when the
4335 ETF2-enhancement facility is not installed. */
4337 tcg_gen_movi_i32(tst
, -1);
4339 tcg_gen_extrl_i64_i32(tst
, regs
[0]);
4340 if (s
->insn
->opc
& 3) {
4341 tcg_gen_ext8u_i32(tst
, tst
);
4343 tcg_gen_ext16u_i32(tst
, tst
);
4346 gen_helper_trXX(cc_op
, cpu_env
, r1
, r2
, tst
, sizes
);
4348 tcg_temp_free_i32(r1
);
4349 tcg_temp_free_i32(r2
);
4350 tcg_temp_free_i32(sizes
);
4351 tcg_temp_free_i32(tst
);
4356 static ExitStatus
op_ts(DisasContext
*s
, DisasOps
*o
)
4358 TCGv_i32 t1
= tcg_const_i32(0xff);
4359 tcg_gen_atomic_xchg_i32(t1
, o
->in2
, t1
, get_mem_index(s
), MO_UB
);
4360 tcg_gen_extract_i32(cc_op
, t1
, 7, 1);
4361 tcg_temp_free_i32(t1
);
4366 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
4368 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4369 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4370 tcg_temp_free_i32(l
);
4374 static ExitStatus
op_unpka(DisasContext
*s
, DisasOps
*o
)
4376 int l1
= get_field(s
->fields
, l1
) + 1;
4379 /* The length must not exceed 32 bytes. */
4381 gen_program_exception(s
, PGM_SPECIFICATION
);
4382 return EXIT_NORETURN
;
4384 l
= tcg_const_i32(l1
);
4385 gen_helper_unpka(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4386 tcg_temp_free_i32(l
);
4391 static ExitStatus
op_unpku(DisasContext
*s
, DisasOps
*o
)
4393 int l1
= get_field(s
->fields
, l1
) + 1;
4396 /* The length must be even and should not exceed 64 bytes. */
4397 if ((l1
& 1) || (l1
> 64)) {
4398 gen_program_exception(s
, PGM_SPECIFICATION
);
4399 return EXIT_NORETURN
;
4401 l
= tcg_const_i32(l1
);
4402 gen_helper_unpku(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4403 tcg_temp_free_i32(l
);
4409 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
4411 int d1
= get_field(s
->fields
, d1
);
4412 int d2
= get_field(s
->fields
, d2
);
4413 int b1
= get_field(s
->fields
, b1
);
4414 int b2
= get_field(s
->fields
, b2
);
4415 int l
= get_field(s
->fields
, l1
);
4418 o
->addr1
= get_address(s
, 0, b1
, d1
);
4420 /* If the addresses are identical, this is a store/memset of zero. */
4421 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4422 o
->in2
= tcg_const_i64(0);
4426 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4429 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4433 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4436 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4440 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4443 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4447 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4449 gen_op_movi_cc(s
, 0);
4453 /* But in general we'll defer to a helper. */
4454 o
->in2
= get_address(s
, 0, b2
, d2
);
4455 t32
= tcg_const_i32(l
);
4456 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4457 tcg_temp_free_i32(t32
);
4462 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
4464 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4468 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
4470 int shift
= s
->insn
->data
& 0xff;
4471 int size
= s
->insn
->data
>> 8;
4472 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4475 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4476 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4478 /* Produce the CC from only the bits manipulated. */
4479 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4480 set_cc_nz_u64(s
, cc_dst
);
4484 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
4486 o
->out
= tcg_const_i64(0);
4490 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
4492 o
->out
= tcg_const_i64(0);
4498 /* ====================================================================== */
4499 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4500 the original inputs), update the various cc data structures in order to
4501 be able to compute the new condition code. */
4503 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4505 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4508 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4510 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
4513 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
4515 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
4518 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4520 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4523 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4525 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4528 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4530 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4533 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4535 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4538 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4540 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
4543 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
4545 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
4548 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4550 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4553 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4555 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4558 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4560 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4563 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4565 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4568 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4570 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4573 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4575 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4578 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4580 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4583 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4585 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4588 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4590 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4593 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4595 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
4598 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
4600 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
4601 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
4604 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
4606 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
4609 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
4611 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
4614 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
4616 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
4619 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
4621 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
4624 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
4626 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
4629 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
4631 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
4634 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
4636 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
4639 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
4641 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
4644 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
4646 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
4649 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
4651 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
4654 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
4656 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
4659 /* ====================================================================== */
4660 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4661 with the TCG register to which we will write. Used in combination with
4662 the "wout" generators, in some cases we need a new temporary, and in
4663 some cases we can write to a TCG global. */
4665 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4667 o
->out
= tcg_temp_new_i64();
4669 #define SPEC_prep_new 0
4671 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4673 o
->out
= tcg_temp_new_i64();
4674 o
->out2
= tcg_temp_new_i64();
4676 #define SPEC_prep_new_P 0
4678 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4680 o
->out
= regs
[get_field(f
, r1
)];
4683 #define SPEC_prep_r1 0
4685 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4687 int r1
= get_field(f
, r1
);
4689 o
->out2
= regs
[r1
+ 1];
4690 o
->g_out
= o
->g_out2
= true;
4692 #define SPEC_prep_r1_P SPEC_r1_even
4694 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4696 o
->out
= fregs
[get_field(f
, r1
)];
4699 #define SPEC_prep_f1 0
4701 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4703 int r1
= get_field(f
, r1
);
4705 o
->out2
= fregs
[r1
+ 2];
4706 o
->g_out
= o
->g_out2
= true;
4708 #define SPEC_prep_x1 SPEC_r1_f128
4710 /* ====================================================================== */
4711 /* The "Write OUTput" generators. These generally perform some non-trivial
4712 copy of data to TCG globals, or to main memory. The trivial cases are
4713 generally handled by having a "prep" generator install the TCG global
4714 as the destination of the operation. */
4716 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4718 store_reg(get_field(f
, r1
), o
->out
);
4720 #define SPEC_wout_r1 0
4722 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4724 int r1
= get_field(f
, r1
);
4725 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
4727 #define SPEC_wout_r1_8 0
4729 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4731 int r1
= get_field(f
, r1
);
4732 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
4734 #define SPEC_wout_r1_16 0
4736 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4738 store_reg32_i64(get_field(f
, r1
), o
->out
);
4740 #define SPEC_wout_r1_32 0
4742 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4744 store_reg32h_i64(get_field(f
, r1
), o
->out
);
4746 #define SPEC_wout_r1_32h 0
4748 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4750 int r1
= get_field(f
, r1
);
4751 store_reg32_i64(r1
, o
->out
);
4752 store_reg32_i64(r1
+ 1, o
->out2
);
4754 #define SPEC_wout_r1_P32 SPEC_r1_even
4756 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4758 int r1
= get_field(f
, r1
);
4759 store_reg32_i64(r1
+ 1, o
->out
);
4760 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
4761 store_reg32_i64(r1
, o
->out
);
4763 #define SPEC_wout_r1_D32 SPEC_r1_even
4765 static void wout_r3_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4767 int r3
= get_field(f
, r3
);
4768 store_reg32_i64(r3
, o
->out
);
4769 store_reg32_i64(r3
+ 1, o
->out2
);
4771 #define SPEC_wout_r3_P32 SPEC_r3_even
4773 static void wout_r3_P64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4775 int r3
= get_field(f
, r3
);
4776 store_reg(r3
, o
->out
);
4777 store_reg(r3
+ 1, o
->out2
);
4779 #define SPEC_wout_r3_P64 SPEC_r3_even
4781 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4783 store_freg32_i64(get_field(f
, r1
), o
->out
);
4785 #define SPEC_wout_e1 0
4787 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4789 store_freg(get_field(f
, r1
), o
->out
);
4791 #define SPEC_wout_f1 0
4793 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4795 int f1
= get_field(s
->fields
, r1
);
4796 store_freg(f1
, o
->out
);
4797 store_freg(f1
+ 2, o
->out2
);
4799 #define SPEC_wout_x1 SPEC_r1_f128
4801 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4803 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4804 store_reg32_i64(get_field(f
, r1
), o
->out
);
4807 #define SPEC_wout_cond_r1r2_32 0
4809 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4811 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4812 store_freg32_i64(get_field(f
, r1
), o
->out
);
4815 #define SPEC_wout_cond_e1e2 0
4817 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4819 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4821 #define SPEC_wout_m1_8 0
4823 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4825 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4827 #define SPEC_wout_m1_16 0
4829 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4831 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4833 #define SPEC_wout_m1_32 0
4835 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4837 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4839 #define SPEC_wout_m1_64 0
4841 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4843 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4845 #define SPEC_wout_m2_32 0
4847 static void wout_in2_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4849 store_reg(get_field(f
, r1
), o
->in2
);
4851 #define SPEC_wout_in2_r1 0
4853 static void wout_in2_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4855 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4857 #define SPEC_wout_in2_r1_32 0
4859 /* ====================================================================== */
4860 /* The "INput 1" generators. These load the first operand to an insn. */
4862 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4864 o
->in1
= load_reg(get_field(f
, r1
));
4866 #define SPEC_in1_r1 0
4868 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4870 o
->in1
= regs
[get_field(f
, r1
)];
4873 #define SPEC_in1_r1_o 0
4875 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4877 o
->in1
= tcg_temp_new_i64();
4878 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4880 #define SPEC_in1_r1_32s 0
4882 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4884 o
->in1
= tcg_temp_new_i64();
4885 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4887 #define SPEC_in1_r1_32u 0
4889 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4891 o
->in1
= tcg_temp_new_i64();
4892 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
4894 #define SPEC_in1_r1_sr32 0
4896 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4898 o
->in1
= load_reg(get_field(f
, r1
) + 1);
4900 #define SPEC_in1_r1p1 SPEC_r1_even
4902 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4904 o
->in1
= tcg_temp_new_i64();
4905 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4907 #define SPEC_in1_r1p1_32s SPEC_r1_even
4909 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4911 o
->in1
= tcg_temp_new_i64();
4912 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4914 #define SPEC_in1_r1p1_32u SPEC_r1_even
4916 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4918 int r1
= get_field(f
, r1
);
4919 o
->in1
= tcg_temp_new_i64();
4920 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
4922 #define SPEC_in1_r1_D32 SPEC_r1_even
4924 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4926 o
->in1
= load_reg(get_field(f
, r2
));
4928 #define SPEC_in1_r2 0
4930 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4932 o
->in1
= tcg_temp_new_i64();
4933 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
4935 #define SPEC_in1_r2_sr32 0
4937 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4939 o
->in1
= load_reg(get_field(f
, r3
));
4941 #define SPEC_in1_r3 0
4943 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4945 o
->in1
= regs
[get_field(f
, r3
)];
4948 #define SPEC_in1_r3_o 0
4950 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4952 o
->in1
= tcg_temp_new_i64();
4953 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4955 #define SPEC_in1_r3_32s 0
4957 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4959 o
->in1
= tcg_temp_new_i64();
4960 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4962 #define SPEC_in1_r3_32u 0
4964 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4966 int r3
= get_field(f
, r3
);
4967 o
->in1
= tcg_temp_new_i64();
4968 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
4970 #define SPEC_in1_r3_D32 SPEC_r3_even
4972 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4974 o
->in1
= load_freg32_i64(get_field(f
, r1
));
4976 #define SPEC_in1_e1 0
4978 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4980 o
->in1
= fregs
[get_field(f
, r1
)];
4983 #define SPEC_in1_f1_o 0
4985 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4987 int r1
= get_field(f
, r1
);
4989 o
->out2
= fregs
[r1
+ 2];
4990 o
->g_out
= o
->g_out2
= true;
4992 #define SPEC_in1_x1_o SPEC_r1_f128
4994 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4996 o
->in1
= fregs
[get_field(f
, r3
)];
4999 #define SPEC_in1_f3_o 0
5001 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5003 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
5005 #define SPEC_in1_la1 0
5007 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5009 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5010 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5012 #define SPEC_in1_la2 0
5014 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5017 o
->in1
= tcg_temp_new_i64();
5018 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
5020 #define SPEC_in1_m1_8u 0
5022 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5025 o
->in1
= tcg_temp_new_i64();
5026 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
5028 #define SPEC_in1_m1_16s 0
5030 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5033 o
->in1
= tcg_temp_new_i64();
5034 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
5036 #define SPEC_in1_m1_16u 0
5038 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5041 o
->in1
= tcg_temp_new_i64();
5042 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
5044 #define SPEC_in1_m1_32s 0
5046 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5049 o
->in1
= tcg_temp_new_i64();
5050 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
5052 #define SPEC_in1_m1_32u 0
5054 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5057 o
->in1
= tcg_temp_new_i64();
5058 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
5060 #define SPEC_in1_m1_64 0
5062 /* ====================================================================== */
5063 /* The "INput 2" generators. These load the second operand to an insn. */
5065 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5067 o
->in2
= regs
[get_field(f
, r1
)];
5070 #define SPEC_in2_r1_o 0
5072 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5074 o
->in2
= tcg_temp_new_i64();
5075 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5077 #define SPEC_in2_r1_16u 0
5079 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5081 o
->in2
= tcg_temp_new_i64();
5082 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5084 #define SPEC_in2_r1_32u 0
5086 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5088 int r1
= get_field(f
, r1
);
5089 o
->in2
= tcg_temp_new_i64();
5090 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
5092 #define SPEC_in2_r1_D32 SPEC_r1_even
5094 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5096 o
->in2
= load_reg(get_field(f
, r2
));
5098 #define SPEC_in2_r2 0
5100 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5102 o
->in2
= regs
[get_field(f
, r2
)];
5105 #define SPEC_in2_r2_o 0
5107 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5109 int r2
= get_field(f
, r2
);
5111 o
->in2
= load_reg(r2
);
5114 #define SPEC_in2_r2_nz 0
5116 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5118 o
->in2
= tcg_temp_new_i64();
5119 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5121 #define SPEC_in2_r2_8s 0
5123 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5125 o
->in2
= tcg_temp_new_i64();
5126 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5128 #define SPEC_in2_r2_8u 0
5130 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5132 o
->in2
= tcg_temp_new_i64();
5133 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5135 #define SPEC_in2_r2_16s 0
5137 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5139 o
->in2
= tcg_temp_new_i64();
5140 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5142 #define SPEC_in2_r2_16u 0
5144 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5146 o
->in2
= load_reg(get_field(f
, r3
));
5148 #define SPEC_in2_r3 0
5150 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5152 o
->in2
= tcg_temp_new_i64();
5153 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
5155 #define SPEC_in2_r3_sr32 0
5157 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5159 o
->in2
= tcg_temp_new_i64();
5160 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5162 #define SPEC_in2_r2_32s 0
5164 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5166 o
->in2
= tcg_temp_new_i64();
5167 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5169 #define SPEC_in2_r2_32u 0
5171 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5173 o
->in2
= tcg_temp_new_i64();
5174 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
5176 #define SPEC_in2_r2_sr32 0
5178 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5180 o
->in2
= load_freg32_i64(get_field(f
, r2
));
5182 #define SPEC_in2_e2 0
5184 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5186 o
->in2
= fregs
[get_field(f
, r2
)];
5189 #define SPEC_in2_f2_o 0
5191 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5193 int r2
= get_field(f
, r2
);
5195 o
->in2
= fregs
[r2
+ 2];
5196 o
->g_in1
= o
->g_in2
= true;
5198 #define SPEC_in2_x2_o SPEC_r2_f128
5200 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5202 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
5204 #define SPEC_in2_ra2 0
5206 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5208 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5209 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5211 #define SPEC_in2_a2 0
5213 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5215 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
5217 #define SPEC_in2_ri2 0
5219 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5221 help_l2_shift(s
, f
, o
, 31);
5223 #define SPEC_in2_sh32 0
5225 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5227 help_l2_shift(s
, f
, o
, 63);
5229 #define SPEC_in2_sh64 0
5231 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5234 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
5236 #define SPEC_in2_m2_8u 0
5238 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5241 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
5243 #define SPEC_in2_m2_16s 0
5245 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5248 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5250 #define SPEC_in2_m2_16u 0
5252 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5255 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5257 #define SPEC_in2_m2_32s 0
5259 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5262 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5264 #define SPEC_in2_m2_32u 0
5266 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5269 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5271 #define SPEC_in2_m2_64 0
5273 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5276 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5278 #define SPEC_in2_mri2_16u 0
5280 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5283 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5285 #define SPEC_in2_mri2_32s 0
5287 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5290 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5292 #define SPEC_in2_mri2_32u 0
5294 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5297 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5299 #define SPEC_in2_mri2_64 0
5301 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5303 o
->in2
= tcg_const_i64(get_field(f
, i2
));
5305 #define SPEC_in2_i2 0
5307 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5309 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
5311 #define SPEC_in2_i2_8u 0
5313 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5315 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
5317 #define SPEC_in2_i2_16u 0
5319 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5321 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
5323 #define SPEC_in2_i2_32u 0
5325 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5327 uint64_t i2
= (uint16_t)get_field(f
, i2
);
5328 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5330 #define SPEC_in2_i2_16u_shl 0
5332 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5334 uint64_t i2
= (uint32_t)get_field(f
, i2
);
5335 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5337 #define SPEC_in2_i2_32u_shl 0
5339 #ifndef CONFIG_USER_ONLY
5340 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5342 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
5344 #define SPEC_in2_insn 0
5347 /* ====================================================================== */
5349 /* Find opc within the table of insns. This is formulated as a switch
5350 statement so that (1) we get compile-time notice of cut-paste errors
5351 for duplicated opcodes, and (2) the compiler generates the binary
5352 search tree, rather than us having to post-process the table. */
5354 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5355 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5357 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5359 enum DisasInsnEnum
{
5360 #include "insn-data.def"
5364 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5368 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5370 .help_in1 = in1_##I1, \
5371 .help_in2 = in2_##I2, \
5372 .help_prep = prep_##P, \
5373 .help_wout = wout_##W, \
5374 .help_cout = cout_##CC, \
5375 .help_op = op_##OP, \
5379 /* Allow 0 to be used for NULL in the table below. */
5387 #define SPEC_in1_0 0
5388 #define SPEC_in2_0 0
5389 #define SPEC_prep_0 0
5390 #define SPEC_wout_0 0
5392 /* Give smaller names to the various facilities. */
5393 #define FAC_Z S390_FEAT_ZARCH
5394 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5395 #define FAC_CASS2 S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2
5396 #define FAC_DFP S390_FEAT_DFP
5397 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5398 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5399 #define FAC_EE S390_FEAT_EXECUTE_EXT
5400 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5401 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5402 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5403 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5404 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5405 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5406 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5407 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5408 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5409 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5410 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5411 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5412 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5413 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5414 #define FAC_SFLE S390_FEAT_STFLE
5415 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5416 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5417 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5418 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5420 static const DisasInsn insn_info
[] = {
5421 #include "insn-data.def"
5425 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5426 case OPC: return &insn_info[insn_ ## NM];
5428 static const DisasInsn
*lookup_opc(uint16_t opc
)
5431 #include "insn-data.def"
5440 /* Extract a field from the insn. The INSN should be left-aligned in
5441 the uint64_t so that we can more easily utilize the big-bit-endian
5442 definitions we extract from the Principals of Operation. */
5444 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5452 /* Zero extract the field from the insn. */
5453 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5455 /* Sign-extend, or un-swap the field as necessary. */
5457 case 0: /* unsigned */
5459 case 1: /* signed */
5460 assert(f
->size
<= 32);
5461 m
= 1u << (f
->size
- 1);
5464 case 2: /* dl+dh split, signed 20 bit. */
5465 r
= ((int8_t)r
<< 12) | (r
>> 8);
5471 /* Validate that the "compressed" encoding we selected above is valid.
5472 I.e. we havn't make two different original fields overlap. */
5473 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5474 o
->presentC
|= 1 << f
->indexC
;
5475 o
->presentO
|= 1 << f
->indexO
;
5477 o
->c
[f
->indexC
] = r
;
5480 /* Lookup the insn at the current PC, extracting the operands into O and
5481 returning the info struct for the insn. Returns NULL for invalid insn. */
/* NOTE(review): damaged extraction -- upstream line numbers (e.g. "5483")
   are fused into the code and several lines are missing (the trailing
   DisasFields parameter, local declarations for op/op2/ilen/i, the
   ilen switch header, the else branch, breaks, closing braces and the
   final return).  Restore from upstream QEMU target/s390x/translate.c
   before compiling.  Code below left byte-identical; only comments added. */
5483 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5486 uint64_t insn
, pc
= s
->pc
;
5488 const DisasInsn
*info
;
/* EXECUTE stashes its target insn in s->ex_value: the low nibble holds
   the insn length, the upper bytes the insn itself (see the masks below). */
5490 if (unlikely(s
->ex_value
)) {
5491 /* Drop the EX data now, so that it's clear on exception paths. */
5492 TCGv_i64 zero
= tcg_const_i64(0);
5493 tcg_gen_st_i64(zero
, cpu_env
, offsetof(CPUS390XState
, ex_value
));
5494 tcg_temp_free_i64(zero
);
5496 /* Extract the values saved by EXECUTE. */
5497 insn
= s
->ex_value
& 0xffffffffffff0000ull
;
5498 ilen
= s
->ex_value
& 0xf;
/* Normal fetch path: read the first halfword and derive the total insn
   length from the major opcode (high byte of that halfword). */
5501 insn
= ld_code2(env
, pc
);
5502 op
= (insn
>> 8) & 0xff;
5503 ilen
= get_ilen(op
);
/* ilen == 4: the whole insn fits one word; left-justify it in INSN. */
5509 insn
= ld_code4(env
, pc
) << 32;
/* ilen == 6: combine the halfword already read with the following word. */
5512 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
/* Any other ilen value would be a decoder bug. */
5515 g_assert_not_reached();
5518 s
->next_pc
= s
->pc
+ ilen
;
5521 /* We can't actually determine the insn format until we've looked up
5522 the full insn opcode. Which we can't do without locating the
5523 secondary opcode. Assume by default that OP2 is at bit 40; for
5524 those smaller insns that don't actually have a secondary opcode
5525 this will correctly result in OP2 = 0. */
5531 case 0xb2: /* S, RRF, RRE */
5532 case 0xb3: /* RRE, RRD, RRF */
5533 case 0xb9: /* RRE, RRF */
5534 case 0xe5: /* SSE, SIL */
/* Secondary opcode is byte 1 (bits 8..15 of the left-justified insn). */
5535 op2
= (insn
<< 8) >> 56;
5539 case 0xc0: /* RIL */
5540 case 0xc2: /* RIL */
5541 case 0xc4: /* RIL */
5542 case 0xc6: /* RIL */
5543 case 0xc8: /* SSF */
5544 case 0xcc: /* RIL */
/* Secondary opcode is the low nibble of byte 1 (bits 12..15). */
5545 op2
= (insn
<< 12) >> 60;
/* SS-format insns carry no secondary opcode; op2 presumably left 0
   for these cases -- TODO confirm against the missing lines. */
5547 case 0xd0 ... 0xdf: /* SS */
5553 case 0xee ... 0xf3: /* SS */
5554 case 0xf8 ... 0xfd: /* SS */
/* Default: secondary opcode at bit 40 (byte 5); reads as 0 for insns
   shorter than 6 bytes, as the comment above explains. */
5558 op2
= (insn
<< 40) >> 56;
/* Clear the operand set before decoding fields into it. */
5562 memset(f
, 0, sizeof(*f
));
5567 /* Lookup the instruction. */
5568 info
= lookup_opc(op
<< 8 | op2
);
5570 /* If we found it, extract the operands. */
5572 DisasFormat fmt
= info
->fmt
;
5575 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5576 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
/* Translate a single guest instruction at s->pc into TCG ops.
   NOTE(review): damaged extraction -- upstream line numbers are fused into
   the code and several lines are missing (locals f/o, the NULL-info check,
   the even/f128-register condition lines, the excp dispatch, the #endif
   markers, pc advance and final return).  Restore from upstream QEMU
   target/s390x/translate.c.  Code left byte-identical; comments added. */
5582 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5584 const DisasInsn
*insn
;
5585 ExitStatus ret
= NO_EXIT
;
5589 /* Search for the insn in the table. */
5590 insn
= extract_insn(env
, s
, &f
);
5592 /* Not found means unimplemented/illegal opcode. */
5594 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
5596 gen_illegal_opcode(s
);
5597 return EXIT_NORETURN
;
/* System mode only: PER (Program Event Recording) instruction-fetch hook. */
5600 #ifndef CONFIG_USER_ONLY
5601 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5602 TCGv_i64 addr
= tcg_const_i64(s
->pc
);
5603 gen_helper_per_ifetch(cpu_env
, addr
);
5604 tcg_temp_free_i64(addr
);
5608 /* Check for insn specification exceptions. */
5610 int spec
= insn
->spec
, excp
= 0, r
;
/* Each check below presumably raises PGM_SPECIFICATION when the named
   register operand violates the insn's even/f128-pair constraint; the
   conditions themselves are among the missing lines -- TODO confirm. */
5612 if (spec
& SPEC_r1_even
) {
5613 r
= get_field(&f
, r1
);
5615 excp
= PGM_SPECIFICATION
;
5618 if (spec
& SPEC_r2_even
) {
5619 r
= get_field(&f
, r2
);
5621 excp
= PGM_SPECIFICATION
;
5624 if (spec
& SPEC_r3_even
) {
5625 r
= get_field(&f
, r3
);
5627 excp
= PGM_SPECIFICATION
;
5630 if (spec
& SPEC_r1_f128
) {
5631 r
= get_field(&f
, r1
);
5633 excp
= PGM_SPECIFICATION
;
5636 if (spec
& SPEC_r2_f128
) {
5637 r
= get_field(&f
, r2
);
5639 excp
= PGM_SPECIFICATION
;
/* Any recorded specification exception aborts translation of this insn. */
5643 gen_program_exception(s
, excp
);
5644 return EXIT_NORETURN
;
5648 /* Set up the structures we use to communicate with the helpers. */
5651 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5652 TCGV_UNUSED_I64(o
.out
);
5653 TCGV_UNUSED_I64(o
.out2
);
5654 TCGV_UNUSED_I64(o
.in1
);
5655 TCGV_UNUSED_I64(o
.in2
);
5656 TCGV_UNUSED_I64(o
.addr1
);
5658 /* Implement the instruction. */
/* The per-insn helpers run in a fixed pipeline: load inputs (in1, in2),
   prepare outputs (prep), perform the operation (op), write outputs
   (wout), then compute the condition code (cout).  Each stage is
   optional per insn. */
5659 if (insn
->help_in1
) {
5660 insn
->help_in1(s
, &f
, &o
);
5662 if (insn
->help_in2
) {
5663 insn
->help_in2(s
, &f
, &o
);
5665 if (insn
->help_prep
) {
5666 insn
->help_prep(s
, &f
, &o
);
5668 if (insn
->help_op
) {
5669 ret
= insn
->help_op(s
, &o
);
5671 if (insn
->help_wout
) {
5672 insn
->help_wout(s
, &f
, &o
);
5674 if (insn
->help_cout
) {
5675 insn
->help_cout(s
, &o
);
5678 /* Free any temporaries created by the helpers. */
/* The g_* flags mark operands aliasing global TCG values, which must
   not be freed here. */
5679 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5680 tcg_temp_free_i64(o
.out
);
5682 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5683 tcg_temp_free_i64(o
.out2
);
5685 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5686 tcg_temp_free_i64(o
.in1
);
5688 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5689 tcg_temp_free_i64(o
.in2
);
5691 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5692 tcg_temp_free_i64(o
.addr1
);
/* System mode only: post-insn PER exception check. */
5695 #ifndef CONFIG_USER_ONLY
5696 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5697 /* An exception might be triggered, save PSW if not already done. */
5698 if (ret
== NO_EXIT
|| ret
== EXIT_PC_STALE
) {
5699 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
5705 /* Call the helper to check for a possible PER exception. */
5706 gen_helper_per_check_exception(cpu_env
);
5710 /* Advance to the next instruction. */
/* Translate one guest translation block TB into TCG intermediate code.
   NOTE(review): damaged extraction -- upstream line numbers are fused into
   the code and several lines are missing (dc/do_debug/status declarations,
   pc_start/dc.pc initialization, gen_tb_start, the do {} loop header,
   num_insns accounting, gen_io_start/end, the status switch header and
   several case bodies).  Restore from upstream QEMU
   target/s390x/translate.c.  Code left byte-identical; comments added. */
5715 void gen_intermediate_code(CPUS390XState
*env
, struct TranslationBlock
*tb
)
5717 S390CPU
*cpu
= s390_env_get_cpu(env
);
5718 CPUState
*cs
= CPU(cpu
);
5720 target_ulong pc_start
;
5721 uint64_t next_page_start
;
5722 int num_insns
, max_insns
;
/* 31-bit mode: addresses are truncated to 31 bits. */
5729 if (!(tb
->flags
& FLAG_MASK_64
)) {
5730 pc_start
&= 0x7fffffff;
5735 dc
.cc_op
= CC_OP_DYNAMIC
;
/* cs_base carries the pending EXECUTE value (see extract_insn). */
5736 dc
.ex_value
= tb
->cs_base
;
5737 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
/* Stop translation at the guest page boundary so TLB invalidation of a
   single page invalidates the right TBs. */
5739 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
/* Clamp the requested insn count to the TCG buffer limit. */
5742 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5743 if (max_insns
== 0) {
5744 max_insns
= CF_COUNT_MASK
;
5746 if (max_insns
> TCG_MAX_INSNS
) {
5747 max_insns
= TCG_MAX_INSNS
;
/* Per-insn marker recording pc and cc_op for restore_state_to_opc. */
5753 tcg_gen_insn_start(dc
.pc
, dc
.cc_op
);
5756 if (unlikely(cpu_breakpoint_test(cs
, dc
.pc
, BP_ANY
))) {
5757 status
= EXIT_PC_STALE
;
5759 /* The address covered by the breakpoint must be included in
5760 [tb->pc, tb->pc + tb->size) in order for it to be
5761 properly cleared -- thus we increment the PC here so that
5762 the logic setting tb->size below does the right thing. */
/* Last insn of an I/O-ending TB: presumably bracketed by gen_io_start
   in the missing lines -- TODO confirm. */
5767 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
5771 status
= translate_one(env
, &dc
);
5773 /* If we reach a page boundary, are single stepping,
5774 or exhaust instruction count, stop generation. */
5775 if (status
== NO_EXIT
5776 && (dc
.pc
>= next_page_start
5777 || tcg_op_buf_full()
5778 || num_insns
>= max_insns
5780 || cs
->singlestep_enabled
5782 status
= EXIT_PC_STALE
;
5784 } while (status
== NO_EXIT
);
5786 if (tb
->cflags
& CF_LAST_IO
) {
/* Epilogue: dispatch on how translation ended. */
5795 case EXIT_PC_STALE_NOCHAIN
:
5796 update_psw_addr(&dc
);
5798 case EXIT_PC_UPDATED
:
5799 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5800 cc op type is in env */
5803 case EXIT_PC_CC_UPDATED
:
5804 /* Exit the TB, either by raising a debug exception or by return. */
5806 gen_exception(EXCP_DEBUG
);
5807 } else if (use_exit_tb(&dc
) || status
== EXIT_PC_STALE_NOCHAIN
) {
5810 tcg_gen_lookup_and_goto_ptr(psw_addr
);
5814 g_assert_not_reached();
5817 gen_tb_end(tb
, num_insns
);
/* Record the translated extent for breakpoint/invalidation bookkeeping. */
5819 tb
->size
= dc
.pc
- pc_start
;
5820 tb
->icount
= num_insns
;
/* Optional disassembly logging of the guest code just translated. */
5822 #if defined(S390X_DEBUG_DISAS)
5823 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
5824 && qemu_log_in_addr_range(pc_start
)) {
5826 if (unlikely(dc
.ex_value
)) {
5827 /* ??? Unfortunately log_target_disas can't use host memory. */
5828 qemu_log("IN: EXECUTE %016" PRIx64
"\n", dc
.ex_value
);
5830 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5831 log_target_disas(cs
, pc_start
, dc
.pc
- pc_start
, 1);
5839 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
5842 int cc_op
= data
[1];
5843 env
->psw
.addr
= data
[0];
5844 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {