4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
39 static TCGv_ptr cpu_env
;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
60 /* Information carried about a condition to be evaluated. */
67 struct { TCGv_i64 a
, b
; } s64
;
68 struct { TCGv_i32 a
, b
; } s32
;
74 #ifdef DEBUG_INLINE_BRANCHES
75 static uint64_t inline_branch_hit
[CC_OP_MAX
];
76 static uint64_t inline_branch_miss
[CC_OP_MAX
];
79 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
81 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
82 if (s
->tb
->flags
& FLAG_MASK_32
) {
83 return pc
| 0x80000000;
89 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
92 S390CPU
*cpu
= S390_CPU(cs
);
93 CPUS390XState
*env
= &cpu
->env
;
97 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
98 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
100 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
101 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
104 for (i
= 0; i
< 16; i
++) {
105 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
107 cpu_fprintf(f
, "\n");
113 for (i
= 0; i
< 16; i
++) {
114 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
116 cpu_fprintf(f
, "\n");
122 #ifndef CONFIG_USER_ONLY
123 for (i
= 0; i
< 16; i
++) {
124 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
126 cpu_fprintf(f
, "\n");
133 #ifdef DEBUG_INLINE_BRANCHES
134 for (i
= 0; i
< CC_OP_MAX
; i
++) {
135 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
136 inline_branch_miss
[i
], inline_branch_hit
[i
]);
140 cpu_fprintf(f
, "\n");
143 static TCGv_i64 psw_addr
;
144 static TCGv_i64 psw_mask
;
146 static TCGv_i32 cc_op
;
147 static TCGv_i64 cc_src
;
148 static TCGv_i64 cc_dst
;
149 static TCGv_i64 cc_vr
;
151 static char cpu_reg_names
[32][4];
152 static TCGv_i64 regs
[16];
153 static TCGv_i64 fregs
[16];
155 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
157 void s390x_translate_init(void)
161 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
162 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
163 offsetof(CPUS390XState
, psw
.addr
),
165 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
166 offsetof(CPUS390XState
, psw
.mask
),
169 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
171 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
173 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
175 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
178 for (i
= 0; i
< 16; i
++) {
179 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
180 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
181 offsetof(CPUS390XState
, regs
[i
]),
185 for (i
= 0; i
< 16; i
++) {
186 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
187 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
188 offsetof(CPUS390XState
, fregs
[i
].d
),
189 cpu_reg_names
[i
+ 16]);
193 static TCGv_i64
load_reg(int reg
)
195 TCGv_i64 r
= tcg_temp_new_i64();
196 tcg_gen_mov_i64(r
, regs
[reg
]);
200 static TCGv_i64
load_freg32_i64(int reg
)
202 TCGv_i64 r
= tcg_temp_new_i64();
203 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
207 static void store_reg(int reg
, TCGv_i64 v
)
209 tcg_gen_mov_i64(regs
[reg
], v
);
212 static void store_freg(int reg
, TCGv_i64 v
)
214 tcg_gen_mov_i64(fregs
[reg
], v
);
217 static void store_reg32_i64(int reg
, TCGv_i64 v
)
219 /* 32 bit register writes keep the upper half */
220 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
223 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
225 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
228 static void store_freg32_i64(int reg
, TCGv_i64 v
)
230 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
233 static void return_low128(TCGv_i64 dest
)
235 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
238 static void update_psw_addr(DisasContext
*s
)
241 tcg_gen_movi_i64(psw_addr
, s
->pc
);
244 static void update_cc_op(DisasContext
*s
)
246 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
247 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
251 static void potential_page_fault(DisasContext
*s
)
257 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
259 return (uint64_t)cpu_lduw_code(env
, pc
);
262 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
264 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
267 static int get_mem_index(DisasContext
*s
)
269 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
270 case PSW_ASC_PRIMARY
>> 32:
272 case PSW_ASC_SECONDARY
>> 32:
274 case PSW_ASC_HOME
>> 32:
282 static void gen_exception(int excp
)
284 TCGv_i32 tmp
= tcg_const_i32(excp
);
285 gen_helper_exception(cpu_env
, tmp
);
286 tcg_temp_free_i32(tmp
);
289 static void gen_program_exception(DisasContext
*s
, int code
)
/* Raise a program-check exception with interruption code CODE.
   Stores the code and the instruction length into env, then calls the
   generic exception generator.
   NOTE(review): the declaration of `tmp` and the PSW-address update
   ("advance past instruction") are missing from this excerpt — confirm
   against the full source. */
293 /* Remember what pgm exception this was. */
294 tmp
= tcg_const_i32(code
);
295 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
296 tcg_temp_free_i32(tmp
);
/* Record the instruction length (next_pc - pc) for the interruption. */
298 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
299 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
300 tcg_temp_free_i32(tmp
);
302 /* Advance past instruction. */
309 /* Trigger exception. */
310 gen_exception(EXCP_PGM
);
313 static inline void gen_illegal_opcode(DisasContext
*s
)
315 gen_program_exception(s
, PGM_SPECIFICATION
);
318 static inline void check_privileged(DisasContext
*s
)
320 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
321 gen_program_exception(s
, PGM_PRIVILEGED
);
325 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
/* Compute an effective address base(b2) + index(x2) + displacement(d2)
   into a fresh temp; need_31 indicates the result must be truncated to
   31 bits (non-64-bit addressing mode).
   NOTE(review): the register-zero checks and the if/else branch
   structure selecting among the add forms are missing from this
   excerpt — confirm against the full source. */
327 TCGv_i64 tmp
= tcg_temp_new_i64();
328 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
330 /* Note that d2 is limited to 20 bits, signed. If we crop negative
331 displacements early we create larger immediate addends. */
333 /* Note that addi optimizes the imm==0 case. */
335 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
336 tcg_gen_addi_i64(tmp
, tmp
, d2
);
338 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
340 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
346 tcg_gen_movi_i64(tmp
, d2
);
349 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
355 static inline bool live_cc_data(DisasContext
*s
)
357 return (s
->cc_op
!= CC_OP_DYNAMIC
358 && s
->cc_op
!= CC_OP_STATIC
362 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
364 if (live_cc_data(s
)) {
365 tcg_gen_discard_i64(cc_src
);
366 tcg_gen_discard_i64(cc_dst
);
367 tcg_gen_discard_i64(cc_vr
);
369 s
->cc_op
= CC_OP_CONST0
+ val
;
372 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
374 if (live_cc_data(s
)) {
375 tcg_gen_discard_i64(cc_src
);
376 tcg_gen_discard_i64(cc_vr
);
378 tcg_gen_mov_i64(cc_dst
, dst
);
382 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
385 if (live_cc_data(s
)) {
386 tcg_gen_discard_i64(cc_vr
);
388 tcg_gen_mov_i64(cc_src
, src
);
389 tcg_gen_mov_i64(cc_dst
, dst
);
393 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
394 TCGv_i64 dst
, TCGv_i64 vr
)
396 tcg_gen_mov_i64(cc_src
, src
);
397 tcg_gen_mov_i64(cc_dst
, dst
);
398 tcg_gen_mov_i64(cc_vr
, vr
);
402 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
404 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
407 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
409 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
412 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
414 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
417 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
419 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
422 /* CC value is in env->cc_op */
423 static void set_cc_static(DisasContext
*s
)
425 if (live_cc_data(s
)) {
426 tcg_gen_discard_i64(cc_src
);
427 tcg_gen_discard_i64(cc_dst
);
428 tcg_gen_discard_i64(cc_vr
);
430 s
->cc_op
= CC_OP_STATIC
;
433 /* calculates cc into cc_op */
434 static void gen_op_calc_cc(DisasContext
*s
)
436 TCGv_i32 local_cc_op
;
439 TCGV_UNUSED_I32(local_cc_op
);
440 TCGV_UNUSED_I64(dummy
);
443 dummy
= tcg_const_i64(0);
457 local_cc_op
= tcg_const_i32(s
->cc_op
);
473 /* s->cc_op is the cc value */
474 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
477 /* env->cc_op already is the cc value */
492 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
497 case CC_OP_LTUGTU_32
:
498 case CC_OP_LTUGTU_64
:
505 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
520 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
523 /* unknown operation - assume 3 arguments and cc_op in env */
524 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
530 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
531 tcg_temp_free_i32(local_cc_op
);
533 if (!TCGV_IS_UNUSED_I64(dummy
)) {
534 tcg_temp_free_i64(dummy
);
537 /* We now have cc in cc_op as constant */
541 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
543 /* NOTE: we handle the case where the TB spans two pages here */
544 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
545 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
546 && !s
->singlestep_enabled
547 && !(s
->tb
->cflags
& CF_LAST_IO
));
550 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
552 #ifdef DEBUG_INLINE_BRANCHES
553 inline_branch_miss
[cc_op
]++;
557 static void account_inline_branch(DisasContext
*s
, int cc_op
)
559 #ifdef DEBUG_INLINE_BRANCHES
560 inline_branch_hit
[cc_op
]++;
564 /* Table of mask values to comparison codes, given a comparison as input.
565 For such, CC=3 should not be possible. */
566 static const TCGCond ltgt_cond
[16] = {
567 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
568 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
569 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
570 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
571 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
572 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
573 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
574 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
577 /* Table of mask values to comparison codes, given a logic op as input.
578 For such, only CC=0 and CC=1 should be possible. */
579 static const TCGCond nz_cond
[16] = {
580 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
581 TCG_COND_NEVER
, TCG_COND_NEVER
,
582 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
583 TCG_COND_NE
, TCG_COND_NE
,
584 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
585 TCG_COND_EQ
, TCG_COND_EQ
,
586 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
587 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
590 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
591 details required to generate a TCG comparison. */
592 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
595 enum cc_op old_cc_op
= s
->cc_op
;
597 if (mask
== 15 || mask
== 0) {
598 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
601 c
->g1
= c
->g2
= true;
606 /* Find the TCG condition for the mask + cc op. */
612 cond
= ltgt_cond
[mask
];
613 if (cond
== TCG_COND_NEVER
) {
616 account_inline_branch(s
, old_cc_op
);
619 case CC_OP_LTUGTU_32
:
620 case CC_OP_LTUGTU_64
:
621 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
622 if (cond
== TCG_COND_NEVER
) {
625 account_inline_branch(s
, old_cc_op
);
629 cond
= nz_cond
[mask
];
630 if (cond
== TCG_COND_NEVER
) {
633 account_inline_branch(s
, old_cc_op
);
648 account_inline_branch(s
, old_cc_op
);
663 account_inline_branch(s
, old_cc_op
);
667 switch (mask
& 0xa) {
668 case 8: /* src == 0 -> no one bit found */
671 case 2: /* src != 0 -> one bit found */
677 account_inline_branch(s
, old_cc_op
);
683 case 8 | 2: /* vr == 0 */
686 case 4 | 1: /* vr != 0 */
689 case 8 | 4: /* no carry -> vr >= src */
692 case 2 | 1: /* carry -> vr < src */
698 account_inline_branch(s
, old_cc_op
);
703 /* Note that CC=0 is impossible; treat it as dont-care. */
705 case 2: /* zero -> op1 == op2 */
708 case 4 | 1: /* !zero -> op1 != op2 */
711 case 4: /* borrow (!carry) -> op1 < op2 */
714 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
720 account_inline_branch(s
, old_cc_op
);
725 /* Calculate cc value. */
730 /* Jump based on CC. We'll load up the real cond below;
731 the assignment here merely avoids a compiler warning. */
732 account_noninline_branch(s
, old_cc_op
);
733 old_cc_op
= CC_OP_STATIC
;
734 cond
= TCG_COND_NEVER
;
738 /* Load up the arguments of the comparison. */
740 c
->g1
= c
->g2
= false;
744 c
->u
.s32
.a
= tcg_temp_new_i32();
745 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
746 c
->u
.s32
.b
= tcg_const_i32(0);
749 case CC_OP_LTUGTU_32
:
752 c
->u
.s32
.a
= tcg_temp_new_i32();
753 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
754 c
->u
.s32
.b
= tcg_temp_new_i32();
755 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
762 c
->u
.s64
.b
= tcg_const_i64(0);
766 case CC_OP_LTUGTU_64
:
770 c
->g1
= c
->g2
= true;
776 c
->u
.s64
.a
= tcg_temp_new_i64();
777 c
->u
.s64
.b
= tcg_const_i64(0);
778 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
783 c
->u
.s32
.a
= tcg_temp_new_i32();
784 c
->u
.s32
.b
= tcg_temp_new_i32();
785 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_vr
);
786 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
787 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
789 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_src
);
796 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
797 c
->u
.s64
.b
= tcg_const_i64(0);
809 case 0x8 | 0x4 | 0x2: /* cc != 3 */
811 c
->u
.s32
.b
= tcg_const_i32(3);
813 case 0x8 | 0x4 | 0x1: /* cc != 2 */
815 c
->u
.s32
.b
= tcg_const_i32(2);
817 case 0x8 | 0x2 | 0x1: /* cc != 1 */
819 c
->u
.s32
.b
= tcg_const_i32(1);
821 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
824 c
->u
.s32
.a
= tcg_temp_new_i32();
825 c
->u
.s32
.b
= tcg_const_i32(0);
826 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
828 case 0x8 | 0x4: /* cc < 2 */
830 c
->u
.s32
.b
= tcg_const_i32(2);
832 case 0x8: /* cc == 0 */
834 c
->u
.s32
.b
= tcg_const_i32(0);
836 case 0x4 | 0x2 | 0x1: /* cc != 0 */
838 c
->u
.s32
.b
= tcg_const_i32(0);
840 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
843 c
->u
.s32
.a
= tcg_temp_new_i32();
844 c
->u
.s32
.b
= tcg_const_i32(0);
845 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
847 case 0x4: /* cc == 1 */
849 c
->u
.s32
.b
= tcg_const_i32(1);
851 case 0x2 | 0x1: /* cc > 1 */
853 c
->u
.s32
.b
= tcg_const_i32(1);
855 case 0x2: /* cc == 2 */
857 c
->u
.s32
.b
= tcg_const_i32(2);
859 case 0x1: /* cc == 3 */
861 c
->u
.s32
.b
= tcg_const_i32(3);
864 /* CC is masked by something else: (8 >> cc) & mask. */
867 c
->u
.s32
.a
= tcg_const_i32(8);
868 c
->u
.s32
.b
= tcg_const_i32(0);
869 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
870 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
881 static void free_compare(DisasCompare
*c
)
885 tcg_temp_free_i64(c
->u
.s64
.a
);
887 tcg_temp_free_i32(c
->u
.s32
.a
);
892 tcg_temp_free_i64(c
->u
.s64
.b
);
894 tcg_temp_free_i32(c
->u
.s32
.b
);
899 /* ====================================================================== */
900 /* Define the insn format enumeration. */
901 #define F0(N) FMT_##N,
902 #define F1(N, X1) F0(N)
903 #define F2(N, X1, X2) F0(N)
904 #define F3(N, X1, X2, X3) F0(N)
905 #define F4(N, X1, X2, X3, X4) F0(N)
906 #define F5(N, X1, X2, X3, X4, X5) F0(N)
909 #include "insn-format.def"
919 /* Define a structure to hold the decoded fields. We'll store each inside
920 an array indexed by an enum. In order to conserve memory, we'll arrange
921 for fields that do not exist at the same time to overlap, thus the "C"
922 for compact. For checking purposes there is an "O" for original index
923 as well that will be applied to availability bitmaps. */
925 enum DisasFieldIndexO
{
948 enum DisasFieldIndexC
{
982 unsigned presentC
:16;
983 unsigned int presentO
;
987 /* This is the way fields are to be accessed out of DisasFields. */
988 #define have_field(S, F) have_field1((S), FLD_O_##F)
989 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
991 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
993 return (f
->presentO
>> c
) & 1;
996 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
997 enum DisasFieldIndexC c
)
999 assert(have_field1(f
, o
));
1003 /* Describe the layout of each field in each format. */
1004 typedef struct DisasField
{
1006 unsigned int size
:8;
1007 unsigned int type
:2;
1008 unsigned int indexC
:6;
1009 enum DisasFieldIndexO indexO
:8;
1012 typedef struct DisasFormatInfo
{
1013 DisasField op
[NUM_C_FIELD
];
1016 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1017 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1018 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1019 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1020 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1021 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1022 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1023 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1024 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1025 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1026 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1027 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1028 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1029 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1031 #define F0(N) { { } },
1032 #define F1(N, X1) { { X1 } },
1033 #define F2(N, X1, X2) { { X1, X2 } },
1034 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1035 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1036 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1038 static const DisasFormatInfo format_info
[] = {
1039 #include "insn-format.def"
1057 /* Generally, we'll extract operands into this structures, operate upon
1058 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1059 of routines below for more details. */
1061 bool g_out
, g_out2
, g_in1
, g_in2
;
1062 TCGv_i64 out
, out2
, in1
, in2
;
1066 /* Instructions can place constraints on their operands, raising specification
1067 exceptions if they are violated. To make this easy to automate, each "in1",
1068 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1069 of the following, or 0. To make this easy to document, we'll put the
1070 SPEC_<name> defines next to <name>. */
1072 #define SPEC_r1_even 1
1073 #define SPEC_r2_even 2
1074 #define SPEC_r3_even 4
1075 #define SPEC_r1_f128 8
1076 #define SPEC_r2_f128 16
1078 /* Return values from translate_one, indicating the state of the TB. */
1080 /* Continue the TB. */
1082 /* We have emitted one or more goto_tb. No fixup required. */
1084 /* We are not using a goto_tb (for whatever reason), but have updated
1085 the PC (for whatever reason), so there's no need to do it again on
1088 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1089 updated the PC for the next instruction to be executed. */
1091 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1092 No following code will be executed. */
/* Architectural facility each instruction may require; checked at
   decode time so unimplemented-facility insns raise an exception. */
1096 typedef enum DisasFacility
{
1097 FAC_Z
, /* zarch (default) */
1098 FAC_CASS
, /* compare and swap and store */
1099 FAC_CASS2
, /* compare and swap and store 2 */
1100 FAC_DFP
, /* decimal floating point */
1101 FAC_DFPR
, /* decimal floating point rounding */
1102 FAC_DO
, /* distinct operands */
1103 FAC_EE
, /* execute extensions */
1104 FAC_EI
, /* extended immediate */
1105 FAC_FPE
, /* floating point extension */
1106 FAC_FPSSH
, /* floating point support sign handling */
1107 FAC_FPRGR
, /* FPR-GR transfer */
1108 FAC_GIE
, /* general instructions extension */
1109 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1110 FAC_HW
, /* high-word */
1111 FAC_IEEEE_SIM
, /* IEEE exception simulation */
1112 FAC_LOC
, /* load/store on condition */
1113 FAC_LD
, /* long displacement */
1114 FAC_PC
, /* population count */
1115 FAC_SCF
, /* store clock fast */
1116 FAC_SFLE
, /* store facility list extended */
1122 DisasFacility fac
:8;
1127 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1128 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1129 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1130 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1131 void (*help_cout
)(DisasContext
*, DisasOps
*);
1132 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1137 /* ====================================================================== */
1138 /* Miscellaneous helpers, used by several operations. */
1140 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1141 DisasOps
*o
, int mask
)
1143 int b2
= get_field(f
, b2
);
1144 int d2
= get_field(f
, d2
);
1147 o
->in2
= tcg_const_i64(d2
& mask
);
1149 o
->in2
= get_address(s
, 0, b2
, d2
);
1150 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1154 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1156 if (dest
== s
->next_pc
) {
1159 if (use_goto_tb(s
, dest
)) {
1162 tcg_gen_movi_i64(psw_addr
, dest
);
1163 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1164 return EXIT_GOTO_TB
;
1166 tcg_gen_movi_i64(psw_addr
, dest
);
1167 return EXIT_PC_UPDATED
;
1171 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1172 bool is_imm
, int imm
, TCGv_i64 cdest
)
1175 uint64_t dest
= s
->pc
+ 2 * imm
;
1178 /* Take care of the special cases first. */
1179 if (c
->cond
== TCG_COND_NEVER
) {
1184 if (dest
== s
->next_pc
) {
1185 /* Branch to next. */
1189 if (c
->cond
== TCG_COND_ALWAYS
) {
1190 ret
= help_goto_direct(s
, dest
);
1194 if (TCGV_IS_UNUSED_I64(cdest
)) {
1195 /* E.g. bcr %r0 -> no branch. */
1199 if (c
->cond
== TCG_COND_ALWAYS
) {
1200 tcg_gen_mov_i64(psw_addr
, cdest
);
1201 ret
= EXIT_PC_UPDATED
;
1206 if (use_goto_tb(s
, s
->next_pc
)) {
1207 if (is_imm
&& use_goto_tb(s
, dest
)) {
1208 /* Both exits can use goto_tb. */
1211 lab
= gen_new_label();
1213 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1215 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1218 /* Branch not taken. */
1220 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1221 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1226 tcg_gen_movi_i64(psw_addr
, dest
);
1227 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1231 /* Fallthru can use goto_tb, but taken branch cannot. */
1232 /* Store taken branch destination before the brcond. This
1233 avoids having to allocate a new local temp to hold it.
1234 We'll overwrite this in the not taken case anyway. */
1236 tcg_gen_mov_i64(psw_addr
, cdest
);
1239 lab
= gen_new_label();
1241 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1243 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1246 /* Branch not taken. */
1249 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1250 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1254 tcg_gen_movi_i64(psw_addr
, dest
);
1256 ret
= EXIT_PC_UPDATED
;
1259 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1260 Most commonly we're single-stepping or some other condition that
1261 disables all use of goto_tb. Just update the PC and exit. */
1263 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1265 cdest
= tcg_const_i64(dest
);
1269 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1272 TCGv_i32 t0
= tcg_temp_new_i32();
1273 TCGv_i64 t1
= tcg_temp_new_i64();
1274 TCGv_i64 z
= tcg_const_i64(0);
1275 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1276 tcg_gen_extu_i32_i64(t1
, t0
);
1277 tcg_temp_free_i32(t0
);
1278 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1279 tcg_temp_free_i64(t1
);
1280 tcg_temp_free_i64(z
);
1284 tcg_temp_free_i64(cdest
);
1286 tcg_temp_free_i64(next
);
1288 ret
= EXIT_PC_UPDATED
;
1296 /* ====================================================================== */
1297 /* The operations. These perform the bulk of the work for any insn,
1298 usually after the operands have been loaded and output initialized. */
1300 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1302 gen_helper_abs_i64(o
->out
, o
->in2
);
1306 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1308 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1312 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1314 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1318 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1320 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1321 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1325 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1327 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1331 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1336 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1338 /* The carry flag is the msb of CC, therefore the branch mask that would
1339 create that comparison is 3. Feeding the generated comparison to
1340 setcond produces the carry flag that we desire. */
1341 disas_jcc(s
, &cmp
, 3);
1342 carry
= tcg_temp_new_i64();
1344 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1346 TCGv_i32 t
= tcg_temp_new_i32();
1347 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1348 tcg_gen_extu_i32_i64(carry
, t
);
1349 tcg_temp_free_i32(t
);
1353 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1354 tcg_temp_free_i64(carry
);
1358 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1360 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1364 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1366 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1370 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1372 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1373 return_low128(o
->out2
);
1377 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1379 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1383 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1385 int shift
= s
->insn
->data
& 0xff;
1386 int size
= s
->insn
->data
>> 8;
1387 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1390 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1391 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1392 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1394 /* Produce the CC from only the bits manipulated. */
1395 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1396 set_cc_nz_u64(s
, cc_dst
);
1400 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1402 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1403 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1404 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1405 return EXIT_PC_UPDATED
;
1411 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1413 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1414 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1417 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1419 int m1
= get_field(s
->fields
, m1
);
1420 bool is_imm
= have_field(s
->fields
, i2
);
1421 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1424 disas_jcc(s
, &c
, m1
);
1425 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1428 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1430 int r1
= get_field(s
->fields
, r1
);
1431 bool is_imm
= have_field(s
->fields
, i2
);
1432 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1436 c
.cond
= TCG_COND_NE
;
1441 t
= tcg_temp_new_i64();
1442 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1443 store_reg32_i64(r1
, t
);
1444 c
.u
.s32
.a
= tcg_temp_new_i32();
1445 c
.u
.s32
.b
= tcg_const_i32(0);
1446 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1447 tcg_temp_free_i64(t
);
1449 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1452 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1454 int r1
= get_field(s
->fields
, r1
);
1455 bool is_imm
= have_field(s
->fields
, i2
);
1456 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1459 c
.cond
= TCG_COND_NE
;
1464 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1465 c
.u
.s64
.a
= regs
[r1
];
1466 c
.u
.s64
.b
= tcg_const_i64(0);
1468 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1471 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1473 int r1
= get_field(s
->fields
, r1
);
1474 int r3
= get_field(s
->fields
, r3
);
1475 bool is_imm
= have_field(s
->fields
, i2
);
1476 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1480 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1485 t
= tcg_temp_new_i64();
1486 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1487 c
.u
.s32
.a
= tcg_temp_new_i32();
1488 c
.u
.s32
.b
= tcg_temp_new_i32();
1489 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1490 tcg_gen_trunc_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1491 store_reg32_i64(r1
, t
);
1492 tcg_temp_free_i64(t
);
1494 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1497 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1499 int r1
= get_field(s
->fields
, r1
);
1500 int r3
= get_field(s
->fields
, r3
);
1501 bool is_imm
= have_field(s
->fields
, i2
);
1502 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1505 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1508 if (r1
== (r3
| 1)) {
1509 c
.u
.s64
.b
= load_reg(r3
| 1);
1512 c
.u
.s64
.b
= regs
[r3
| 1];
1516 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1517 c
.u
.s64
.a
= regs
[r1
];
1520 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1523 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1525 int imm
, m3
= get_field(s
->fields
, m3
);
1529 c
.cond
= ltgt_cond
[m3
];
1530 if (s
->insn
->data
) {
1531 c
.cond
= tcg_unsigned_cond(c
.cond
);
1533 c
.is_64
= c
.g1
= c
.g2
= true;
1537 is_imm
= have_field(s
->fields
, i4
);
1539 imm
= get_field(s
->fields
, i4
);
1542 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1543 get_field(s
->fields
, d4
));
1546 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1549 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1551 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1556 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1558 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1563 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1565 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1570 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1572 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1573 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1574 tcg_temp_free_i32(m3
);
1575 gen_set_cc_nz_f32(s
, o
->in2
);
1579 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1581 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1582 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1583 tcg_temp_free_i32(m3
);
1584 gen_set_cc_nz_f64(s
, o
->in2
);
1588 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1590 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1591 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1592 tcg_temp_free_i32(m3
);
1593 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1597 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1599 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1600 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1601 tcg_temp_free_i32(m3
);
1602 gen_set_cc_nz_f32(s
, o
->in2
);
1606 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1608 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1609 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1610 tcg_temp_free_i32(m3
);
1611 gen_set_cc_nz_f64(s
, o
->in2
);
1615 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1617 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1618 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1619 tcg_temp_free_i32(m3
);
1620 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1624 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1626 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1627 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1628 tcg_temp_free_i32(m3
);
1629 gen_set_cc_nz_f32(s
, o
->in2
);
1633 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1635 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1636 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1637 tcg_temp_free_i32(m3
);
1638 gen_set_cc_nz_f64(s
, o
->in2
);
1642 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1644 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1645 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1646 tcg_temp_free_i32(m3
);
1647 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1651 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1653 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1654 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1655 tcg_temp_free_i32(m3
);
1656 gen_set_cc_nz_f32(s
, o
->in2
);
1660 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1662 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1663 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1664 tcg_temp_free_i32(m3
);
1665 gen_set_cc_nz_f64(s
, o
->in2
);
1669 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1671 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1672 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1673 tcg_temp_free_i32(m3
);
1674 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1678 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1680 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1681 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1682 tcg_temp_free_i32(m3
);
1686 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1688 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1689 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1690 tcg_temp_free_i32(m3
);
1694 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1696 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1697 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1698 tcg_temp_free_i32(m3
);
1699 return_low128(o
->out2
);
1703 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1705 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1706 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1707 tcg_temp_free_i32(m3
);
1711 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1713 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1714 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1715 tcg_temp_free_i32(m3
);
1719 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1721 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1722 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1723 tcg_temp_free_i32(m3
);
1724 return_low128(o
->out2
);
1728 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1730 int r2
= get_field(s
->fields
, r2
);
1731 TCGv_i64 len
= tcg_temp_new_i64();
1733 potential_page_fault(s
);
1734 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1736 return_low128(o
->out
);
1738 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1739 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1740 tcg_temp_free_i64(len
);
1745 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1747 int l
= get_field(s
->fields
, l1
);
1752 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1753 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1756 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1757 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1760 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1761 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1764 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1765 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1768 potential_page_fault(s
);
1769 vl
= tcg_const_i32(l
);
1770 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1771 tcg_temp_free_i32(vl
);
1775 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1779 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1781 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1782 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1783 potential_page_fault(s
);
1784 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1785 tcg_temp_free_i32(r1
);
1786 tcg_temp_free_i32(r3
);
1791 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1793 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1794 TCGv_i32 t1
= tcg_temp_new_i32();
1795 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
1796 potential_page_fault(s
);
1797 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1799 tcg_temp_free_i32(t1
);
1800 tcg_temp_free_i32(m3
);
1804 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1806 potential_page_fault(s
);
1807 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1809 return_low128(o
->in2
);
1813 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1815 TCGv_i64 t
= tcg_temp_new_i64();
1816 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1817 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1818 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1819 tcg_temp_free_i64(t
);
1823 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1825 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1826 int d2
= get_field(s
->fields
, d2
);
1827 int b2
= get_field(s
->fields
, b2
);
1828 int is_64
= s
->insn
->data
;
1829 TCGv_i64 addr
, mem
, cc
, z
;
1831 /* Note that in1 = R3 (new value) and
1832 in2 = (zero-extended) R1 (expected value). */
1834 /* Load the memory into the (temporary) output. While the PoO only talks
1835 about moving the memory to R1 on inequality, if we include equality it
1836 means that R1 is equal to the memory in all conditions. */
1837 addr
= get_address(s
, 0, b2
, d2
);
1839 tcg_gen_qemu_ld64(o
->out
, addr
, get_mem_index(s
));
1841 tcg_gen_qemu_ld32u(o
->out
, addr
, get_mem_index(s
));
1844 /* Are the memory and expected values (un)equal? Note that this setcond
1845 produces the output CC value, thus the NE sense of the test. */
1846 cc
= tcg_temp_new_i64();
1847 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1849 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1850 Recall that we are allowed to unconditionally issue the store (and
1851 thus any possible write trap), so (re-)store the original contents
1852 of MEM in case of inequality. */
1853 z
= tcg_const_i64(0);
1854 mem
= tcg_temp_new_i64();
1855 tcg_gen_movcond_i64(TCG_COND_EQ
, mem
, cc
, z
, o
->in1
, o
->out
);
1857 tcg_gen_qemu_st64(mem
, addr
, get_mem_index(s
));
1859 tcg_gen_qemu_st32(mem
, addr
, get_mem_index(s
));
1861 tcg_temp_free_i64(z
);
1862 tcg_temp_free_i64(mem
);
1863 tcg_temp_free_i64(addr
);
1865 /* Store CC back to cc_op. Wait until after the store so that any
1866 exception gets the old cc_op value. */
1867 tcg_gen_trunc_i64_i32(cc_op
, cc
);
1868 tcg_temp_free_i64(cc
);
1873 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1875 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1876 int r1
= get_field(s
->fields
, r1
);
1877 int r3
= get_field(s
->fields
, r3
);
1878 int d2
= get_field(s
->fields
, d2
);
1879 int b2
= get_field(s
->fields
, b2
);
1880 TCGv_i64 addrh
, addrl
, memh
, meml
, outh
, outl
, cc
, z
;
1882 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1884 addrh
= get_address(s
, 0, b2
, d2
);
1885 addrl
= get_address(s
, 0, b2
, d2
+ 8);
1886 outh
= tcg_temp_new_i64();
1887 outl
= tcg_temp_new_i64();
1889 tcg_gen_qemu_ld64(outh
, addrh
, get_mem_index(s
));
1890 tcg_gen_qemu_ld64(outl
, addrl
, get_mem_index(s
));
1892 /* Fold the double-word compare with arithmetic. */
1893 cc
= tcg_temp_new_i64();
1894 z
= tcg_temp_new_i64();
1895 tcg_gen_xor_i64(cc
, outh
, regs
[r1
]);
1896 tcg_gen_xor_i64(z
, outl
, regs
[r1
+ 1]);
1897 tcg_gen_or_i64(cc
, cc
, z
);
1898 tcg_gen_movi_i64(z
, 0);
1899 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, cc
, z
);
1901 memh
= tcg_temp_new_i64();
1902 meml
= tcg_temp_new_i64();
1903 tcg_gen_movcond_i64(TCG_COND_EQ
, memh
, cc
, z
, regs
[r3
], outh
);
1904 tcg_gen_movcond_i64(TCG_COND_EQ
, meml
, cc
, z
, regs
[r3
+ 1], outl
);
1905 tcg_temp_free_i64(z
);
1907 tcg_gen_qemu_st64(memh
, addrh
, get_mem_index(s
));
1908 tcg_gen_qemu_st64(meml
, addrl
, get_mem_index(s
));
1909 tcg_temp_free_i64(memh
);
1910 tcg_temp_free_i64(meml
);
1911 tcg_temp_free_i64(addrh
);
1912 tcg_temp_free_i64(addrl
);
1914 /* Save back state now that we've passed all exceptions. */
1915 tcg_gen_mov_i64(regs
[r1
], outh
);
1916 tcg_gen_mov_i64(regs
[r1
+ 1], outl
);
1917 tcg_gen_trunc_i64_i32(cc_op
, cc
);
1918 tcg_temp_free_i64(outh
);
1919 tcg_temp_free_i64(outl
);
1920 tcg_temp_free_i64(cc
);
1925 #ifndef CONFIG_USER_ONLY
1926 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
1928 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1929 check_privileged(s
);
1930 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
1931 tcg_temp_free_i32(r1
);
1937 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
1939 TCGv_i64 t1
= tcg_temp_new_i64();
1940 TCGv_i32 t2
= tcg_temp_new_i32();
1941 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
1942 gen_helper_cvd(t1
, t2
);
1943 tcg_temp_free_i32(t2
);
1944 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
1945 tcg_temp_free_i64(t1
);
1949 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
1951 int m3
= get_field(s
->fields
, m3
);
1952 int lab
= gen_new_label();
1956 c
= tcg_invert_cond(ltgt_cond
[m3
]);
1957 if (s
->insn
->data
) {
1958 c
= tcg_unsigned_cond(c
);
1960 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
1962 /* Set DXC to 0xff. */
1963 t
= tcg_temp_new_i32();
1964 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1965 tcg_gen_ori_i32(t
, t
, 0xff00);
1966 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1967 tcg_temp_free_i32(t
);
1970 gen_program_exception(s
, PGM_DATA
);
1976 #ifndef CONFIG_USER_ONLY
1977 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
1981 check_privileged(s
);
1982 potential_page_fault(s
);
1984 /* We pretend the format is RX_a so that D2 is the field we want. */
1985 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
1986 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
1987 tcg_temp_free_i32(tmp
);
1992 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
1994 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1995 return_low128(o
->out
);
1999 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2001 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2002 return_low128(o
->out
);
2006 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2008 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2009 return_low128(o
->out
);
2013 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2015 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2016 return_low128(o
->out
);
2020 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2022 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2026 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2028 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2032 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2034 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2035 return_low128(o
->out2
);
2039 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2041 int r2
= get_field(s
->fields
, r2
);
2042 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2046 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2048 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2052 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2054 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2055 tb->flags, (ab)use the tb->cs_base field as the address of
2056 the template in memory, and grab 8 bits of tb->flags/cflags for
2057 the contents of the register. We would then recognize all this
2058 in gen_intermediate_code_internal, generating code for exactly
2059 one instruction. This new TB then gets executed normally.
2061 On the other hand, this seems to be mostly used for modifying
2062 MVC inside of memcpy, which needs a helper call anyway. So
2063 perhaps this doesn't bear thinking about any further. */
2070 tmp
= tcg_const_i64(s
->next_pc
);
2071 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2072 tcg_temp_free_i64(tmp
);
2078 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2080 /* We'll use the original input for cc computation, since we get to
2081 compare that against 0, which ought to be better than comparing
2082 the real output against 64. It also lets cc_dst be a convenient
2083 temporary during our computation. */
2084 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2086 /* R1 = IN ? CLZ(IN) : 64. */
2087 gen_helper_clz(o
->out
, o
->in2
);
2089 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2090 value by 64, which is undefined. But since the shift is 64 iff the
2091 input is zero, we still get the correct result after and'ing. */
2092 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2093 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2094 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2098 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2100 int m3
= get_field(s
->fields
, m3
);
2101 int pos
, len
, base
= s
->insn
->data
;
2102 TCGv_i64 tmp
= tcg_temp_new_i64();
2107 /* Effectively a 32-bit load. */
2108 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2115 /* Effectively a 16-bit load. */
2116 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2124 /* Effectively an 8-bit load. */
2125 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2130 pos
= base
+ ctz32(m3
) * 8;
2131 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2132 ccm
= ((1ull << len
) - 1) << pos
;
2136 /* This is going to be a sequence of loads and inserts. */
2137 pos
= base
+ 32 - 8;
2141 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2142 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2143 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2146 m3
= (m3
<< 1) & 0xf;
2152 tcg_gen_movi_i64(tmp
, ccm
);
2153 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2154 tcg_temp_free_i64(tmp
);
2158 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2160 int shift
= s
->insn
->data
& 0xff;
2161 int size
= s
->insn
->data
>> 8;
2162 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2166 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2171 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2173 t1
= tcg_temp_new_i64();
2174 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2175 tcg_gen_shri_i64(t1
, t1
, 36);
2176 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2178 tcg_gen_extu_i32_i64(t1
, cc_op
);
2179 tcg_gen_shli_i64(t1
, t1
, 28);
2180 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2181 tcg_temp_free_i64(t1
);
2185 #ifndef CONFIG_USER_ONLY
2186 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2188 check_privileged(s
);
2189 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2193 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2195 check_privileged(s
);
2196 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2201 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2203 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2207 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2209 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2213 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2215 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2219 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2221 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2225 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2227 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2228 return_low128(o
->out2
);
2232 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2234 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2235 return_low128(o
->out2
);
2239 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2241 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2245 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2247 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2251 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2253 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2257 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2259 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2263 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2265 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2269 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2271 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2275 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2277 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2281 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2283 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2287 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2291 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2294 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2298 TCGv_i32 t32
= tcg_temp_new_i32();
2301 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2304 t
= tcg_temp_new_i64();
2305 tcg_gen_extu_i32_i64(t
, t32
);
2306 tcg_temp_free_i32(t32
);
2308 z
= tcg_const_i64(0);
2309 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2310 tcg_temp_free_i64(t
);
2311 tcg_temp_free_i64(z
);
2317 #ifndef CONFIG_USER_ONLY
2318 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2320 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2321 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2322 check_privileged(s
);
2323 potential_page_fault(s
);
2324 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2325 tcg_temp_free_i32(r1
);
2326 tcg_temp_free_i32(r3
);
2330 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2332 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2333 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2334 check_privileged(s
);
2335 potential_page_fault(s
);
2336 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2337 tcg_temp_free_i32(r1
);
2338 tcg_temp_free_i32(r3
);
2341 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2343 check_privileged(s
);
2344 potential_page_fault(s
);
2345 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2350 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2354 check_privileged(s
);
2356 t1
= tcg_temp_new_i64();
2357 t2
= tcg_temp_new_i64();
2358 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2359 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2360 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2361 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2362 tcg_gen_shli_i64(t1
, t1
, 32);
2363 gen_helper_load_psw(cpu_env
, t1
, t2
);
2364 tcg_temp_free_i64(t1
);
2365 tcg_temp_free_i64(t2
);
2366 return EXIT_NORETURN
;
2369 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2373 check_privileged(s
);
2375 t1
= tcg_temp_new_i64();
2376 t2
= tcg_temp_new_i64();
2377 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2378 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2379 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2380 gen_helper_load_psw(cpu_env
, t1
, t2
);
2381 tcg_temp_free_i64(t1
);
2382 tcg_temp_free_i64(t2
);
2383 return EXIT_NORETURN
;
2387 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2389 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2390 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2391 potential_page_fault(s
);
2392 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2393 tcg_temp_free_i32(r1
);
2394 tcg_temp_free_i32(r3
);
2398 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2400 int r1
= get_field(s
->fields
, r1
);
2401 int r3
= get_field(s
->fields
, r3
);
2402 TCGv_i64 t
= tcg_temp_new_i64();
2403 TCGv_i64 t4
= tcg_const_i64(4);
2406 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2407 store_reg32_i64(r1
, t
);
2411 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2415 tcg_temp_free_i64(t
);
2416 tcg_temp_free_i64(t4
);
2420 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2422 int r1
= get_field(s
->fields
, r1
);
2423 int r3
= get_field(s
->fields
, r3
);
2424 TCGv_i64 t
= tcg_temp_new_i64();
2425 TCGv_i64 t4
= tcg_const_i64(4);
2428 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2429 store_reg32h_i64(r1
, t
);
2433 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2437 tcg_temp_free_i64(t
);
2438 tcg_temp_free_i64(t4
);
2442 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2444 int r1
= get_field(s
->fields
, r1
);
2445 int r3
= get_field(s
->fields
, r3
);
2446 TCGv_i64 t8
= tcg_const_i64(8);
2449 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2453 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2457 tcg_temp_free_i64(t8
);
2461 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2464 o
->g_out
= o
->g_in2
;
2465 TCGV_UNUSED_I64(o
->in2
);
2470 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2474 o
->g_out
= o
->g_in1
;
2475 o
->g_out2
= o
->g_in2
;
2476 TCGV_UNUSED_I64(o
->in1
);
2477 TCGV_UNUSED_I64(o
->in2
);
2478 o
->g_in1
= o
->g_in2
= false;
2482 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2484 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2485 potential_page_fault(s
);
2486 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2487 tcg_temp_free_i32(l
);
2491 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2493 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2494 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2495 potential_page_fault(s
);
2496 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2497 tcg_temp_free_i32(r1
);
2498 tcg_temp_free_i32(r2
);
2503 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2505 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2506 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2507 potential_page_fault(s
);
2508 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2509 tcg_temp_free_i32(r1
);
2510 tcg_temp_free_i32(r3
);
2515 #ifndef CONFIG_USER_ONLY
2516 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2518 int r1
= get_field(s
->fields
, l1
);
2519 check_privileged(s
);
2520 potential_page_fault(s
);
2521 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2526 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2528 int r1
= get_field(s
->fields
, l1
);
2529 check_privileged(s
);
2530 potential_page_fault(s
);
2531 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2537 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2539 potential_page_fault(s
);
2540 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2545 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2547 potential_page_fault(s
);
2548 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2550 return_low128(o
->in2
);
2554 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2556 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2560 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2562 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
2566 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2568 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2572 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2574 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2578 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2580 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2584 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2586 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2587 return_low128(o
->out2
);
2591 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2593 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2594 return_low128(o
->out2
);
2598 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2600 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2601 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2602 tcg_temp_free_i64(r3
);
2606 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2608 int r3
= get_field(s
->fields
, r3
);
2609 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2613 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2615 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2616 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2617 tcg_temp_free_i64(r3
);
2621 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2623 int r3
= get_field(s
->fields
, r3
);
2624 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2628 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2630 gen_helper_nabs_i64(o
->out
, o
->in2
);
2634 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2636 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2640 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2642 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2646 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2648 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2649 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2653 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2655 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2656 potential_page_fault(s
);
2657 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2658 tcg_temp_free_i32(l
);
2663 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2665 tcg_gen_neg_i64(o
->out
, o
->in2
);
2669 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2671 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2675 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2677 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2681 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2683 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2684 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2688 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2690 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2691 potential_page_fault(s
);
2692 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2693 tcg_temp_free_i32(l
);
2698 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2700 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2704 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2706 int shift
= s
->insn
->data
& 0xff;
2707 int size
= s
->insn
->data
>> 8;
2708 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2711 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2712 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2714 /* Produce the CC from only the bits manipulated. */
2715 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2716 set_cc_nz_u64(s
, cc_dst
);
2720 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
2722 gen_helper_popcnt(o
->out
, o
->in2
);
2726 #ifndef CONFIG_USER_ONLY
2727 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
2729 check_privileged(s
);
2730 gen_helper_ptlb(cpu_env
);
2735 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
2737 int i3
= get_field(s
->fields
, i3
);
2738 int i4
= get_field(s
->fields
, i4
);
2739 int i5
= get_field(s
->fields
, i5
);
2740 int do_zero
= i4
& 0x80;
2741 uint64_t mask
, imask
, pmask
;
2744 /* Adjust the arguments for the specific insn. */
2745 switch (s
->fields
->op2
) {
2746 case 0x55: /* risbg */
2751 case 0x5d: /* risbhg */
2754 pmask
= 0xffffffff00000000ull
;
2756 case 0x51: /* risblg */
2759 pmask
= 0x00000000ffffffffull
;
2765 /* MASK is the set of bits to be inserted from R2.
2766 Take care for I3/I4 wraparound. */
2769 mask
^= pmask
>> i4
>> 1;
2771 mask
|= ~(pmask
>> i4
>> 1);
2775 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
2776 insns, we need to keep the other half of the register. */
2777 imask
= ~mask
| ~pmask
;
2779 if (s
->fields
->op2
== 0x55) {
2786 /* In some cases we can implement this with deposit, which can be more
2787 efficient on some hosts. */
2788 if (~mask
== imask
&& i3
<= i4
) {
2789 if (s
->fields
->op2
== 0x5d) {
2792 /* Note that we rotate the bits to be inserted to the lsb, not to
2793 the position as described in the PoO. */
2796 rot
= (i5
- pos
) & 63;
2802 /* Rotate the input as necessary. */
2803 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
2805 /* Insert the selected bits into the output. */
2807 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
2808 } else if (imask
== 0) {
2809 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
2811 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2812 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
2813 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
2818 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
2820 int i3
= get_field(s
->fields
, i3
);
2821 int i4
= get_field(s
->fields
, i4
);
2822 int i5
= get_field(s
->fields
, i5
);
2825 /* If this is a test-only form, arrange to discard the result. */
2827 o
->out
= tcg_temp_new_i64();
2835 /* MASK is the set of bits to be operated on from R2.
2836 Take care for I3/I4 wraparound. */
2839 mask
^= ~0ull >> i4
>> 1;
2841 mask
|= ~(~0ull >> i4
>> 1);
2844 /* Rotate the input as necessary. */
2845 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
2848 switch (s
->fields
->op2
) {
2849 case 0x55: /* AND */
2850 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
2851 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
2854 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2855 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
2857 case 0x57: /* XOR */
2858 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2859 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
2866 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2867 set_cc_nz_u64(s
, cc_dst
);
2871 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2873 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2877 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2879 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2883 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2885 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
/* ROTATE LEFT SINGLE LOGICAL (RLL): 32-bit rotate done in i32 temps,
   with the result zero-extended into the 64-bit output.  */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    tcg_gen_trunc_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}

/* ROTATE LEFT SINGLE LOGICAL (RLLG): 64-bit rotate.  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2910 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; helper updates the storage
   key and returns the CC in cc_op.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL FAST: privileged.  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return EXIT_PC_STALE;
}
/* SET ACCESS (SAR): store the low 32 bits of the operand into access
   register R1.  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
/* SUBTRACT (short BFP): helper performs the float op with exceptions.  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (long BFP).  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (extended BFP): 128-bit result returned via out/out2, low
   half through the retxl side channel.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* SQUARE ROOT (short BFP).  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (long BFP).  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (extended BFP).  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2973 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged; may fault on the SCCB access.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR: privileged inter-CPU signalling; CC from helper.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
/* STORE ON CONDITION (STOC/STOCG): store R1 only when the condition in
   M3 holds; insn->data selects a 64-bit (nonzero) vs 32-bit store.  */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not.  */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
/* SHIFT LEFT SINGLE (arithmetic, SLA/SLAG): insn->data is the sign-bit
   position (31 or 63).  CC is computed from the pre-shift inputs.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
/* SHIFT LEFT SINGLE LOGICAL.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE (arithmetic).  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE LOGICAL.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SET FPC: load the floating-point control register from the operand.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL: as SFPC but may raise a simulated IEEE event.  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
/* SET ROUNDING MODE (SRNM/SRNMB/SRNMT): insert the low bits of the
   effective address into the rounding-mode field of the FPC, then
   reinstall the FPC so fpu_status picks up the new mode.  */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    /* Field position/width in the FPC depends on which opcode this is.  */
    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 4;
        break;
    default:
        abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC.  */
    if (b2 == 0) {
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status.  */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
3107 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; bits 56-59 of the address
   become the PSW access key.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED: privileged.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK: privileged; replace PSW bits 0-7.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
/* STORE CPU ADDRESS: privileged.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested.  */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* STORE CLOCK: read the TOD clock via helper.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* STORE CLOCK EXTENDED: 16-byte result built from the 64-bit clock.  */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* SET CLOCK COMPARATOR: privileged.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR: privileged.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE CONTROL (64-bit, STCTG): privileged; helper stores control
   registers R1..R3 at the operand address.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit, STCTL): privileged.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CPU ID: privileged; only the cpu number is provided here.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* SET CPU TIMER: privileged.  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE FACILITY LIST: privileged; stores a fixed facility word at
   absolute address 200.  */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 f, a;
    /* We really ought to have more complete indication of facilities
       that we implement.  Address this when STFLE is implemented.  */
    check_privileged(s);
    f = tcg_const_i64(0xc0000000);
    a = tcg_const_i64(200);
    tcg_gen_qemu_st32(f, a, get_mem_index(s));
    tcg_temp_free_i64(f);
    tcg_temp_free_i64(a);
    return NO_EXIT;
}
/* STORE CPU TIMER: privileged.  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE SYSTEM INFORMATION: privileged; r0/r1 carry the function code
   and selectors, CC comes back from the helper.  */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX: privileged.  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}

/* I/O-subsystem instructions: privileged; channel subsystem is not
   modelled, so report "not operational" via CC 3.  */
static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* Not operational.  */
    gen_op_movi_cc(s, 3);
    return NO_EXIT;
}

/* STORE PREFIX: privileged; prefix is aligned to 8K.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
/* STORE THEN AND/OR SYSTEM MASK (STNSM op 0xac / STOSM): privileged.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        /* STNSM: AND the immediate into PSW bits 0-7.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into PSW bits 0-7.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}

/* STORE USING REAL ADDRESS: privileged; bypasses DAT via helper.  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
/* STORE, 8-bit: store the low byte of in1 at address in2.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* STORE, 16-bit.  */
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* STORE, 32-bit.  */
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* STORE, 64-bit.  */
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* STORE ACCESS MULTIPLE: helper stores access registers R1..R3.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CHARACTERS UNDER MASK: store the bytes of R1 selected by the M3
   mask to successive addresses.  insn->data is the bit offset of the
   field within the register (high vs low word).  Contiguous masks are
   emitted as one wider store; otherwise a byte-by-byte sequence.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
/* STORE MULTIPLE (STM/STMG): store registers R1..R3 (wrapping mod 16)
   to successive memory locations; insn->data is the element size.  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}

/* STORE MULTIPLE HIGH: store the high 32 bits of registers R1..R3.  */
static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        /* The 32-bit qemu store uses the low half of T, so shift the
           high word down first.  */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return NO_EXIT;
}
/* SEARCH STRING: helper scans for the byte in r0; updated second
   operand address comes back through the low-128 side channel.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

/* SUBTRACT: plain 64-bit subtraction; CC set by the cout hook.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SUBTRACT WITH BORROW: compute in1 - in2, then subtract the borrow
   implied by the current condition code.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
/* SUPERVISOR CALL: record the SVC code and instruction length in the
   CPU state, then raise the SVC exception.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
/* TEST DATA CLASS (short BFP): CC from helper.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (long BFP).  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (extended BFP): 128-bit value in out/out2.  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3521 #ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: CC from helper.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif

/* TRANSLATE: replace L1+1 bytes at addr1 via the table at in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* UNPACK: expand packed decimal at in2 into zoned format at addr1.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* EXCLUSIVE OR (character, XC): memory-to-memory XOR of L1+1 bytes.
   The common idiom "XC x,x" zeroes x; inline that as direct stores for
   short lengths, otherwise defer to the helper.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;
        /* Emit the widest stores first, narrowing for the tail.  */
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
/* EXCLUSIVE OR (register/memory forms).  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* EXCLUSIVE OR IMMEDIATE (XIHF/XILF etc): insn->data packs the field
   size (high byte) and shift (low byte) of the immediate within the
   64-bit register.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* Position the already-loaded immediate at its field.  */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

/* Produce a constant-zero output (e.g. for LZER/LZDR style insns).  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a 128-bit zero: both output halves are the same zero temp.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3640 /* ====================================================================== */
3641 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3642 the original inputs), update the various cc data structures in order to
3643 be able to compute the new condition code. */
/* CC helper: absolute value, 32-bit result.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

/* CC helper: absolute value, 64-bit result.  */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* CC helper: signed 32-bit add.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

/* CC helper: signed 64-bit add.  */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* CC helper: unsigned 32-bit add (carry out).  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

/* CC helper: unsigned 64-bit add (carry out).  */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* CC helper: 32-bit add with carry.  */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

/* CC helper: 64-bit add with carry.  */
static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* CC helper: signed 32-bit compare.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

/* CC helper: signed 64-bit compare.  */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* CC helper: unsigned 32-bit compare.  */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

/* CC helper: unsigned 64-bit compare.  */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* CC helper: short BFP result class.  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

/* CC helper: long BFP result class.  */
static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

/* CC helper: extended BFP result class (128-bit in out/out2).  */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

/* CC helper: negative absolute value, 32-bit.  */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

/* CC helper: negative absolute value, 64-bit.  */
static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* CC helper: 32-bit complement (load negative/complement).  */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

/* CC helper: 64-bit complement.  */
static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* CC helper: nonzero test on the low 32 bits of the result.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

/* CC helper: nonzero test on the 64-bit result.  */
static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* CC helper: sign of the 32-bit result vs zero.  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

/* CC helper: sign of the 64-bit result vs zero.  */
static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* CC helper: signed 32-bit subtract.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

/* CC helper: signed 64-bit subtract.  */
static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

/* CC helper: unsigned 32-bit subtract (borrow out).  */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

/* CC helper: unsigned 64-bit subtract (borrow out).  */
static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

/* CC helper: 32-bit subtract with borrow.  */
static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

/* CC helper: 64-bit subtract with borrow.  */
static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* CC helper: TEST UNDER MASK, 32-bit.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

/* CC helper: TEST UNDER MASK, 64-bit.  */
static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3801 /* ====================================================================== */
3802 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
3803 with the TCG register to which we will write. Used in combination with
3804 the "wout" generators, in some cases we need a new temporary, and in
3805 some cases we can write to a TCG global. */
/* Output prep: allocate a fresh temporary for the result.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Output prep: fresh temporary pair for a 128-bit result.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Output prep: write directly into general register R1 (a TCG global).  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Output prep: write into the even/odd register pair R1/R1+1.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Output prep: write directly into float register F1.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Output prep: write into the extended-FP register pair F1/F1+2.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
3852 /* ====================================================================== */
3853 /* The "Write OUTput" generators. These generally perform some non-trivial
3854 copy of data to TCG globals, or to main memory. The trivial cases are
3855 generally handled by having a "prep" generator install the TCG global
3856 as the destination of the operation. */
/* Writeback: full 64-bit result to general register R1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Writeback: low 8 bits of the result into R1, rest preserved.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Writeback: low 16 bits of the result into R1, rest preserved.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Writeback: low 32 bits into R1, upper half preserved.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Writeback: 32-bit halves of out/out2 into the pair R1/R1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Writeback: split a 64-bit result into the 32-bit pair R1/R1+1
   (high word to R1, low word to R1+1).  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Writeback: short-FP result into float register F1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

/* Writeback: long-FP result into float register F1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0

/* Writeback: extended-FP result into the register pair F1/F1+2.  */
static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* Writeback: 32-bit result to R1 only when R1 != R2 (e.g. LOAD AND
   TEST where source and destination coincide).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* Writeback: short-FP result to F1 only when F1 != F2.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Writeback: store 8 bits of the result at addr1.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

/* Writeback: store 16 bits of the result at addr1.  */
static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

/* Writeback: store 32 bits of the result at addr1.  */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

/* Writeback: store 64 bits of the result at addr1.  */
static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* Writeback: store 32 bits of the result at the address in in2.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0
3967 /* ====================================================================== */
3968 /* The "INput 1" generators. These load the first operand to an insn. */
/* Input 1: copy of general register R1.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

/* Input 1: register R1 in place (the TCG global itself).  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

/* Input 1: low 32 bits of R1, sign-extended.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

/* Input 1: low 32 bits of R1, zero-extended.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* Input 1: high 32 bits of R1, shifted down.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Input 1: copy of the odd register of the pair, R1+1.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

/* Input 1: low 32 bits of R1+1, sign-extended.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

/* Input 1: low 32 bits of R1+1, zero-extended.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* Input 1: 64-bit value formed from the 32-bit pair R1 (high) and
   R1+1 (low).  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

/* Input 1: copy of general register R2.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

/* Input 1: copy of general register R3.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

/* Input 1: register R3 in place.  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

/* Input 1: low 32 bits of R3, sign-extended.  */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

/* Input 1: low 32 bits of R3, zero-extended.  */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

/* Input 1: 64-bit value from the 32-bit pair R3/R3+1.  */
static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

/* Input 1: short-FP value from float register F1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

/* Input 1: float register F1 in place.  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* Input 1: extended-FP pair F1/F1+2 in place; routed via out/out2 as
   the helpers take the 128-bit value there.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

/* Input 1: float register F3 in place.  */
static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

/* Input 1: effective address from B1/D1 into addr1.  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Input 1: effective address from X2/B2/D2 into addr1.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Input 1: 8-bit memory load at the first-operand address, unsigned.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

/* Input 1: 16-bit memory load, sign-extended.  */
static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

/* Input 1: 16-bit memory load, zero-extended.  */
static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

/* Input 1: 32-bit memory load, sign-extended.  */
static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

/* Input 1: 32-bit memory load, zero-extended.  */
static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

/* Input 1: 64-bit memory load.  */
static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4163 /* ====================================================================== */
4164 /* The "INput 2" generators. These load the second operand to an insn. */
/* Input 2: register R1 in place.  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

/* Input 2: low 16 bits of R1, zero-extended.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

/* Input 2: low 32 bits of R1, zero-extended.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* Input 2: 64-bit value from the 32-bit pair R1 (high) / R1+1 (low).  */
static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

/* Input 2: copy of general register R2.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

/* Input 2: register R2 in place.  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Input 2: copy of R2, but only when R2 is nonzero (R2 == 0 means "no
   operand" for these insns and leaves in2 unset).  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

/* Input 2: low 8 bits of R2, sign-extended.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

/* Input 2: low 8 bits of R2, zero-extended.  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

/* Input 2: low 16 bits of R2, sign-extended.  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

/* Input 2: low 16 bits of R2, zero-extended.  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

/* Input 2: copy of general register R3.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0
4251 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4253 o
->in2
= tcg_temp_new_i64();
4254 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4256 #define SPEC_in2_r2_32s 0
4258 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4260 o
->in2
= tcg_temp_new_i64();
4261 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4263 #define SPEC_in2_r2_32u 0
4265 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4267 o
->in2
= load_freg32_i64(get_field(f
, r2
));
4269 #define SPEC_in2_e2 0
4271 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4273 o
->in2
= fregs
[get_field(f
, r2
)];
4276 #define SPEC_in2_f2_o 0
4278 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4280 int r2
= get_field(f
, r2
);
4282 o
->in2
= fregs
[r2
+ 2];
4283 o
->g_in1
= o
->g_in2
= true;
4285 #define SPEC_in2_x2_o SPEC_r2_f128
4287 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4289 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
4291 #define SPEC_in2_ra2 0
4293 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4295 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4296 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4298 #define SPEC_in2_a2 0
4300 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4302 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
4304 #define SPEC_in2_ri2 0
4306 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4308 help_l2_shift(s
, f
, o
, 31);
4310 #define SPEC_in2_sh32 0
4312 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4314 help_l2_shift(s
, f
, o
, 63);
4316 #define SPEC_in2_sh64 0
4318 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4321 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
4323 #define SPEC_in2_m2_8u 0
4325 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4328 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4330 #define SPEC_in2_m2_16s 0
4332 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4335 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4337 #define SPEC_in2_m2_16u 0
4339 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4342 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4344 #define SPEC_in2_m2_32s 0
4346 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4349 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4351 #define SPEC_in2_m2_32u 0
4353 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4356 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4358 #define SPEC_in2_m2_64 0
4360 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4363 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4365 #define SPEC_in2_mri2_16u 0
4367 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4370 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4372 #define SPEC_in2_mri2_32s 0
4374 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4377 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4379 #define SPEC_in2_mri2_32u 0
4381 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4384 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4386 #define SPEC_in2_mri2_64 0
4388 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4390 o
->in2
= tcg_const_i64(get_field(f
, i2
));
4392 #define SPEC_in2_i2 0
4394 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4396 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
4398 #define SPEC_in2_i2_8u 0
4400 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4402 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
4404 #define SPEC_in2_i2_16u 0
4406 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4408 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
4410 #define SPEC_in2_i2_32u 0
4412 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4414 uint64_t i2
= (uint16_t)get_field(f
, i2
);
4415 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4417 #define SPEC_in2_i2_16u_shl 0
4419 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4421 uint64_t i2
= (uint32_t)get_field(f
, i2
);
4422 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4424 #define SPEC_in2_i2_32u_shl 0
4426 /* ====================================================================== */
4428 /* Find opc within the table of insns. This is formulated as a switch
4429 statement so that (1) we get compile-time notice of cut-paste errors
4430 for duplicated opcodes, and (2) the compiler generates the binary
4431 search tree, rather than us having to post-process the table. */
4433 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4434 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4436 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4438 enum DisasInsnEnum
{
4439 #include "insn-data.def"
4443 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4447 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4449 .help_in1 = in1_##I1, \
4450 .help_in2 = in2_##I2, \
4451 .help_prep = prep_##P, \
4452 .help_wout = wout_##W, \
4453 .help_cout = cout_##CC, \
4454 .help_op = op_##OP, \
4458 /* Allow 0 to be used for NULL in the table below. */
4466 #define SPEC_in1_0 0
4467 #define SPEC_in2_0 0
4468 #define SPEC_prep_0 0
4469 #define SPEC_wout_0 0
4471 static const DisasInsn insn_info
[] = {
4472 #include "insn-data.def"
4476 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4477 case OPC: return &insn_info[insn_ ## NM];
4479 static const DisasInsn
*lookup_opc(uint16_t opc
)
4482 #include "insn-data.def"
4491 /* Extract a field from the insn. The INSN should be left-aligned in
4492 the uint64_t so that we can more easily utilize the big-bit-endian
4493 definitions we extract from the Principals of Operation. */
4495 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4503 /* Zero extract the field from the insn. */
4504 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4506 /* Sign-extend, or un-swap the field as necessary. */
4508 case 0: /* unsigned */
4510 case 1: /* signed */
4511 assert(f
->size
<= 32);
4512 m
= 1u << (f
->size
- 1);
4515 case 2: /* dl+dh split, signed 20 bit. */
4516 r
= ((int8_t)r
<< 12) | (r
>> 8);
4522 /* Validate that the "compressed" encoding we selected above is valid.
4523 I.e. we havn't make two different original fields overlap. */
4524 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4525 o
->presentC
|= 1 << f
->indexC
;
4526 o
->presentO
|= 1 << f
->indexO
;
4528 o
->c
[f
->indexC
] = r
;
4531 /* Lookup the insn at the current PC, extracting the operands into O and
4532 returning the info struct for the insn. Returns NULL for invalid insn. */
4534 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4537 uint64_t insn
, pc
= s
->pc
;
4539 const DisasInsn
*info
;
4541 insn
= ld_code2(env
, pc
);
4542 op
= (insn
>> 8) & 0xff;
4543 ilen
= get_ilen(op
);
4544 s
->next_pc
= s
->pc
+ ilen
;
4551 insn
= ld_code4(env
, pc
) << 32;
4554 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4560 /* We can't actually determine the insn format until we've looked up
4561 the full insn opcode. Which we can't do without locating the
4562 secondary opcode. Assume by default that OP2 is at bit 40; for
4563 those smaller insns that don't actually have a secondary opcode
4564 this will correctly result in OP2 = 0. */
4570 case 0xb2: /* S, RRF, RRE */
4571 case 0xb3: /* RRE, RRD, RRF */
4572 case 0xb9: /* RRE, RRF */
4573 case 0xe5: /* SSE, SIL */
4574 op2
= (insn
<< 8) >> 56;
4578 case 0xc0: /* RIL */
4579 case 0xc2: /* RIL */
4580 case 0xc4: /* RIL */
4581 case 0xc6: /* RIL */
4582 case 0xc8: /* SSF */
4583 case 0xcc: /* RIL */
4584 op2
= (insn
<< 12) >> 60;
4586 case 0xd0 ... 0xdf: /* SS */
4592 case 0xee ... 0xf3: /* SS */
4593 case 0xf8 ... 0xfd: /* SS */
4597 op2
= (insn
<< 40) >> 56;
4601 memset(f
, 0, sizeof(*f
));
4605 /* Lookup the instruction. */
4606 info
= lookup_opc(op
<< 8 | op2
);
4608 /* If we found it, extract the operands. */
4610 DisasFormat fmt
= info
->fmt
;
4613 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4614 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
4620 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4622 const DisasInsn
*insn
;
4623 ExitStatus ret
= NO_EXIT
;
4627 /* Search for the insn in the table. */
4628 insn
= extract_insn(env
, s
, &f
);
4630 /* Not found means unimplemented/illegal opcode. */
4632 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
4634 gen_illegal_opcode(s
);
4635 return EXIT_NORETURN
;
4638 /* Check for insn specification exceptions. */
4640 int spec
= insn
->spec
, excp
= 0, r
;
4642 if (spec
& SPEC_r1_even
) {
4643 r
= get_field(&f
, r1
);
4645 excp
= PGM_SPECIFICATION
;
4648 if (spec
& SPEC_r2_even
) {
4649 r
= get_field(&f
, r2
);
4651 excp
= PGM_SPECIFICATION
;
4654 if (spec
& SPEC_r3_even
) {
4655 r
= get_field(&f
, r3
);
4657 excp
= PGM_SPECIFICATION
;
4660 if (spec
& SPEC_r1_f128
) {
4661 r
= get_field(&f
, r1
);
4663 excp
= PGM_SPECIFICATION
;
4666 if (spec
& SPEC_r2_f128
) {
4667 r
= get_field(&f
, r2
);
4669 excp
= PGM_SPECIFICATION
;
4673 gen_program_exception(s
, excp
);
4674 return EXIT_NORETURN
;
4678 /* Set up the strutures we use to communicate with the helpers. */
4681 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4682 TCGV_UNUSED_I64(o
.out
);
4683 TCGV_UNUSED_I64(o
.out2
);
4684 TCGV_UNUSED_I64(o
.in1
);
4685 TCGV_UNUSED_I64(o
.in2
);
4686 TCGV_UNUSED_I64(o
.addr1
);
4688 /* Implement the instruction. */
4689 if (insn
->help_in1
) {
4690 insn
->help_in1(s
, &f
, &o
);
4692 if (insn
->help_in2
) {
4693 insn
->help_in2(s
, &f
, &o
);
4695 if (insn
->help_prep
) {
4696 insn
->help_prep(s
, &f
, &o
);
4698 if (insn
->help_op
) {
4699 ret
= insn
->help_op(s
, &o
);
4701 if (insn
->help_wout
) {
4702 insn
->help_wout(s
, &f
, &o
);
4704 if (insn
->help_cout
) {
4705 insn
->help_cout(s
, &o
);
4708 /* Free any temporaries created by the helpers. */
4709 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4710 tcg_temp_free_i64(o
.out
);
4712 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4713 tcg_temp_free_i64(o
.out2
);
4715 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4716 tcg_temp_free_i64(o
.in1
);
4718 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4719 tcg_temp_free_i64(o
.in2
);
4721 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4722 tcg_temp_free_i64(o
.addr1
);
4725 /* Advance to the next instruction. */
4730 static inline void gen_intermediate_code_internal(S390CPU
*cpu
,
4731 TranslationBlock
*tb
,
4734 CPUState
*cs
= CPU(cpu
);
4735 CPUS390XState
*env
= &cpu
->env
;
4737 target_ulong pc_start
;
4738 uint64_t next_page_start
;
4739 uint16_t *gen_opc_end
;
4741 int num_insns
, max_insns
;
4749 if (!(tb
->flags
& FLAG_MASK_64
)) {
4750 pc_start
&= 0x7fffffff;
4755 dc
.cc_op
= CC_OP_DYNAMIC
;
4756 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
4758 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4760 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4763 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4764 if (max_insns
== 0) {
4765 max_insns
= CF_COUNT_MASK
;
4772 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4776 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4779 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4780 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4781 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4782 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4784 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4788 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4789 tcg_gen_debug_insn_start(dc
.pc
);
4793 if (unlikely(!QTAILQ_EMPTY(&cs
->breakpoints
))) {
4794 QTAILQ_FOREACH(bp
, &cs
->breakpoints
, entry
) {
4795 if (bp
->pc
== dc
.pc
) {
4796 status
= EXIT_PC_STALE
;
4802 if (status
== NO_EXIT
) {
4803 status
= translate_one(env
, &dc
);
4806 /* If we reach a page boundary, are single stepping,
4807 or exhaust instruction count, stop generation. */
4808 if (status
== NO_EXIT
4809 && (dc
.pc
>= next_page_start
4810 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4811 || num_insns
>= max_insns
4813 || cs
->singlestep_enabled
)) {
4814 status
= EXIT_PC_STALE
;
4816 } while (status
== NO_EXIT
);
4818 if (tb
->cflags
& CF_LAST_IO
) {
4827 update_psw_addr(&dc
);
4829 case EXIT_PC_UPDATED
:
4830 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
4831 cc op type is in env */
4833 /* Exit the TB, either by raising a debug exception or by return. */
4835 gen_exception(EXCP_DEBUG
);
4844 gen_tb_end(tb
, num_insns
);
4845 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
4847 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4850 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4853 tb
->size
= dc
.pc
- pc_start
;
4854 tb
->icount
= num_insns
;
4857 #if defined(S390X_DEBUG_DISAS)
4858 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4859 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4860 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
4866 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4868 gen_intermediate_code_internal(s390_env_get_cpu(env
), tb
, false);
4871 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4873 gen_intermediate_code_internal(s390_env_get_cpu(env
), tb
, true);
4876 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4879 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4880 cc_op
= gen_opc_cc_op
[pc_pos
];
4881 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {