4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
39 static TCGv_ptr cpu_env
;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext
;
50 typedef struct DisasInsn DisasInsn
;
51 typedef struct DisasFields DisasFields
;
54 struct TranslationBlock
*tb
;
55 const DisasInsn
*insn
;
59 bool singlestep_enabled
;
62 /* Information carried about a condition to be evaluated. */
69 struct { TCGv_i64 a
, b
; } s64
;
70 struct { TCGv_i32 a
, b
; } s32
;
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit
[CC_OP_MAX
];
78 static uint64_t inline_branch_miss
[CC_OP_MAX
];
81 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
83 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
84 if (s
->tb
->flags
& FLAG_MASK_32
) {
85 return pc
| 0x80000000;
91 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
94 S390CPU
*cpu
= S390_CPU(cs
);
95 CPUS390XState
*env
= &cpu
->env
;
99 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
100 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
102 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
103 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
106 for (i
= 0; i
< 16; i
++) {
107 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
109 cpu_fprintf(f
, "\n");
115 for (i
= 0; i
< 16; i
++) {
116 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
118 cpu_fprintf(f
, "\n");
124 #ifndef CONFIG_USER_ONLY
125 for (i
= 0; i
< 16; i
++) {
126 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
128 cpu_fprintf(f
, "\n");
135 #ifdef DEBUG_INLINE_BRANCHES
136 for (i
= 0; i
< CC_OP_MAX
; i
++) {
137 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
138 inline_branch_miss
[i
], inline_branch_hit
[i
]);
142 cpu_fprintf(f
, "\n");
145 static TCGv_i64 psw_addr
;
146 static TCGv_i64 psw_mask
;
148 static TCGv_i32 cc_op
;
149 static TCGv_i64 cc_src
;
150 static TCGv_i64 cc_dst
;
151 static TCGv_i64 cc_vr
;
153 static char cpu_reg_names
[32][4];
154 static TCGv_i64 regs
[16];
155 static TCGv_i64 fregs
[16];
157 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
159 void s390x_translate_init(void)
163 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
164 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
165 offsetof(CPUS390XState
, psw
.addr
),
167 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
168 offsetof(CPUS390XState
, psw
.mask
),
171 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
173 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
175 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
177 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
180 for (i
= 0; i
< 16; i
++) {
181 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
182 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
183 offsetof(CPUS390XState
, regs
[i
]),
187 for (i
= 0; i
< 16; i
++) {
188 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
189 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
190 offsetof(CPUS390XState
, fregs
[i
].d
),
191 cpu_reg_names
[i
+ 16]);
195 static TCGv_i64
load_reg(int reg
)
197 TCGv_i64 r
= tcg_temp_new_i64();
198 tcg_gen_mov_i64(r
, regs
[reg
]);
202 static TCGv_i64
load_freg32_i64(int reg
)
204 TCGv_i64 r
= tcg_temp_new_i64();
205 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
209 static void store_reg(int reg
, TCGv_i64 v
)
211 tcg_gen_mov_i64(regs
[reg
], v
);
214 static void store_freg(int reg
, TCGv_i64 v
)
216 tcg_gen_mov_i64(fregs
[reg
], v
);
219 static void store_reg32_i64(int reg
, TCGv_i64 v
)
221 /* 32 bit register writes keep the upper half */
222 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
225 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
227 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
230 static void store_freg32_i64(int reg
, TCGv_i64 v
)
232 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
235 static void return_low128(TCGv_i64 dest
)
237 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
240 static void update_psw_addr(DisasContext
*s
)
243 tcg_gen_movi_i64(psw_addr
, s
->pc
);
246 static void update_cc_op(DisasContext
*s
)
248 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
249 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
253 static void potential_page_fault(DisasContext
*s
)
259 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
261 return (uint64_t)cpu_lduw_code(env
, pc
);
264 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
266 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
269 static int get_mem_index(DisasContext
*s
)
271 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
272 case PSW_ASC_PRIMARY
>> 32:
274 case PSW_ASC_SECONDARY
>> 32:
276 case PSW_ASC_HOME
>> 32:
284 static void gen_exception(int excp
)
286 TCGv_i32 tmp
= tcg_const_i32(excp
);
287 gen_helper_exception(cpu_env
, tmp
);
288 tcg_temp_free_i32(tmp
);
291 static void gen_program_exception(DisasContext
*s
, int code
)
295 /* Remember what pgm exception this was. */
296 tmp
= tcg_const_i32(code
);
297 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
298 tcg_temp_free_i32(tmp
);
300 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
301 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
302 tcg_temp_free_i32(tmp
);
304 /* Advance past instruction. */
311 /* Trigger exception. */
312 gen_exception(EXCP_PGM
);
315 static inline void gen_illegal_opcode(DisasContext
*s
)
317 gen_program_exception(s
, PGM_SPECIFICATION
);
320 static inline void check_privileged(DisasContext
*s
)
322 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
323 gen_program_exception(s
, PGM_PRIVILEGED
);
327 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
329 TCGv_i64 tmp
= tcg_temp_new_i64();
330 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
332 /* Note that d2 is limited to 20 bits, signed. If we crop negative
333 displacements early we create larger immediate addends. */
335 /* Note that addi optimizes the imm==0 case. */
337 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
338 tcg_gen_addi_i64(tmp
, tmp
, d2
);
340 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
342 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
348 tcg_gen_movi_i64(tmp
, d2
);
351 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
357 static inline bool live_cc_data(DisasContext
*s
)
359 return (s
->cc_op
!= CC_OP_DYNAMIC
360 && s
->cc_op
!= CC_OP_STATIC
364 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
366 if (live_cc_data(s
)) {
367 tcg_gen_discard_i64(cc_src
);
368 tcg_gen_discard_i64(cc_dst
);
369 tcg_gen_discard_i64(cc_vr
);
371 s
->cc_op
= CC_OP_CONST0
+ val
;
374 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
376 if (live_cc_data(s
)) {
377 tcg_gen_discard_i64(cc_src
);
378 tcg_gen_discard_i64(cc_vr
);
380 tcg_gen_mov_i64(cc_dst
, dst
);
384 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
387 if (live_cc_data(s
)) {
388 tcg_gen_discard_i64(cc_vr
);
390 tcg_gen_mov_i64(cc_src
, src
);
391 tcg_gen_mov_i64(cc_dst
, dst
);
395 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
396 TCGv_i64 dst
, TCGv_i64 vr
)
398 tcg_gen_mov_i64(cc_src
, src
);
399 tcg_gen_mov_i64(cc_dst
, dst
);
400 tcg_gen_mov_i64(cc_vr
, vr
);
404 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
406 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
409 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
411 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
414 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
416 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
419 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
421 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
424 /* CC value is in env->cc_op */
425 static void set_cc_static(DisasContext
*s
)
427 if (live_cc_data(s
)) {
428 tcg_gen_discard_i64(cc_src
);
429 tcg_gen_discard_i64(cc_dst
);
430 tcg_gen_discard_i64(cc_vr
);
432 s
->cc_op
= CC_OP_STATIC
;
435 /* calculates cc into cc_op */
436 static void gen_op_calc_cc(DisasContext
*s
)
438 TCGv_i32 local_cc_op
;
441 TCGV_UNUSED_I32(local_cc_op
);
442 TCGV_UNUSED_I64(dummy
);
445 dummy
= tcg_const_i64(0);
459 local_cc_op
= tcg_const_i32(s
->cc_op
);
475 /* s->cc_op is the cc value */
476 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
479 /* env->cc_op already is the cc value */
494 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
499 case CC_OP_LTUGTU_32
:
500 case CC_OP_LTUGTU_64
:
507 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
522 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
525 /* unknown operation - assume 3 arguments and cc_op in env */
526 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
532 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
533 tcg_temp_free_i32(local_cc_op
);
535 if (!TCGV_IS_UNUSED_I64(dummy
)) {
536 tcg_temp_free_i64(dummy
);
539 /* We now have cc in cc_op as constant */
543 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
545 /* NOTE: we handle the case where the TB spans two pages here */
546 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
547 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
548 && !s
->singlestep_enabled
549 && !(s
->tb
->cflags
& CF_LAST_IO
));
552 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
554 #ifdef DEBUG_INLINE_BRANCHES
555 inline_branch_miss
[cc_op
]++;
559 static void account_inline_branch(DisasContext
*s
, int cc_op
)
561 #ifdef DEBUG_INLINE_BRANCHES
562 inline_branch_hit
[cc_op
]++;
566 /* Table of mask values to comparison codes, given a comparison as input.
567 For such, CC=3 should not be possible. */
568 static const TCGCond ltgt_cond
[16] = {
569 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
570 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
571 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
572 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
573 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
574 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
575 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
576 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
579 /* Table of mask values to comparison codes, given a logic op as input.
580 For such, only CC=0 and CC=1 should be possible. */
581 static const TCGCond nz_cond
[16] = {
582 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
583 TCG_COND_NEVER
, TCG_COND_NEVER
,
584 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
585 TCG_COND_NE
, TCG_COND_NE
,
586 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
587 TCG_COND_EQ
, TCG_COND_EQ
,
588 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
589 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
592 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
593 details required to generate a TCG comparison. */
594 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
597 enum cc_op old_cc_op
= s
->cc_op
;
599 if (mask
== 15 || mask
== 0) {
600 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
603 c
->g1
= c
->g2
= true;
608 /* Find the TCG condition for the mask + cc op. */
614 cond
= ltgt_cond
[mask
];
615 if (cond
== TCG_COND_NEVER
) {
618 account_inline_branch(s
, old_cc_op
);
621 case CC_OP_LTUGTU_32
:
622 case CC_OP_LTUGTU_64
:
623 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
624 if (cond
== TCG_COND_NEVER
) {
627 account_inline_branch(s
, old_cc_op
);
631 cond
= nz_cond
[mask
];
632 if (cond
== TCG_COND_NEVER
) {
635 account_inline_branch(s
, old_cc_op
);
650 account_inline_branch(s
, old_cc_op
);
665 account_inline_branch(s
, old_cc_op
);
669 switch (mask
& 0xa) {
670 case 8: /* src == 0 -> no one bit found */
673 case 2: /* src != 0 -> one bit found */
679 account_inline_branch(s
, old_cc_op
);
685 case 8 | 2: /* vr == 0 */
688 case 4 | 1: /* vr != 0 */
691 case 8 | 4: /* no carry -> vr >= src */
694 case 2 | 1: /* carry -> vr < src */
700 account_inline_branch(s
, old_cc_op
);
705 /* Note that CC=0 is impossible; treat it as dont-care. */
707 case 2: /* zero -> op1 == op2 */
710 case 4 | 1: /* !zero -> op1 != op2 */
713 case 4: /* borrow (!carry) -> op1 < op2 */
716 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
722 account_inline_branch(s
, old_cc_op
);
727 /* Calculate cc value. */
732 /* Jump based on CC. We'll load up the real cond below;
733 the assignment here merely avoids a compiler warning. */
734 account_noninline_branch(s
, old_cc_op
);
735 old_cc_op
= CC_OP_STATIC
;
736 cond
= TCG_COND_NEVER
;
740 /* Load up the arguments of the comparison. */
742 c
->g1
= c
->g2
= false;
746 c
->u
.s32
.a
= tcg_temp_new_i32();
747 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
748 c
->u
.s32
.b
= tcg_const_i32(0);
751 case CC_OP_LTUGTU_32
:
754 c
->u
.s32
.a
= tcg_temp_new_i32();
755 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
756 c
->u
.s32
.b
= tcg_temp_new_i32();
757 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
764 c
->u
.s64
.b
= tcg_const_i64(0);
768 case CC_OP_LTUGTU_64
:
772 c
->g1
= c
->g2
= true;
778 c
->u
.s64
.a
= tcg_temp_new_i64();
779 c
->u
.s64
.b
= tcg_const_i64(0);
780 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
785 c
->u
.s32
.a
= tcg_temp_new_i32();
786 c
->u
.s32
.b
= tcg_temp_new_i32();
787 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_vr
);
788 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
789 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
791 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_src
);
798 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
799 c
->u
.s64
.b
= tcg_const_i64(0);
811 case 0x8 | 0x4 | 0x2: /* cc != 3 */
813 c
->u
.s32
.b
= tcg_const_i32(3);
815 case 0x8 | 0x4 | 0x1: /* cc != 2 */
817 c
->u
.s32
.b
= tcg_const_i32(2);
819 case 0x8 | 0x2 | 0x1: /* cc != 1 */
821 c
->u
.s32
.b
= tcg_const_i32(1);
823 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
826 c
->u
.s32
.a
= tcg_temp_new_i32();
827 c
->u
.s32
.b
= tcg_const_i32(0);
828 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
830 case 0x8 | 0x4: /* cc < 2 */
832 c
->u
.s32
.b
= tcg_const_i32(2);
834 case 0x8: /* cc == 0 */
836 c
->u
.s32
.b
= tcg_const_i32(0);
838 case 0x4 | 0x2 | 0x1: /* cc != 0 */
840 c
->u
.s32
.b
= tcg_const_i32(0);
842 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
845 c
->u
.s32
.a
= tcg_temp_new_i32();
846 c
->u
.s32
.b
= tcg_const_i32(0);
847 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
849 case 0x4: /* cc == 1 */
851 c
->u
.s32
.b
= tcg_const_i32(1);
853 case 0x2 | 0x1: /* cc > 1 */
855 c
->u
.s32
.b
= tcg_const_i32(1);
857 case 0x2: /* cc == 2 */
859 c
->u
.s32
.b
= tcg_const_i32(2);
861 case 0x1: /* cc == 3 */
863 c
->u
.s32
.b
= tcg_const_i32(3);
866 /* CC is masked by something else: (8 >> cc) & mask. */
869 c
->u
.s32
.a
= tcg_const_i32(8);
870 c
->u
.s32
.b
= tcg_const_i32(0);
871 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
872 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
883 static void free_compare(DisasCompare
*c
)
887 tcg_temp_free_i64(c
->u
.s64
.a
);
889 tcg_temp_free_i32(c
->u
.s32
.a
);
894 tcg_temp_free_i64(c
->u
.s64
.b
);
896 tcg_temp_free_i32(c
->u
.s32
.b
);
901 /* ====================================================================== */
902 /* Define the insn format enumeration. */
903 #define F0(N) FMT_##N,
904 #define F1(N, X1) F0(N)
905 #define F2(N, X1, X2) F0(N)
906 #define F3(N, X1, X2, X3) F0(N)
907 #define F4(N, X1, X2, X3, X4) F0(N)
908 #define F5(N, X1, X2, X3, X4, X5) F0(N)
911 #include "insn-format.def"
921 /* Define a structure to hold the decoded fields. We'll store each inside
922 an array indexed by an enum. In order to conserve memory, we'll arrange
923 for fields that do not exist at the same time to overlap, thus the "C"
924 for compact. For checking purposes there is an "O" for original index
925 as well that will be applied to availability bitmaps. */
927 enum DisasFieldIndexO
{
950 enum DisasFieldIndexC
{
984 unsigned presentC
:16;
985 unsigned int presentO
;
989 /* This is the way fields are to be accessed out of DisasFields. */
990 #define have_field(S, F) have_field1((S), FLD_O_##F)
991 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
993 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
995 return (f
->presentO
>> c
) & 1;
998 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
999 enum DisasFieldIndexC c
)
1001 assert(have_field1(f
, o
));
1005 /* Describe the layout of each field in each format. */
1006 typedef struct DisasField
{
1008 unsigned int size
:8;
1009 unsigned int type
:2;
1010 unsigned int indexC
:6;
1011 enum DisasFieldIndexO indexO
:8;
1014 typedef struct DisasFormatInfo
{
1015 DisasField op
[NUM_C_FIELD
];
1018 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1019 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1020 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1021 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1022 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1023 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1024 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1025 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1026 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1027 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1028 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1029 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1030 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1031 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1033 #define F0(N) { { } },
1034 #define F1(N, X1) { { X1 } },
1035 #define F2(N, X1, X2) { { X1, X2 } },
1036 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1037 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1038 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1040 static const DisasFormatInfo format_info
[] = {
1041 #include "insn-format.def"
1059 /* Generally, we'll extract operands into this structures, operate upon
1060 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1061 of routines below for more details. */
1063 bool g_out
, g_out2
, g_in1
, g_in2
;
1064 TCGv_i64 out
, out2
, in1
, in2
;
1068 /* Instructions can place constraints on their operands, raising specification
1069 exceptions if they are violated. To make this easy to automate, each "in1",
1070 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1071 of the following, or 0. To make this easy to document, we'll put the
1072 SPEC_<name> defines next to <name>. */
1074 #define SPEC_r1_even 1
1075 #define SPEC_r2_even 2
1076 #define SPEC_r3_even 4
1077 #define SPEC_r1_f128 8
1078 #define SPEC_r2_f128 16
1080 /* Return values from translate_one, indicating the state of the TB. */
1082 /* Continue the TB. */
1084 /* We have emitted one or more goto_tb. No fixup required. */
1086 /* We are not using a goto_tb (for whatever reason), but have updated
1087 the PC (for whatever reason), so there's no need to do it again on
1090 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1091 updated the PC for the next instruction to be executed. */
1093 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1094 No following code will be executed. */
1098 typedef enum DisasFacility
{
1099 FAC_Z
, /* zarch (default) */
1100 FAC_CASS
, /* compare and swap and store */
1101 FAC_CASS2
, /* compare and swap and store 2*/
1102 FAC_DFP
, /* decimal floating point */
1103 FAC_DFPR
, /* decimal floating point rounding */
1104 FAC_DO
, /* distinct operands */
1105 FAC_EE
, /* execute extensions */
1106 FAC_EI
, /* extended immediate */
1107 FAC_FPE
, /* floating point extension */
1108 FAC_FPSSH
, /* floating point support sign handling */
1109 FAC_FPRGR
, /* FPR-GR transfer */
1110 FAC_GIE
, /* general instructions extension */
1111 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1112 FAC_HW
, /* high-word */
1113 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1114 FAC_LOC
, /* load/store on condition */
1115 FAC_LD
, /* long displacement */
1116 FAC_PC
, /* population count */
1117 FAC_SCF
, /* store clock fast */
1118 FAC_SFLE
, /* store facility list extended */
1124 DisasFacility fac
:8;
1129 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1130 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1131 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1132 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1133 void (*help_cout
)(DisasContext
*, DisasOps
*);
1134 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1139 /* ====================================================================== */
1140 /* Miscellaneous helpers, used by several operations. */
1142 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1143 DisasOps
*o
, int mask
)
1145 int b2
= get_field(f
, b2
);
1146 int d2
= get_field(f
, d2
);
1149 o
->in2
= tcg_const_i64(d2
& mask
);
1151 o
->in2
= get_address(s
, 0, b2
, d2
);
1152 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1156 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1158 if (dest
== s
->next_pc
) {
1161 if (use_goto_tb(s
, dest
)) {
1164 tcg_gen_movi_i64(psw_addr
, dest
);
1165 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1166 return EXIT_GOTO_TB
;
1168 tcg_gen_movi_i64(psw_addr
, dest
);
1169 return EXIT_PC_UPDATED
;
1173 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1174 bool is_imm
, int imm
, TCGv_i64 cdest
)
1177 uint64_t dest
= s
->pc
+ 2 * imm
;
1180 /* Take care of the special cases first. */
1181 if (c
->cond
== TCG_COND_NEVER
) {
1186 if (dest
== s
->next_pc
) {
1187 /* Branch to next. */
1191 if (c
->cond
== TCG_COND_ALWAYS
) {
1192 ret
= help_goto_direct(s
, dest
);
1196 if (TCGV_IS_UNUSED_I64(cdest
)) {
1197 /* E.g. bcr %r0 -> no branch. */
1201 if (c
->cond
== TCG_COND_ALWAYS
) {
1202 tcg_gen_mov_i64(psw_addr
, cdest
);
1203 ret
= EXIT_PC_UPDATED
;
1208 if (use_goto_tb(s
, s
->next_pc
)) {
1209 if (is_imm
&& use_goto_tb(s
, dest
)) {
1210 /* Both exits can use goto_tb. */
1213 lab
= gen_new_label();
1215 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1217 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1220 /* Branch not taken. */
1222 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1223 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1228 tcg_gen_movi_i64(psw_addr
, dest
);
1229 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1233 /* Fallthru can use goto_tb, but taken branch cannot. */
1234 /* Store taken branch destination before the brcond. This
1235 avoids having to allocate a new local temp to hold it.
1236 We'll overwrite this in the not taken case anyway. */
1238 tcg_gen_mov_i64(psw_addr
, cdest
);
1241 lab
= gen_new_label();
1243 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1245 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1248 /* Branch not taken. */
1251 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1252 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1256 tcg_gen_movi_i64(psw_addr
, dest
);
1258 ret
= EXIT_PC_UPDATED
;
1261 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1262 Most commonly we're single-stepping or some other condition that
1263 disables all use of goto_tb. Just update the PC and exit. */
1265 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1267 cdest
= tcg_const_i64(dest
);
1271 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1274 TCGv_i32 t0
= tcg_temp_new_i32();
1275 TCGv_i64 t1
= tcg_temp_new_i64();
1276 TCGv_i64 z
= tcg_const_i64(0);
1277 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1278 tcg_gen_extu_i32_i64(t1
, t0
);
1279 tcg_temp_free_i32(t0
);
1280 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1281 tcg_temp_free_i64(t1
);
1282 tcg_temp_free_i64(z
);
1286 tcg_temp_free_i64(cdest
);
1288 tcg_temp_free_i64(next
);
1290 ret
= EXIT_PC_UPDATED
;
1298 /* ====================================================================== */
1299 /* The operations. These perform the bulk of the work for any insn,
1300 usually after the operands have been loaded and output initialized. */
1302 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1304 gen_helper_abs_i64(o
->out
, o
->in2
);
1308 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1310 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1314 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1316 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1320 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1322 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1323 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1327 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1329 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1333 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1338 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1340 /* The carry flag is the msb of CC, therefore the branch mask that would
1341 create that comparison is 3. Feeding the generated comparison to
1342 setcond produces the carry flag that we desire. */
1343 disas_jcc(s
, &cmp
, 3);
1344 carry
= tcg_temp_new_i64();
1346 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1348 TCGv_i32 t
= tcg_temp_new_i32();
1349 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1350 tcg_gen_extu_i32_i64(carry
, t
);
1351 tcg_temp_free_i32(t
);
1355 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1356 tcg_temp_free_i64(carry
);
1360 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1362 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1366 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1368 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1372 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1374 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1375 return_low128(o
->out2
);
1379 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1381 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1385 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1387 int shift
= s
->insn
->data
& 0xff;
1388 int size
= s
->insn
->data
>> 8;
1389 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1392 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1393 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1394 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1396 /* Produce the CC from only the bits manipulated. */
1397 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1398 set_cc_nz_u64(s
, cc_dst
);
1402 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1404 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1405 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1406 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1407 return EXIT_PC_UPDATED
;
1413 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1415 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1416 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1419 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1421 int m1
= get_field(s
->fields
, m1
);
1422 bool is_imm
= have_field(s
->fields
, i2
);
1423 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1426 disas_jcc(s
, &c
, m1
);
1427 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1430 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1432 int r1
= get_field(s
->fields
, r1
);
1433 bool is_imm
= have_field(s
->fields
, i2
);
1434 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1438 c
.cond
= TCG_COND_NE
;
1443 t
= tcg_temp_new_i64();
1444 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1445 store_reg32_i64(r1
, t
);
1446 c
.u
.s32
.a
= tcg_temp_new_i32();
1447 c
.u
.s32
.b
= tcg_const_i32(0);
1448 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1449 tcg_temp_free_i64(t
);
1451 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1454 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1456 int r1
= get_field(s
->fields
, r1
);
1457 bool is_imm
= have_field(s
->fields
, i2
);
1458 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1461 c
.cond
= TCG_COND_NE
;
1466 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1467 c
.u
.s64
.a
= regs
[r1
];
1468 c
.u
.s64
.b
= tcg_const_i64(0);
1470 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1473 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1475 int r1
= get_field(s
->fields
, r1
);
1476 int r3
= get_field(s
->fields
, r3
);
1477 bool is_imm
= have_field(s
->fields
, i2
);
1478 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1482 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1487 t
= tcg_temp_new_i64();
1488 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1489 c
.u
.s32
.a
= tcg_temp_new_i32();
1490 c
.u
.s32
.b
= tcg_temp_new_i32();
1491 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1492 tcg_gen_trunc_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1493 store_reg32_i64(r1
, t
);
1494 tcg_temp_free_i64(t
);
1496 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1499 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1501 int r1
= get_field(s
->fields
, r1
);
1502 int r3
= get_field(s
->fields
, r3
);
1503 bool is_imm
= have_field(s
->fields
, i2
);
1504 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1507 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1510 if (r1
== (r3
| 1)) {
1511 c
.u
.s64
.b
= load_reg(r3
| 1);
1514 c
.u
.s64
.b
= regs
[r3
| 1];
1518 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1519 c
.u
.s64
.a
= regs
[r1
];
1522 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1525 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1527 int imm
, m3
= get_field(s
->fields
, m3
);
1531 c
.cond
= ltgt_cond
[m3
];
1532 if (s
->insn
->data
) {
1533 c
.cond
= tcg_unsigned_cond(c
.cond
);
1535 c
.is_64
= c
.g1
= c
.g2
= true;
1539 is_imm
= have_field(s
->fields
, i4
);
1541 imm
= get_field(s
->fields
, i4
);
1544 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1545 get_field(s
->fields
, d4
));
1548 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1551 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1553 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1558 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1560 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1565 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1567 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1572 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1574 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1575 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1576 tcg_temp_free_i32(m3
);
1577 gen_set_cc_nz_f32(s
, o
->in2
);
1581 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1583 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1584 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1585 tcg_temp_free_i32(m3
);
1586 gen_set_cc_nz_f64(s
, o
->in2
);
1590 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1592 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1593 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1594 tcg_temp_free_i32(m3
);
1595 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1599 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1601 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1602 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1603 tcg_temp_free_i32(m3
);
1604 gen_set_cc_nz_f32(s
, o
->in2
);
1608 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1610 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1611 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1612 tcg_temp_free_i32(m3
);
1613 gen_set_cc_nz_f64(s
, o
->in2
);
1617 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1619 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1620 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1621 tcg_temp_free_i32(m3
);
1622 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1626 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1628 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1629 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1630 tcg_temp_free_i32(m3
);
1631 gen_set_cc_nz_f32(s
, o
->in2
);
1635 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1637 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1638 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1639 tcg_temp_free_i32(m3
);
1640 gen_set_cc_nz_f64(s
, o
->in2
);
1644 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1646 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1647 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1648 tcg_temp_free_i32(m3
);
1649 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1653 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1655 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1656 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1657 tcg_temp_free_i32(m3
);
1658 gen_set_cc_nz_f32(s
, o
->in2
);
1662 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1664 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1665 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1666 tcg_temp_free_i32(m3
);
1667 gen_set_cc_nz_f64(s
, o
->in2
);
1671 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1673 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1674 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1675 tcg_temp_free_i32(m3
);
1676 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1680 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1682 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1683 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1684 tcg_temp_free_i32(m3
);
1688 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1690 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1691 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1692 tcg_temp_free_i32(m3
);
1696 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1698 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1699 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1700 tcg_temp_free_i32(m3
);
1701 return_low128(o
->out2
);
1705 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1707 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1708 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1709 tcg_temp_free_i32(m3
);
1713 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1715 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1716 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1717 tcg_temp_free_i32(m3
);
1721 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1723 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1724 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1725 tcg_temp_free_i32(m3
);
1726 return_low128(o
->out2
);
1730 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1732 int r2
= get_field(s
->fields
, r2
);
1733 TCGv_i64 len
= tcg_temp_new_i64();
1735 potential_page_fault(s
);
1736 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1738 return_low128(o
->out
);
1740 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1741 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1742 tcg_temp_free_i64(len
);
1747 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1749 int l
= get_field(s
->fields
, l1
);
1754 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1755 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1758 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1759 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1762 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1763 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1766 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1767 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1770 potential_page_fault(s
);
1771 vl
= tcg_const_i32(l
);
1772 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1773 tcg_temp_free_i32(vl
);
1777 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1781 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1783 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1784 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1785 potential_page_fault(s
);
1786 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1787 tcg_temp_free_i32(r1
);
1788 tcg_temp_free_i32(r3
);
1793 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1795 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1796 TCGv_i32 t1
= tcg_temp_new_i32();
1797 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
1798 potential_page_fault(s
);
1799 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1801 tcg_temp_free_i32(t1
);
1802 tcg_temp_free_i32(m3
);
1806 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1808 potential_page_fault(s
);
1809 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1811 return_low128(o
->in2
);
1815 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1817 TCGv_i64 t
= tcg_temp_new_i64();
1818 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1819 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1820 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1821 tcg_temp_free_i64(t
);
1825 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1827 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1828 int d2
= get_field(s
->fields
, d2
);
1829 int b2
= get_field(s
->fields
, b2
);
1830 int is_64
= s
->insn
->data
;
1831 TCGv_i64 addr
, mem
, cc
, z
;
1833 /* Note that in1 = R3 (new value) and
1834 in2 = (zero-extended) R1 (expected value). */
1836 /* Load the memory into the (temporary) output. While the PoO only talks
1837 about moving the memory to R1 on inequality, if we include equality it
1838 means that R1 is equal to the memory in all conditions. */
1839 addr
= get_address(s
, 0, b2
, d2
);
1841 tcg_gen_qemu_ld64(o
->out
, addr
, get_mem_index(s
));
1843 tcg_gen_qemu_ld32u(o
->out
, addr
, get_mem_index(s
));
1846 /* Are the memory and expected values (un)equal? Note that this setcond
1847 produces the output CC value, thus the NE sense of the test. */
1848 cc
= tcg_temp_new_i64();
1849 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1851 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1852 Recall that we are allowed to unconditionally issue the store (and
1853 thus any possible write trap), so (re-)store the original contents
1854 of MEM in case of inequality. */
1855 z
= tcg_const_i64(0);
1856 mem
= tcg_temp_new_i64();
1857 tcg_gen_movcond_i64(TCG_COND_EQ
, mem
, cc
, z
, o
->in1
, o
->out
);
1859 tcg_gen_qemu_st64(mem
, addr
, get_mem_index(s
));
1861 tcg_gen_qemu_st32(mem
, addr
, get_mem_index(s
));
1863 tcg_temp_free_i64(z
);
1864 tcg_temp_free_i64(mem
);
1865 tcg_temp_free_i64(addr
);
1867 /* Store CC back to cc_op. Wait until after the store so that any
1868 exception gets the old cc_op value. */
1869 tcg_gen_trunc_i64_i32(cc_op
, cc
);
1870 tcg_temp_free_i64(cc
);
1875 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1877 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1878 int r1
= get_field(s
->fields
, r1
);
1879 int r3
= get_field(s
->fields
, r3
);
1880 int d2
= get_field(s
->fields
, d2
);
1881 int b2
= get_field(s
->fields
, b2
);
1882 TCGv_i64 addrh
, addrl
, memh
, meml
, outh
, outl
, cc
, z
;
1884 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1886 addrh
= get_address(s
, 0, b2
, d2
);
1887 addrl
= get_address(s
, 0, b2
, d2
+ 8);
1888 outh
= tcg_temp_new_i64();
1889 outl
= tcg_temp_new_i64();
1891 tcg_gen_qemu_ld64(outh
, addrh
, get_mem_index(s
));
1892 tcg_gen_qemu_ld64(outl
, addrl
, get_mem_index(s
));
1894 /* Fold the double-word compare with arithmetic. */
1895 cc
= tcg_temp_new_i64();
1896 z
= tcg_temp_new_i64();
1897 tcg_gen_xor_i64(cc
, outh
, regs
[r1
]);
1898 tcg_gen_xor_i64(z
, outl
, regs
[r1
+ 1]);
1899 tcg_gen_or_i64(cc
, cc
, z
);
1900 tcg_gen_movi_i64(z
, 0);
1901 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, cc
, z
);
1903 memh
= tcg_temp_new_i64();
1904 meml
= tcg_temp_new_i64();
1905 tcg_gen_movcond_i64(TCG_COND_EQ
, memh
, cc
, z
, regs
[r3
], outh
);
1906 tcg_gen_movcond_i64(TCG_COND_EQ
, meml
, cc
, z
, regs
[r3
+ 1], outl
);
1907 tcg_temp_free_i64(z
);
1909 tcg_gen_qemu_st64(memh
, addrh
, get_mem_index(s
));
1910 tcg_gen_qemu_st64(meml
, addrl
, get_mem_index(s
));
1911 tcg_temp_free_i64(memh
);
1912 tcg_temp_free_i64(meml
);
1913 tcg_temp_free_i64(addrh
);
1914 tcg_temp_free_i64(addrl
);
1916 /* Save back state now that we've passed all exceptions. */
1917 tcg_gen_mov_i64(regs
[r1
], outh
);
1918 tcg_gen_mov_i64(regs
[r1
+ 1], outl
);
1919 tcg_gen_trunc_i64_i32(cc_op
, cc
);
1920 tcg_temp_free_i64(outh
);
1921 tcg_temp_free_i64(outl
);
1922 tcg_temp_free_i64(cc
);
1927 #ifndef CONFIG_USER_ONLY
1928 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
1930 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1931 check_privileged(s
);
1932 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
1933 tcg_temp_free_i32(r1
);
1939 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
1941 TCGv_i64 t1
= tcg_temp_new_i64();
1942 TCGv_i32 t2
= tcg_temp_new_i32();
1943 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
1944 gen_helper_cvd(t1
, t2
);
1945 tcg_temp_free_i32(t2
);
1946 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
1947 tcg_temp_free_i64(t1
);
1951 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
1953 int m3
= get_field(s
->fields
, m3
);
1954 int lab
= gen_new_label();
1958 c
= tcg_invert_cond(ltgt_cond
[m3
]);
1959 if (s
->insn
->data
) {
1960 c
= tcg_unsigned_cond(c
);
1962 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
1964 /* Set DXC to 0xff. */
1965 t
= tcg_temp_new_i32();
1966 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1967 tcg_gen_ori_i32(t
, t
, 0xff00);
1968 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1969 tcg_temp_free_i32(t
);
1972 gen_program_exception(s
, PGM_DATA
);
1978 #ifndef CONFIG_USER_ONLY
1979 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
1983 check_privileged(s
);
1984 potential_page_fault(s
);
1986 /* We pretend the format is RX_a so that D2 is the field we want. */
1987 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
1988 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
1989 tcg_temp_free_i32(tmp
);
1994 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
1996 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1997 return_low128(o
->out
);
2001 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2003 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2004 return_low128(o
->out
);
2008 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2010 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2011 return_low128(o
->out
);
2015 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2017 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2018 return_low128(o
->out
);
2022 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2024 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2028 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2030 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2034 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2036 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2037 return_low128(o
->out2
);
2041 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2043 int r2
= get_field(s
->fields
, r2
);
2044 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2048 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2050 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2054 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2056 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2057 tb->flags, (ab)use the tb->cs_base field as the address of
2058 the template in memory, and grab 8 bits of tb->flags/cflags for
2059 the contents of the register. We would then recognize all this
2060 in gen_intermediate_code_internal, generating code for exactly
2061 one instruction. This new TB then gets executed normally.
2063 On the other hand, this seems to be mostly used for modifying
2064 MVC inside of memcpy, which needs a helper call anyway. So
2065 perhaps this doesn't bear thinking about any further. */
2072 tmp
= tcg_const_i64(s
->next_pc
);
2073 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2074 tcg_temp_free_i64(tmp
);
2080 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2082 /* We'll use the original input for cc computation, since we get to
2083 compare that against 0, which ought to be better than comparing
2084 the real output against 64. It also lets cc_dst be a convenient
2085 temporary during our computation. */
2086 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2088 /* R1 = IN ? CLZ(IN) : 64. */
2089 gen_helper_clz(o
->out
, o
->in2
);
2091 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2092 value by 64, which is undefined. But since the shift is 64 iff the
2093 input is zero, we still get the correct result after and'ing. */
2094 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2095 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2096 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2100 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2102 int m3
= get_field(s
->fields
, m3
);
2103 int pos
, len
, base
= s
->insn
->data
;
2104 TCGv_i64 tmp
= tcg_temp_new_i64();
2109 /* Effectively a 32-bit load. */
2110 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2117 /* Effectively a 16-bit load. */
2118 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2126 /* Effectively an 8-bit load. */
2127 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2132 pos
= base
+ ctz32(m3
) * 8;
2133 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2134 ccm
= ((1ull << len
) - 1) << pos
;
2138 /* This is going to be a sequence of loads and inserts. */
2139 pos
= base
+ 32 - 8;
2143 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2144 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2145 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2148 m3
= (m3
<< 1) & 0xf;
2154 tcg_gen_movi_i64(tmp
, ccm
);
2155 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2156 tcg_temp_free_i64(tmp
);
2160 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2162 int shift
= s
->insn
->data
& 0xff;
2163 int size
= s
->insn
->data
>> 8;
2164 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2168 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2173 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2175 t1
= tcg_temp_new_i64();
2176 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2177 tcg_gen_shri_i64(t1
, t1
, 36);
2178 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2180 tcg_gen_extu_i32_i64(t1
, cc_op
);
2181 tcg_gen_shli_i64(t1
, t1
, 28);
2182 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2183 tcg_temp_free_i64(t1
);
2187 #ifndef CONFIG_USER_ONLY
2188 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2190 check_privileged(s
);
2191 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2195 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2197 check_privileged(s
);
2198 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2203 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2205 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2209 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2211 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2215 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2217 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2221 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2223 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2227 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2229 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2230 return_low128(o
->out2
);
2234 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2236 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2237 return_low128(o
->out2
);
2241 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2243 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2247 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2249 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2253 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2255 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2259 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2261 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2265 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2267 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2271 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2273 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2277 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2279 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2283 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2285 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2289 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2293 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2296 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2300 TCGv_i32 t32
= tcg_temp_new_i32();
2303 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2306 t
= tcg_temp_new_i64();
2307 tcg_gen_extu_i32_i64(t
, t32
);
2308 tcg_temp_free_i32(t32
);
2310 z
= tcg_const_i64(0);
2311 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2312 tcg_temp_free_i64(t
);
2313 tcg_temp_free_i64(z
);
2319 #ifndef CONFIG_USER_ONLY
2320 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2322 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2323 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2324 check_privileged(s
);
2325 potential_page_fault(s
);
2326 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2327 tcg_temp_free_i32(r1
);
2328 tcg_temp_free_i32(r3
);
2332 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2334 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2335 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2336 check_privileged(s
);
2337 potential_page_fault(s
);
2338 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2339 tcg_temp_free_i32(r1
);
2340 tcg_temp_free_i32(r3
);
2343 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2345 check_privileged(s
);
2346 potential_page_fault(s
);
2347 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2352 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2356 check_privileged(s
);
2358 t1
= tcg_temp_new_i64();
2359 t2
= tcg_temp_new_i64();
2360 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2361 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2362 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2363 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2364 tcg_gen_shli_i64(t1
, t1
, 32);
2365 gen_helper_load_psw(cpu_env
, t1
, t2
);
2366 tcg_temp_free_i64(t1
);
2367 tcg_temp_free_i64(t2
);
2368 return EXIT_NORETURN
;
2371 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2375 check_privileged(s
);
2377 t1
= tcg_temp_new_i64();
2378 t2
= tcg_temp_new_i64();
2379 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2380 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2381 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2382 gen_helper_load_psw(cpu_env
, t1
, t2
);
2383 tcg_temp_free_i64(t1
);
2384 tcg_temp_free_i64(t2
);
2385 return EXIT_NORETURN
;
2389 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2391 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2392 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2393 potential_page_fault(s
);
2394 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2395 tcg_temp_free_i32(r1
);
2396 tcg_temp_free_i32(r3
);
2400 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2402 int r1
= get_field(s
->fields
, r1
);
2403 int r3
= get_field(s
->fields
, r3
);
2404 TCGv_i64 t
= tcg_temp_new_i64();
2405 TCGv_i64 t4
= tcg_const_i64(4);
2408 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2409 store_reg32_i64(r1
, t
);
2413 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2417 tcg_temp_free_i64(t
);
2418 tcg_temp_free_i64(t4
);
2422 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2424 int r1
= get_field(s
->fields
, r1
);
2425 int r3
= get_field(s
->fields
, r3
);
2426 TCGv_i64 t
= tcg_temp_new_i64();
2427 TCGv_i64 t4
= tcg_const_i64(4);
2430 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2431 store_reg32h_i64(r1
, t
);
2435 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2439 tcg_temp_free_i64(t
);
2440 tcg_temp_free_i64(t4
);
2444 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2446 int r1
= get_field(s
->fields
, r1
);
2447 int r3
= get_field(s
->fields
, r3
);
2448 TCGv_i64 t8
= tcg_const_i64(8);
2451 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2455 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2459 tcg_temp_free_i64(t8
);
2463 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2466 o
->g_out
= o
->g_in2
;
2467 TCGV_UNUSED_I64(o
->in2
);
2472 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2476 o
->g_out
= o
->g_in1
;
2477 o
->g_out2
= o
->g_in2
;
2478 TCGV_UNUSED_I64(o
->in1
);
2479 TCGV_UNUSED_I64(o
->in2
);
2480 o
->g_in1
= o
->g_in2
= false;
2484 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2486 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2487 potential_page_fault(s
);
2488 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2489 tcg_temp_free_i32(l
);
2493 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2495 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2496 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2497 potential_page_fault(s
);
2498 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2499 tcg_temp_free_i32(r1
);
2500 tcg_temp_free_i32(r2
);
2505 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2507 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2508 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2509 potential_page_fault(s
);
2510 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2511 tcg_temp_free_i32(r1
);
2512 tcg_temp_free_i32(r3
);
2517 #ifndef CONFIG_USER_ONLY
2518 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2520 int r1
= get_field(s
->fields
, l1
);
2521 check_privileged(s
);
2522 potential_page_fault(s
);
2523 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2528 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2530 int r1
= get_field(s
->fields
, l1
);
2531 check_privileged(s
);
2532 potential_page_fault(s
);
2533 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2539 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2541 potential_page_fault(s
);
2542 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2547 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2549 potential_page_fault(s
);
2550 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2552 return_low128(o
->in2
);
2556 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2558 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2562 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2564 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
2568 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2570 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2574 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2576 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2580 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2582 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2586 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2588 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2589 return_low128(o
->out2
);
2593 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2595 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2596 return_low128(o
->out2
);
2600 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2602 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2603 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2604 tcg_temp_free_i64(r3
);
2608 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2610 int r3
= get_field(s
->fields
, r3
);
2611 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2615 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2617 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2618 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2619 tcg_temp_free_i64(r3
);
2623 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2625 int r3
= get_field(s
->fields
, r3
);
2626 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2630 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2632 gen_helper_nabs_i64(o
->out
, o
->in2
);
2636 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2638 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2642 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2644 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2648 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2650 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2651 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2655 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2657 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2658 potential_page_fault(s
);
2659 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2660 tcg_temp_free_i32(l
);
2665 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2667 tcg_gen_neg_i64(o
->out
, o
->in2
);
2671 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2673 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2677 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2679 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2683 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2685 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2686 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2690 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2692 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2693 potential_page_fault(s
);
2694 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2695 tcg_temp_free_i32(l
);
2700 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2702 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2706 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2708 int shift
= s
->insn
->data
& 0xff;
2709 int size
= s
->insn
->data
>> 8;
2710 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2713 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2714 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2716 /* Produce the CC from only the bits manipulated. */
2717 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2718 set_cc_nz_u64(s
, cc_dst
);
2722 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
2724 gen_helper_popcnt(o
->out
, o
->in2
);
2728 #ifndef CONFIG_USER_ONLY
2729 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
2731 check_privileged(s
);
2732 gen_helper_ptlb(cpu_env
);
2737 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
2739 int i3
= get_field(s
->fields
, i3
);
2740 int i4
= get_field(s
->fields
, i4
);
2741 int i5
= get_field(s
->fields
, i5
);
2742 int do_zero
= i4
& 0x80;
2743 uint64_t mask
, imask
, pmask
;
2746 /* Adjust the arguments for the specific insn. */
2747 switch (s
->fields
->op2
) {
2748 case 0x55: /* risbg */
2753 case 0x5d: /* risbhg */
2756 pmask
= 0xffffffff00000000ull
;
2758 case 0x51: /* risblg */
2761 pmask
= 0x00000000ffffffffull
;
2767 /* MASK is the set of bits to be inserted from R2.
2768 Take care for I3/I4 wraparound. */
2771 mask
^= pmask
>> i4
>> 1;
2773 mask
|= ~(pmask
>> i4
>> 1);
2777 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
2778 insns, we need to keep the other half of the register. */
2779 imask
= ~mask
| ~pmask
;
2781 if (s
->fields
->op2
== 0x55) {
2788 /* In some cases we can implement this with deposit, which can be more
2789 efficient on some hosts. */
2790 if (~mask
== imask
&& i3
<= i4
) {
2791 if (s
->fields
->op2
== 0x5d) {
2794 /* Note that we rotate the bits to be inserted to the lsb, not to
2795 the position as described in the PoO. */
2798 rot
= (i5
- pos
) & 63;
2804 /* Rotate the input as necessary. */
2805 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
2807 /* Insert the selected bits into the output. */
2809 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
2810 } else if (imask
== 0) {
2811 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
2813 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2814 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
2815 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
2820 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
2822 int i3
= get_field(s
->fields
, i3
);
2823 int i4
= get_field(s
->fields
, i4
);
2824 int i5
= get_field(s
->fields
, i5
);
2827 /* If this is a test-only form, arrange to discard the result. */
2829 o
->out
= tcg_temp_new_i64();
2837 /* MASK is the set of bits to be operated on from R2.
2838 Take care for I3/I4 wraparound. */
2841 mask
^= ~0ull >> i4
>> 1;
2843 mask
|= ~(~0ull >> i4
>> 1);
2846 /* Rotate the input as necessary. */
2847 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
2850 switch (s
->fields
->op2
) {
2851 case 0x55: /* AND */
2852 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
2853 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
2856 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2857 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
2859 case 0x57: /* XOR */
2860 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2861 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
2868 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2869 set_cc_nz_u64(s
, cc_dst
);
2873 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2875 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2879 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2881 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2885 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2887 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2891 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2893 TCGv_i32 t1
= tcg_temp_new_i32();
2894 TCGv_i32 t2
= tcg_temp_new_i32();
2895 TCGv_i32 to
= tcg_temp_new_i32();
2896 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2897 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2898 tcg_gen_rotl_i32(to
, t1
, t2
);
2899 tcg_gen_extu_i32_i64(o
->out
, to
);
2900 tcg_temp_free_i32(t1
);
2901 tcg_temp_free_i32(t2
);
2902 tcg_temp_free_i32(to
);
2906 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2908 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
2912 #ifndef CONFIG_USER_ONLY
2913 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
2915 check_privileged(s
);
2916 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
2921 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
2923 check_privileged(s
);
2924 gen_helper_sacf(cpu_env
, o
->in2
);
2925 /* Addressing mode has changed, so end the block. */
2926 return EXIT_PC_STALE
;
2930 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
2932 int r1
= get_field(s
->fields
, r1
);
2933 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
2937 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
2939 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2943 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
2945 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2949 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
2951 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2952 return_low128(o
->out2
);
2956 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
2958 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
2962 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
2964 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
2968 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
2970 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2971 return_low128(o
->out2
);
2975 #ifndef CONFIG_USER_ONLY
2976 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
2978 check_privileged(s
);
2979 potential_page_fault(s
);
2980 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
2985 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
2987 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2988 check_privileged(s
);
2989 potential_page_fault(s
);
2990 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
2991 tcg_temp_free_i32(r1
);
2996 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3002 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3004 lab
= gen_new_label();
3006 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3008 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3012 r1
= get_field(s
->fields
, r1
);
3013 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3014 if (s
->insn
->data
) {
3015 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3017 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3019 tcg_temp_free_i64(a
);
3025 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3027 uint64_t sign
= 1ull << s
->insn
->data
;
3028 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3029 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3030 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3031 /* The arithmetic left shift is curious in that it does not affect
3032 the sign bit. Copy that over from the source unchanged. */
3033 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3034 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3035 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3039 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3041 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3045 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3047 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3051 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3053 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3057 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3059 gen_helper_sfpc(cpu_env
, o
->in2
);
3063 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3065 gen_helper_sfas(cpu_env
, o
->in2
);
3069 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3071 int b2
= get_field(s
->fields
, b2
);
3072 int d2
= get_field(s
->fields
, d2
);
3073 TCGv_i64 t1
= tcg_temp_new_i64();
3074 TCGv_i64 t2
= tcg_temp_new_i64();
3077 switch (s
->fields
->op2
) {
3078 case 0x99: /* SRNM */
3081 case 0xb8: /* SRNMB */
3084 case 0xb9: /* SRNMT */
3090 mask
= (1 << len
) - 1;
3092 /* Insert the value into the appropriate field of the FPC. */
3094 tcg_gen_movi_i64(t1
, d2
& mask
);
3096 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3097 tcg_gen_andi_i64(t1
, t1
, mask
);
3099 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3100 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3101 tcg_temp_free_i64(t1
);
3103 /* Then install the new FPC to set the rounding mode in fpu_status. */
3104 gen_helper_sfpc(cpu_env
, t2
);
3105 tcg_temp_free_i64(t2
);
3109 #ifndef CONFIG_USER_ONLY
3110 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3112 check_privileged(s
);
3113 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3114 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
3118 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3120 check_privileged(s
);
3121 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3125 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3127 check_privileged(s
);
3128 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3132 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3134 check_privileged(s
);
3135 /* ??? Surely cpu address != cpu number. In any case the previous
3136 version of this stored more than the required half-word, so it
3137 is unlikely this has ever been tested. */
3138 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3142 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3144 gen_helper_stck(o
->out
, cpu_env
);
3145 /* ??? We don't implement clock states. */
3146 gen_op_movi_cc(s
, 0);
3150 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3152 TCGv_i64 c1
= tcg_temp_new_i64();
3153 TCGv_i64 c2
= tcg_temp_new_i64();
3154 gen_helper_stck(c1
, cpu_env
);
3155 /* Shift the 64-bit value into its place as a zero-extended
3156 104-bit value. Note that "bit positions 64-103 are always
3157 non-zero so that they compare differently to STCK"; we set
3158 the least significant bit to 1. */
3159 tcg_gen_shli_i64(c2
, c1
, 56);
3160 tcg_gen_shri_i64(c1
, c1
, 8);
3161 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3162 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3163 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3164 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3165 tcg_temp_free_i64(c1
);
3166 tcg_temp_free_i64(c2
);
3167 /* ??? We don't implement clock states. */
3168 gen_op_movi_cc(s
, 0);
3172 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3174 check_privileged(s
);
3175 gen_helper_sckc(cpu_env
, o
->in2
);
3179 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3181 check_privileged(s
);
3182 gen_helper_stckc(o
->out
, cpu_env
);
3186 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3188 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3189 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3190 check_privileged(s
);
3191 potential_page_fault(s
);
3192 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3193 tcg_temp_free_i32(r1
);
3194 tcg_temp_free_i32(r3
);
3198 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3200 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3201 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3202 check_privileged(s
);
3203 potential_page_fault(s
);
3204 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3205 tcg_temp_free_i32(r1
);
3206 tcg_temp_free_i32(r3
);
3210 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3212 check_privileged(s
);
3213 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3217 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3219 check_privileged(s
);
3220 gen_helper_spt(cpu_env
, o
->in2
);
3224 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3227 /* We really ought to have more complete indication of facilities
3228 that we implement. Address this when STFLE is implemented. */
3229 check_privileged(s
);
3230 f
= tcg_const_i64(0xc0000000);
3231 a
= tcg_const_i64(200);
3232 tcg_gen_qemu_st32(f
, a
, get_mem_index(s
));
3233 tcg_temp_free_i64(f
);
3234 tcg_temp_free_i64(a
);
3238 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3240 check_privileged(s
);
3241 gen_helper_stpt(o
->out
, cpu_env
);
3245 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3247 check_privileged(s
);
3248 potential_page_fault(s
);
3249 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3254 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3256 check_privileged(s
);
3257 gen_helper_spx(cpu_env
, o
->in2
);
3261 static ExitStatus
op_subchannel(DisasContext
*s
, DisasOps
*o
)
3263 check_privileged(s
);
3264 /* Not operational. */
3265 gen_op_movi_cc(s
, 3);
3269 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
3271 check_privileged(s
);
3272 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
3273 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
3277 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3279 uint64_t i2
= get_field(s
->fields
, i2
);
3282 check_privileged(s
);
3284 /* It is important to do what the instruction name says: STORE THEN.
3285 If we let the output hook perform the store then if we fault and
3286 restart, we'll have the wrong SYSTEM MASK in place. */
3287 t
= tcg_temp_new_i64();
3288 tcg_gen_shri_i64(t
, psw_mask
, 56);
3289 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3290 tcg_temp_free_i64(t
);
3292 if (s
->fields
->op
== 0xac) {
3293 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3294 (i2
<< 56) | 0x00ffffffffffffffull
);
3296 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3301 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
3303 check_privileged(s
);
3304 potential_page_fault(s
);
3305 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
3310 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3312 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3316 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3318 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3322 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3324 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3328 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3330 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3334 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3336 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3337 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3338 potential_page_fault(s
);
3339 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3340 tcg_temp_free_i32(r1
);
3341 tcg_temp_free_i32(r3
);
3345 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3347 int m3
= get_field(s
->fields
, m3
);
3348 int pos
, base
= s
->insn
->data
;
3349 TCGv_i64 tmp
= tcg_temp_new_i64();
3351 pos
= base
+ ctz32(m3
) * 8;
3354 /* Effectively a 32-bit store. */
3355 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3356 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3362 /* Effectively a 16-bit store. */
3363 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3364 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3371 /* Effectively an 8-bit store. */
3372 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3373 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3377 /* This is going to be a sequence of shifts and stores. */
3378 pos
= base
+ 32 - 8;
3381 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3382 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3383 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3385 m3
= (m3
<< 1) & 0xf;
3390 tcg_temp_free_i64(tmp
);
3394 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3396 int r1
= get_field(s
->fields
, r1
);
3397 int r3
= get_field(s
->fields
, r3
);
3398 int size
= s
->insn
->data
;
3399 TCGv_i64 tsize
= tcg_const_i64(size
);
3403 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3405 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3410 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3414 tcg_temp_free_i64(tsize
);
3418 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3420 int r1
= get_field(s
->fields
, r1
);
3421 int r3
= get_field(s
->fields
, r3
);
3422 TCGv_i64 t
= tcg_temp_new_i64();
3423 TCGv_i64 t4
= tcg_const_i64(4);
3424 TCGv_i64 t32
= tcg_const_i64(32);
3427 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3428 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3432 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3436 tcg_temp_free_i64(t
);
3437 tcg_temp_free_i64(t4
);
3438 tcg_temp_free_i64(t32
);
3442 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3444 potential_page_fault(s
);
3445 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3447 return_low128(o
->in2
);
3451 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3453 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3457 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3462 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3464 /* The !borrow flag is the msb of CC. Since we want the inverse of
3465 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3466 disas_jcc(s
, &cmp
, 8 | 4);
3467 borrow
= tcg_temp_new_i64();
3469 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
3471 TCGv_i32 t
= tcg_temp_new_i32();
3472 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
3473 tcg_gen_extu_i32_i64(borrow
, t
);
3474 tcg_temp_free_i32(t
);
3478 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
3479 tcg_temp_free_i64(borrow
);
3483 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3490 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3491 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3492 tcg_temp_free_i32(t
);
3494 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3495 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3496 tcg_temp_free_i32(t
);
3498 gen_exception(EXCP_SVC
);
3499 return EXIT_NORETURN
;
3502 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3504 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3509 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3511 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3516 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3518 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
3523 #ifndef CONFIG_USER_ONLY
3524 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3526 potential_page_fault(s
);
3527 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3533 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3535 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3536 potential_page_fault(s
);
3537 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3538 tcg_temp_free_i32(l
);
3543 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3545 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3546 potential_page_fault(s
);
3547 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3548 tcg_temp_free_i32(l
);
3552 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3554 int d1
= get_field(s
->fields
, d1
);
3555 int d2
= get_field(s
->fields
, d2
);
3556 int b1
= get_field(s
->fields
, b1
);
3557 int b2
= get_field(s
->fields
, b2
);
3558 int l
= get_field(s
->fields
, l1
);
3561 o
->addr1
= get_address(s
, 0, b1
, d1
);
3563 /* If the addresses are identical, this is a store/memset of zero. */
3564 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
3565 o
->in2
= tcg_const_i64(0);
3569 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
3572 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
3576 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
3579 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
3583 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
3586 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
3590 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
3592 gen_op_movi_cc(s
, 0);
3596 /* But in general we'll defer to a helper. */
3597 o
->in2
= get_address(s
, 0, b2
, d2
);
3598 t32
= tcg_const_i32(l
);
3599 potential_page_fault(s
);
3600 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
3601 tcg_temp_free_i32(t32
);
3606 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3608 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3612 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3614 int shift
= s
->insn
->data
& 0xff;
3615 int size
= s
->insn
->data
>> 8;
3616 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3619 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3620 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3622 /* Produce the CC from only the bits manipulated. */
3623 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3624 set_cc_nz_u64(s
, cc_dst
);
3628 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
3630 o
->out
= tcg_const_i64(0);
3634 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
3636 o
->out
= tcg_const_i64(0);
3642 /* ====================================================================== */
3643 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3644 the original inputs), update the various cc data structures in order to
3645 be able to compute the new condition code. */
3647 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3649 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3652 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3654 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3657 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3659 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3662 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3664 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3667 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3669 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3672 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3674 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3677 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3679 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3682 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3684 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3687 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3689 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3692 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3694 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3697 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3699 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3702 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3704 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3707 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
3709 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
3712 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
3714 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
3717 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
3719 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
3722 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3724 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3727 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3729 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3732 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3734 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3737 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3739 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3742 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3744 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3745 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3748 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3750 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3753 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3755 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3758 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3760 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3763 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3765 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3768 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3770 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3773 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3775 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3778 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3780 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3783 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3785 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3788 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3790 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3793 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3795 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3798 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3800 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3803 /* ====================================================================== */
3804 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
3805 with the TCG register to which we will write. Used in combination with
3806 the "wout" generators, in some cases we need a new temporary, and in
3807 some cases we can write to a TCG global. */
3809 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3811 o
->out
= tcg_temp_new_i64();
3813 #define SPEC_prep_new 0
3815 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3817 o
->out
= tcg_temp_new_i64();
3818 o
->out2
= tcg_temp_new_i64();
3820 #define SPEC_prep_new_P 0
3822 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3824 o
->out
= regs
[get_field(f
, r1
)];
3827 #define SPEC_prep_r1 0
3829 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3831 int r1
= get_field(f
, r1
);
3833 o
->out2
= regs
[r1
+ 1];
3834 o
->g_out
= o
->g_out2
= true;
3836 #define SPEC_prep_r1_P SPEC_r1_even
3838 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3840 o
->out
= fregs
[get_field(f
, r1
)];
3843 #define SPEC_prep_f1 0
3845 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3847 int r1
= get_field(f
, r1
);
3849 o
->out2
= fregs
[r1
+ 2];
3850 o
->g_out
= o
->g_out2
= true;
3852 #define SPEC_prep_x1 SPEC_r1_f128
3854 /* ====================================================================== */
3855 /* The "Write OUTput" generators. These generally perform some non-trivial
3856 copy of data to TCG globals, or to main memory. The trivial cases are
3857 generally handled by having a "prep" generator install the TCG global
3858 as the destination of the operation. */
3860 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3862 store_reg(get_field(f
, r1
), o
->out
);
3864 #define SPEC_wout_r1 0
3866 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3868 int r1
= get_field(f
, r1
);
3869 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3871 #define SPEC_wout_r1_8 0
3873 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3875 int r1
= get_field(f
, r1
);
3876 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
3878 #define SPEC_wout_r1_16 0
3880 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3882 store_reg32_i64(get_field(f
, r1
), o
->out
);
3884 #define SPEC_wout_r1_32 0
3886 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3888 int r1
= get_field(f
, r1
);
3889 store_reg32_i64(r1
, o
->out
);
3890 store_reg32_i64(r1
+ 1, o
->out2
);
3892 #define SPEC_wout_r1_P32 SPEC_r1_even
3894 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3896 int r1
= get_field(f
, r1
);
3897 store_reg32_i64(r1
+ 1, o
->out
);
3898 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3899 store_reg32_i64(r1
, o
->out
);
3901 #define SPEC_wout_r1_D32 SPEC_r1_even
3903 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3905 store_freg32_i64(get_field(f
, r1
), o
->out
);
3907 #define SPEC_wout_e1 0
3909 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3911 store_freg(get_field(f
, r1
), o
->out
);
3913 #define SPEC_wout_f1 0
3915 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3917 int f1
= get_field(s
->fields
, r1
);
3918 store_freg(f1
, o
->out
);
3919 store_freg(f1
+ 2, o
->out2
);
3921 #define SPEC_wout_x1 SPEC_r1_f128
3923 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3925 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3926 store_reg32_i64(get_field(f
, r1
), o
->out
);
3929 #define SPEC_wout_cond_r1r2_32 0
3931 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3933 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3934 store_freg32_i64(get_field(f
, r1
), o
->out
);
3937 #define SPEC_wout_cond_e1e2 0
3939 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3941 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3943 #define SPEC_wout_m1_8 0
3945 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3947 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3949 #define SPEC_wout_m1_16 0
3951 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3953 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3955 #define SPEC_wout_m1_32 0
3957 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3959 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3961 #define SPEC_wout_m1_64 0
3963 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3965 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
3967 #define SPEC_wout_m2_32 0
3969 /* ====================================================================== */
3970 /* The "INput 1" generators. These load the first operand to an insn. */
3972 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3974 o
->in1
= load_reg(get_field(f
, r1
));
3976 #define SPEC_in1_r1 0
3978 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3980 o
->in1
= regs
[get_field(f
, r1
)];
3983 #define SPEC_in1_r1_o 0
3985 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3987 o
->in1
= tcg_temp_new_i64();
3988 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3990 #define SPEC_in1_r1_32s 0
3992 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3994 o
->in1
= tcg_temp_new_i64();
3995 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3997 #define SPEC_in1_r1_32u 0
3999 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4001 o
->in1
= tcg_temp_new_i64();
4002 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
4004 #define SPEC_in1_r1_sr32 0
4006 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4008 o
->in1
= load_reg(get_field(f
, r1
) + 1);
4010 #define SPEC_in1_r1p1 SPEC_r1_even
4012 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4014 o
->in1
= tcg_temp_new_i64();
4015 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4017 #define SPEC_in1_r1p1_32s SPEC_r1_even
4019 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4021 o
->in1
= tcg_temp_new_i64();
4022 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4024 #define SPEC_in1_r1p1_32u SPEC_r1_even
4026 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4028 int r1
= get_field(f
, r1
);
4029 o
->in1
= tcg_temp_new_i64();
4030 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
4032 #define SPEC_in1_r1_D32 SPEC_r1_even
4034 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4036 o
->in1
= load_reg(get_field(f
, r2
));
4038 #define SPEC_in1_r2 0
4040 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4042 o
->in1
= load_reg(get_field(f
, r3
));
4044 #define SPEC_in1_r3 0
4046 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4048 o
->in1
= regs
[get_field(f
, r3
)];
4051 #define SPEC_in1_r3_o 0
4053 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4055 o
->in1
= tcg_temp_new_i64();
4056 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4058 #define SPEC_in1_r3_32s 0
4060 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4062 o
->in1
= tcg_temp_new_i64();
4063 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4065 #define SPEC_in1_r3_32u 0
4067 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4069 int r3
= get_field(f
, r3
);
4070 o
->in1
= tcg_temp_new_i64();
4071 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
4073 #define SPEC_in1_r3_D32 SPEC_r3_even
4075 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4077 o
->in1
= load_freg32_i64(get_field(f
, r1
));
4079 #define SPEC_in1_e1 0
4081 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4083 o
->in1
= fregs
[get_field(f
, r1
)];
4086 #define SPEC_in1_f1_o 0
4088 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4090 int r1
= get_field(f
, r1
);
4092 o
->out2
= fregs
[r1
+ 2];
4093 o
->g_out
= o
->g_out2
= true;
4095 #define SPEC_in1_x1_o SPEC_r1_f128
4097 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4099 o
->in1
= fregs
[get_field(f
, r3
)];
4102 #define SPEC_in1_f3_o 0
4104 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4106 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
4108 #define SPEC_in1_la1 0
4110 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4112 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4113 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4115 #define SPEC_in1_la2 0
4117 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4120 o
->in1
= tcg_temp_new_i64();
4121 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4123 #define SPEC_in1_m1_8u 0
4125 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4128 o
->in1
= tcg_temp_new_i64();
4129 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4131 #define SPEC_in1_m1_16s 0
4133 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4136 o
->in1
= tcg_temp_new_i64();
4137 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4139 #define SPEC_in1_m1_16u 0
4141 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4144 o
->in1
= tcg_temp_new_i64();
4145 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4147 #define SPEC_in1_m1_32s 0
4149 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4152 o
->in1
= tcg_temp_new_i64();
4153 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4155 #define SPEC_in1_m1_32u 0
4157 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4160 o
->in1
= tcg_temp_new_i64();
4161 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
4163 #define SPEC_in1_m1_64 0
4165 /* ====================================================================== */
4166 /* The "INput 2" generators. These load the second operand to an insn. */
4168 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4170 o
->in2
= regs
[get_field(f
, r1
)];
4173 #define SPEC_in2_r1_o 0
4175 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4177 o
->in2
= tcg_temp_new_i64();
4178 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4180 #define SPEC_in2_r1_16u 0
4182 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4184 o
->in2
= tcg_temp_new_i64();
4185 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4187 #define SPEC_in2_r1_32u 0
4189 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4191 int r1
= get_field(f
, r1
);
4192 o
->in2
= tcg_temp_new_i64();
4193 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
4195 #define SPEC_in2_r1_D32 SPEC_r1_even
4197 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4199 o
->in2
= load_reg(get_field(f
, r2
));
4201 #define SPEC_in2_r2 0
4203 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4205 o
->in2
= regs
[get_field(f
, r2
)];
4208 #define SPEC_in2_r2_o 0
4210 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4212 int r2
= get_field(f
, r2
);
4214 o
->in2
= load_reg(r2
);
4217 #define SPEC_in2_r2_nz 0
4219 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4221 o
->in2
= tcg_temp_new_i64();
4222 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4224 #define SPEC_in2_r2_8s 0
4226 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4228 o
->in2
= tcg_temp_new_i64();
4229 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4231 #define SPEC_in2_r2_8u 0
4233 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4235 o
->in2
= tcg_temp_new_i64();
4236 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4238 #define SPEC_in2_r2_16s 0
4240 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4242 o
->in2
= tcg_temp_new_i64();
4243 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4245 #define SPEC_in2_r2_16u 0
4247 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4249 o
->in2
= load_reg(get_field(f
, r3
));
4251 #define SPEC_in2_r3 0
4253 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4255 o
->in2
= tcg_temp_new_i64();
4256 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4258 #define SPEC_in2_r2_32s 0
4260 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4262 o
->in2
= tcg_temp_new_i64();
4263 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4265 #define SPEC_in2_r2_32u 0
4267 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4269 o
->in2
= load_freg32_i64(get_field(f
, r2
));
4271 #define SPEC_in2_e2 0
4273 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4275 o
->in2
= fregs
[get_field(f
, r2
)];
4278 #define SPEC_in2_f2_o 0
4280 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4282 int r2
= get_field(f
, r2
);
4284 o
->in2
= fregs
[r2
+ 2];
4285 o
->g_in1
= o
->g_in2
= true;
4287 #define SPEC_in2_x2_o SPEC_r2_f128
4289 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4291 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
4293 #define SPEC_in2_ra2 0
4295 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4297 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4298 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4300 #define SPEC_in2_a2 0
4302 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4304 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
4306 #define SPEC_in2_ri2 0
4308 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4310 help_l2_shift(s
, f
, o
, 31);
4312 #define SPEC_in2_sh32 0
4314 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4316 help_l2_shift(s
, f
, o
, 63);
4318 #define SPEC_in2_sh64 0
4320 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4323 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
4325 #define SPEC_in2_m2_8u 0
4327 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4330 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4332 #define SPEC_in2_m2_16s 0
4334 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4337 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4339 #define SPEC_in2_m2_16u 0
4341 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4344 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4346 #define SPEC_in2_m2_32s 0
4348 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4351 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4353 #define SPEC_in2_m2_32u 0
4355 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4358 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4360 #define SPEC_in2_m2_64 0
4362 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4365 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4367 #define SPEC_in2_mri2_16u 0
4369 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4372 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4374 #define SPEC_in2_mri2_32s 0
4376 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4379 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4381 #define SPEC_in2_mri2_32u 0
4383 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4386 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4388 #define SPEC_in2_mri2_64 0
4390 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4392 o
->in2
= tcg_const_i64(get_field(f
, i2
));
4394 #define SPEC_in2_i2 0
4396 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4398 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
4400 #define SPEC_in2_i2_8u 0
4402 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4404 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
4406 #define SPEC_in2_i2_16u 0
4408 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4410 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
4412 #define SPEC_in2_i2_32u 0
4414 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4416 uint64_t i2
= (uint16_t)get_field(f
, i2
);
4417 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4419 #define SPEC_in2_i2_16u_shl 0
4421 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4423 uint64_t i2
= (uint32_t)get_field(f
, i2
);
4424 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4426 #define SPEC_in2_i2_32u_shl 0
4428 /* ====================================================================== */
4430 /* Find opc within the table of insns. This is formulated as a switch
4431 statement so that (1) we get compile-time notice of cut-paste errors
4432 for duplicated opcodes, and (2) the compiler generates the binary
4433 search tree, rather than us having to post-process the table. */
4435 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4436 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4438 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4440 enum DisasInsnEnum
{
4441 #include "insn-data.def"
4445 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4449 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4451 .help_in1 = in1_##I1, \
4452 .help_in2 = in2_##I2, \
4453 .help_prep = prep_##P, \
4454 .help_wout = wout_##W, \
4455 .help_cout = cout_##CC, \
4456 .help_op = op_##OP, \
4460 /* Allow 0 to be used for NULL in the table below. */
4468 #define SPEC_in1_0 0
4469 #define SPEC_in2_0 0
4470 #define SPEC_prep_0 0
4471 #define SPEC_wout_0 0
4473 static const DisasInsn insn_info
[] = {
4474 #include "insn-data.def"
4478 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4479 case OPC: return &insn_info[insn_ ## NM];
4481 static const DisasInsn
*lookup_opc(uint16_t opc
)
4484 #include "insn-data.def"
4493 /* Extract a field from the insn. The INSN should be left-aligned in
4494 the uint64_t so that we can more easily utilize the big-bit-endian
4495 definitions we extract from the Principals of Operation. */
4497 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4505 /* Zero extract the field from the insn. */
4506 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4508 /* Sign-extend, or un-swap the field as necessary. */
4510 case 0: /* unsigned */
4512 case 1: /* signed */
4513 assert(f
->size
<= 32);
4514 m
= 1u << (f
->size
- 1);
4517 case 2: /* dl+dh split, signed 20 bit. */
4518 r
= ((int8_t)r
<< 12) | (r
>> 8);
4524 /* Validate that the "compressed" encoding we selected above is valid.
4525 I.e. we havn't make two different original fields overlap. */
4526 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4527 o
->presentC
|= 1 << f
->indexC
;
4528 o
->presentO
|= 1 << f
->indexO
;
4530 o
->c
[f
->indexC
] = r
;
4533 /* Lookup the insn at the current PC, extracting the operands into O and
4534 returning the info struct for the insn. Returns NULL for invalid insn. */
4536 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4539 uint64_t insn
, pc
= s
->pc
;
4541 const DisasInsn
*info
;
4543 insn
= ld_code2(env
, pc
);
4544 op
= (insn
>> 8) & 0xff;
4545 ilen
= get_ilen(op
);
4546 s
->next_pc
= s
->pc
+ ilen
;
4553 insn
= ld_code4(env
, pc
) << 32;
4556 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4562 /* We can't actually determine the insn format until we've looked up
4563 the full insn opcode. Which we can't do without locating the
4564 secondary opcode. Assume by default that OP2 is at bit 40; for
4565 those smaller insns that don't actually have a secondary opcode
4566 this will correctly result in OP2 = 0. */
4572 case 0xb2: /* S, RRF, RRE */
4573 case 0xb3: /* RRE, RRD, RRF */
4574 case 0xb9: /* RRE, RRF */
4575 case 0xe5: /* SSE, SIL */
4576 op2
= (insn
<< 8) >> 56;
4580 case 0xc0: /* RIL */
4581 case 0xc2: /* RIL */
4582 case 0xc4: /* RIL */
4583 case 0xc6: /* RIL */
4584 case 0xc8: /* SSF */
4585 case 0xcc: /* RIL */
4586 op2
= (insn
<< 12) >> 60;
4588 case 0xd0 ... 0xdf: /* SS */
4594 case 0xee ... 0xf3: /* SS */
4595 case 0xf8 ... 0xfd: /* SS */
4599 op2
= (insn
<< 40) >> 56;
4603 memset(f
, 0, sizeof(*f
));
4607 /* Lookup the instruction. */
4608 info
= lookup_opc(op
<< 8 | op2
);
4610 /* If we found it, extract the operands. */
4612 DisasFormat fmt
= info
->fmt
;
4615 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4616 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
4622 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4624 const DisasInsn
*insn
;
4625 ExitStatus ret
= NO_EXIT
;
4629 /* Search for the insn in the table. */
4630 insn
= extract_insn(env
, s
, &f
);
4632 /* Not found means unimplemented/illegal opcode. */
4634 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
4636 gen_illegal_opcode(s
);
4637 return EXIT_NORETURN
;
4640 /* Check for insn specification exceptions. */
4642 int spec
= insn
->spec
, excp
= 0, r
;
4644 if (spec
& SPEC_r1_even
) {
4645 r
= get_field(&f
, r1
);
4647 excp
= PGM_SPECIFICATION
;
4650 if (spec
& SPEC_r2_even
) {
4651 r
= get_field(&f
, r2
);
4653 excp
= PGM_SPECIFICATION
;
4656 if (spec
& SPEC_r3_even
) {
4657 r
= get_field(&f
, r3
);
4659 excp
= PGM_SPECIFICATION
;
4662 if (spec
& SPEC_r1_f128
) {
4663 r
= get_field(&f
, r1
);
4665 excp
= PGM_SPECIFICATION
;
4668 if (spec
& SPEC_r2_f128
) {
4669 r
= get_field(&f
, r2
);
4671 excp
= PGM_SPECIFICATION
;
4675 gen_program_exception(s
, excp
);
4676 return EXIT_NORETURN
;
4680 /* Set up the strutures we use to communicate with the helpers. */
4683 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4684 TCGV_UNUSED_I64(o
.out
);
4685 TCGV_UNUSED_I64(o
.out2
);
4686 TCGV_UNUSED_I64(o
.in1
);
4687 TCGV_UNUSED_I64(o
.in2
);
4688 TCGV_UNUSED_I64(o
.addr1
);
4690 /* Implement the instruction. */
4691 if (insn
->help_in1
) {
4692 insn
->help_in1(s
, &f
, &o
);
4694 if (insn
->help_in2
) {
4695 insn
->help_in2(s
, &f
, &o
);
4697 if (insn
->help_prep
) {
4698 insn
->help_prep(s
, &f
, &o
);
4700 if (insn
->help_op
) {
4701 ret
= insn
->help_op(s
, &o
);
4703 if (insn
->help_wout
) {
4704 insn
->help_wout(s
, &f
, &o
);
4706 if (insn
->help_cout
) {
4707 insn
->help_cout(s
, &o
);
4710 /* Free any temporaries created by the helpers. */
4711 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4712 tcg_temp_free_i64(o
.out
);
4714 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4715 tcg_temp_free_i64(o
.out2
);
4717 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4718 tcg_temp_free_i64(o
.in1
);
4720 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4721 tcg_temp_free_i64(o
.in2
);
4723 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4724 tcg_temp_free_i64(o
.addr1
);
4727 /* Advance to the next instruction. */
4732 static inline void gen_intermediate_code_internal(S390CPU
*cpu
,
4733 TranslationBlock
*tb
,
4736 CPUState
*cs
= CPU(cpu
);
4737 CPUS390XState
*env
= &cpu
->env
;
4739 target_ulong pc_start
;
4740 uint64_t next_page_start
;
4741 uint16_t *gen_opc_end
;
4743 int num_insns
, max_insns
;
4751 if (!(tb
->flags
& FLAG_MASK_64
)) {
4752 pc_start
&= 0x7fffffff;
4757 dc
.cc_op
= CC_OP_DYNAMIC
;
4758 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
4760 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4762 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4765 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4766 if (max_insns
== 0) {
4767 max_insns
= CF_COUNT_MASK
;
4774 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4778 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4781 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4782 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4783 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4784 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4786 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4790 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4791 tcg_gen_debug_insn_start(dc
.pc
);
4795 if (unlikely(!QTAILQ_EMPTY(&cs
->breakpoints
))) {
4796 QTAILQ_FOREACH(bp
, &cs
->breakpoints
, entry
) {
4797 if (bp
->pc
== dc
.pc
) {
4798 status
= EXIT_PC_STALE
;
4804 if (status
== NO_EXIT
) {
4805 status
= translate_one(env
, &dc
);
4808 /* If we reach a page boundary, are single stepping,
4809 or exhaust instruction count, stop generation. */
4810 if (status
== NO_EXIT
4811 && (dc
.pc
>= next_page_start
4812 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4813 || num_insns
>= max_insns
4815 || cs
->singlestep_enabled
)) {
4816 status
= EXIT_PC_STALE
;
4818 } while (status
== NO_EXIT
);
4820 if (tb
->cflags
& CF_LAST_IO
) {
4829 update_psw_addr(&dc
);
4831 case EXIT_PC_UPDATED
:
4832 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
4833 cc op type is in env */
4835 /* Exit the TB, either by raising a debug exception or by return. */
4837 gen_exception(EXCP_DEBUG
);
4846 gen_tb_end(tb
, num_insns
);
4847 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
4849 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4852 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4855 tb
->size
= dc
.pc
- pc_start
;
4856 tb
->icount
= num_insns
;
4859 #if defined(S390X_DEBUG_DISAS)
4860 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4861 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4862 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
4868 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4870 gen_intermediate_code_internal(s390_env_get_cpu(env
), tb
, false);
4873 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4875 gen_intermediate_code_internal(s390_env_get_cpu(env
), tb
, true);
4878 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4881 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4882 cc_op
= gen_opc_cc_op
[pc_pos
];
4883 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {