4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
39 static TCGv_ptr cpu_env
;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext
;
50 typedef struct DisasInsn DisasInsn
;
51 typedef struct DisasFields DisasFields
;
54 struct TranslationBlock
*tb
;
55 const DisasInsn
*insn
;
59 bool singlestep_enabled
;
62 /* Information carried about a condition to be evaluated. */
69 struct { TCGv_i64 a
, b
; } s64
;
70 struct { TCGv_i32 a
, b
; } s32
;
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit
[CC_OP_MAX
];
78 static uint64_t inline_branch_miss
[CC_OP_MAX
];
81 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
83 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
84 if (s
->tb
->flags
& FLAG_MASK_32
) {
85 return pc
| 0x80000000;
91 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
94 S390CPU
*cpu
= S390_CPU(cs
);
95 CPUS390XState
*env
= &cpu
->env
;
99 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
100 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
102 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
103 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
106 for (i
= 0; i
< 16; i
++) {
107 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
109 cpu_fprintf(f
, "\n");
115 for (i
= 0; i
< 16; i
++) {
116 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
118 cpu_fprintf(f
, "\n");
124 #ifndef CONFIG_USER_ONLY
125 for (i
= 0; i
< 16; i
++) {
126 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
128 cpu_fprintf(f
, "\n");
135 #ifdef DEBUG_INLINE_BRANCHES
136 for (i
= 0; i
< CC_OP_MAX
; i
++) {
137 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
138 inline_branch_miss
[i
], inline_branch_hit
[i
]);
142 cpu_fprintf(f
, "\n");
145 static TCGv_i64 psw_addr
;
146 static TCGv_i64 psw_mask
;
148 static TCGv_i32 cc_op
;
149 static TCGv_i64 cc_src
;
150 static TCGv_i64 cc_dst
;
151 static TCGv_i64 cc_vr
;
153 static char cpu_reg_names
[32][4];
154 static TCGv_i64 regs
[16];
155 static TCGv_i64 fregs
[16];
157 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
159 void s390x_translate_init(void)
163 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
164 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
165 offsetof(CPUS390XState
, psw
.addr
),
167 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
168 offsetof(CPUS390XState
, psw
.mask
),
171 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
173 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
175 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
177 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
180 for (i
= 0; i
< 16; i
++) {
181 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
182 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
183 offsetof(CPUS390XState
, regs
[i
]),
187 for (i
= 0; i
< 16; i
++) {
188 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
189 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
190 offsetof(CPUS390XState
, fregs
[i
].d
),
191 cpu_reg_names
[i
+ 16]);
195 static TCGv_i64
load_reg(int reg
)
197 TCGv_i64 r
= tcg_temp_new_i64();
198 tcg_gen_mov_i64(r
, regs
[reg
]);
202 static TCGv_i64
load_freg32_i64(int reg
)
204 TCGv_i64 r
= tcg_temp_new_i64();
205 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
209 static void store_reg(int reg
, TCGv_i64 v
)
211 tcg_gen_mov_i64(regs
[reg
], v
);
214 static void store_freg(int reg
, TCGv_i64 v
)
216 tcg_gen_mov_i64(fregs
[reg
], v
);
219 static void store_reg32_i64(int reg
, TCGv_i64 v
)
221 /* 32 bit register writes keep the upper half */
222 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
225 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
227 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
230 static void store_freg32_i64(int reg
, TCGv_i64 v
)
232 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
235 static void return_low128(TCGv_i64 dest
)
237 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
240 static void update_psw_addr(DisasContext
*s
)
243 tcg_gen_movi_i64(psw_addr
, s
->pc
);
246 static void update_cc_op(DisasContext
*s
)
248 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
249 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
253 static void potential_page_fault(DisasContext
*s
)
259 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
261 return (uint64_t)cpu_lduw_code(env
, pc
);
264 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
266 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
269 static int get_mem_index(DisasContext
*s
)
271 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
272 case PSW_ASC_PRIMARY
>> 32:
274 case PSW_ASC_SECONDARY
>> 32:
276 case PSW_ASC_HOME
>> 32:
284 static void gen_exception(int excp
)
286 TCGv_i32 tmp
= tcg_const_i32(excp
);
287 gen_helper_exception(cpu_env
, tmp
);
288 tcg_temp_free_i32(tmp
);
291 static void gen_program_exception(DisasContext
*s
, int code
)
295 /* Remember what pgm exeption this was. */
296 tmp
= tcg_const_i32(code
);
297 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
298 tcg_temp_free_i32(tmp
);
300 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
301 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
302 tcg_temp_free_i32(tmp
);
304 /* Advance past instruction. */
311 /* Trigger exception. */
312 gen_exception(EXCP_PGM
);
315 static inline void gen_illegal_opcode(DisasContext
*s
)
317 gen_program_exception(s
, PGM_SPECIFICATION
);
320 #ifndef CONFIG_USER_ONLY
321 static void check_privileged(DisasContext
*s
)
323 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
324 gen_program_exception(s
, PGM_PRIVILEGED
);
329 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
331 TCGv_i64 tmp
= tcg_temp_new_i64();
332 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
334 /* Note that d2 is limited to 20 bits, signed. If we crop negative
335 displacements early we create larger immedate addends. */
337 /* Note that addi optimizes the imm==0 case. */
339 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
340 tcg_gen_addi_i64(tmp
, tmp
, d2
);
342 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
344 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
350 tcg_gen_movi_i64(tmp
, d2
);
353 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
359 static inline bool live_cc_data(DisasContext
*s
)
361 return (s
->cc_op
!= CC_OP_DYNAMIC
362 && s
->cc_op
!= CC_OP_STATIC
366 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
368 if (live_cc_data(s
)) {
369 tcg_gen_discard_i64(cc_src
);
370 tcg_gen_discard_i64(cc_dst
);
371 tcg_gen_discard_i64(cc_vr
);
373 s
->cc_op
= CC_OP_CONST0
+ val
;
376 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
378 if (live_cc_data(s
)) {
379 tcg_gen_discard_i64(cc_src
);
380 tcg_gen_discard_i64(cc_vr
);
382 tcg_gen_mov_i64(cc_dst
, dst
);
386 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
389 if (live_cc_data(s
)) {
390 tcg_gen_discard_i64(cc_vr
);
392 tcg_gen_mov_i64(cc_src
, src
);
393 tcg_gen_mov_i64(cc_dst
, dst
);
397 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
398 TCGv_i64 dst
, TCGv_i64 vr
)
400 tcg_gen_mov_i64(cc_src
, src
);
401 tcg_gen_mov_i64(cc_dst
, dst
);
402 tcg_gen_mov_i64(cc_vr
, vr
);
406 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
408 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
411 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
413 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
416 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
418 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
421 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
423 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
426 /* CC value is in env->cc_op */
427 static void set_cc_static(DisasContext
*s
)
429 if (live_cc_data(s
)) {
430 tcg_gen_discard_i64(cc_src
);
431 tcg_gen_discard_i64(cc_dst
);
432 tcg_gen_discard_i64(cc_vr
);
434 s
->cc_op
= CC_OP_STATIC
;
437 /* calculates cc into cc_op */
438 static void gen_op_calc_cc(DisasContext
*s
)
440 TCGv_i32 local_cc_op
;
443 TCGV_UNUSED_I32(local_cc_op
);
444 TCGV_UNUSED_I64(dummy
);
447 dummy
= tcg_const_i64(0);
461 local_cc_op
= tcg_const_i32(s
->cc_op
);
477 /* s->cc_op is the cc value */
478 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
481 /* env->cc_op already is the cc value */
496 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
501 case CC_OP_LTUGTU_32
:
502 case CC_OP_LTUGTU_64
:
509 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
524 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
527 /* unknown operation - assume 3 arguments and cc_op in env */
528 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
534 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
535 tcg_temp_free_i32(local_cc_op
);
537 if (!TCGV_IS_UNUSED_I64(dummy
)) {
538 tcg_temp_free_i64(dummy
);
541 /* We now have cc in cc_op as constant */
545 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
547 /* NOTE: we handle the case where the TB spans two pages here */
548 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
549 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
550 && !s
->singlestep_enabled
551 && !(s
->tb
->cflags
& CF_LAST_IO
));
554 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
556 #ifdef DEBUG_INLINE_BRANCHES
557 inline_branch_miss
[cc_op
]++;
561 static void account_inline_branch(DisasContext
*s
, int cc_op
)
563 #ifdef DEBUG_INLINE_BRANCHES
564 inline_branch_hit
[cc_op
]++;
568 /* Table of mask values to comparison codes, given a comparison as input.
569 For such, CC=3 should not be possible. */
570 static const TCGCond ltgt_cond
[16] = {
571 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
572 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
573 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
574 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
575 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
576 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
577 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
578 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
581 /* Table of mask values to comparison codes, given a logic op as input.
582 For such, only CC=0 and CC=1 should be possible. */
583 static const TCGCond nz_cond
[16] = {
584 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
585 TCG_COND_NEVER
, TCG_COND_NEVER
,
586 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
587 TCG_COND_NE
, TCG_COND_NE
,
588 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
589 TCG_COND_EQ
, TCG_COND_EQ
,
590 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
591 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
594 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
595 details required to generate a TCG comparison. */
596 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
599 enum cc_op old_cc_op
= s
->cc_op
;
601 if (mask
== 15 || mask
== 0) {
602 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
605 c
->g1
= c
->g2
= true;
610 /* Find the TCG condition for the mask + cc op. */
616 cond
= ltgt_cond
[mask
];
617 if (cond
== TCG_COND_NEVER
) {
620 account_inline_branch(s
, old_cc_op
);
623 case CC_OP_LTUGTU_32
:
624 case CC_OP_LTUGTU_64
:
625 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
626 if (cond
== TCG_COND_NEVER
) {
629 account_inline_branch(s
, old_cc_op
);
633 cond
= nz_cond
[mask
];
634 if (cond
== TCG_COND_NEVER
) {
637 account_inline_branch(s
, old_cc_op
);
652 account_inline_branch(s
, old_cc_op
);
667 account_inline_branch(s
, old_cc_op
);
671 switch (mask
& 0xa) {
672 case 8: /* src == 0 -> no one bit found */
675 case 2: /* src != 0 -> one bit found */
681 account_inline_branch(s
, old_cc_op
);
687 case 8 | 2: /* vr == 0 */
690 case 4 | 1: /* vr != 0 */
693 case 8 | 4: /* no carry -> vr >= src */
696 case 2 | 1: /* carry -> vr < src */
702 account_inline_branch(s
, old_cc_op
);
707 /* Note that CC=0 is impossible; treat it as dont-care. */
709 case 2: /* zero -> op1 == op2 */
712 case 4 | 1: /* !zero -> op1 != op2 */
715 case 4: /* borrow (!carry) -> op1 < op2 */
718 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
724 account_inline_branch(s
, old_cc_op
);
729 /* Calculate cc value. */
734 /* Jump based on CC. We'll load up the real cond below;
735 the assignment here merely avoids a compiler warning. */
736 account_noninline_branch(s
, old_cc_op
);
737 old_cc_op
= CC_OP_STATIC
;
738 cond
= TCG_COND_NEVER
;
742 /* Load up the arguments of the comparison. */
744 c
->g1
= c
->g2
= false;
748 c
->u
.s32
.a
= tcg_temp_new_i32();
749 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
750 c
->u
.s32
.b
= tcg_const_i32(0);
753 case CC_OP_LTUGTU_32
:
756 c
->u
.s32
.a
= tcg_temp_new_i32();
757 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
758 c
->u
.s32
.b
= tcg_temp_new_i32();
759 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
766 c
->u
.s64
.b
= tcg_const_i64(0);
770 case CC_OP_LTUGTU_64
:
774 c
->g1
= c
->g2
= true;
780 c
->u
.s64
.a
= tcg_temp_new_i64();
781 c
->u
.s64
.b
= tcg_const_i64(0);
782 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
787 c
->u
.s32
.a
= tcg_temp_new_i32();
788 c
->u
.s32
.b
= tcg_temp_new_i32();
789 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_vr
);
790 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
791 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
793 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_src
);
800 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
801 c
->u
.s64
.b
= tcg_const_i64(0);
813 case 0x8 | 0x4 | 0x2: /* cc != 3 */
815 c
->u
.s32
.b
= tcg_const_i32(3);
817 case 0x8 | 0x4 | 0x1: /* cc != 2 */
819 c
->u
.s32
.b
= tcg_const_i32(2);
821 case 0x8 | 0x2 | 0x1: /* cc != 1 */
823 c
->u
.s32
.b
= tcg_const_i32(1);
825 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
828 c
->u
.s32
.a
= tcg_temp_new_i32();
829 c
->u
.s32
.b
= tcg_const_i32(0);
830 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
832 case 0x8 | 0x4: /* cc < 2 */
834 c
->u
.s32
.b
= tcg_const_i32(2);
836 case 0x8: /* cc == 0 */
838 c
->u
.s32
.b
= tcg_const_i32(0);
840 case 0x4 | 0x2 | 0x1: /* cc != 0 */
842 c
->u
.s32
.b
= tcg_const_i32(0);
844 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
847 c
->u
.s32
.a
= tcg_temp_new_i32();
848 c
->u
.s32
.b
= tcg_const_i32(0);
849 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
851 case 0x4: /* cc == 1 */
853 c
->u
.s32
.b
= tcg_const_i32(1);
855 case 0x2 | 0x1: /* cc > 1 */
857 c
->u
.s32
.b
= tcg_const_i32(1);
859 case 0x2: /* cc == 2 */
861 c
->u
.s32
.b
= tcg_const_i32(2);
863 case 0x1: /* cc == 3 */
865 c
->u
.s32
.b
= tcg_const_i32(3);
868 /* CC is masked by something else: (8 >> cc) & mask. */
871 c
->u
.s32
.a
= tcg_const_i32(8);
872 c
->u
.s32
.b
= tcg_const_i32(0);
873 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
874 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
885 static void free_compare(DisasCompare
*c
)
889 tcg_temp_free_i64(c
->u
.s64
.a
);
891 tcg_temp_free_i32(c
->u
.s32
.a
);
896 tcg_temp_free_i64(c
->u
.s64
.b
);
898 tcg_temp_free_i32(c
->u
.s32
.b
);
903 /* ====================================================================== */
904 /* Define the insn format enumeration. */
905 #define F0(N) FMT_##N,
906 #define F1(N, X1) F0(N)
907 #define F2(N, X1, X2) F0(N)
908 #define F3(N, X1, X2, X3) F0(N)
909 #define F4(N, X1, X2, X3, X4) F0(N)
910 #define F5(N, X1, X2, X3, X4, X5) F0(N)
913 #include "insn-format.def"
923 /* Define a structure to hold the decoded fields. We'll store each inside
924 an array indexed by an enum. In order to conserve memory, we'll arrange
925 for fields that do not exist at the same time to overlap, thus the "C"
926 for compact. For checking purposes there is an "O" for original index
927 as well that will be applied to availability bitmaps. */
929 enum DisasFieldIndexO
{
952 enum DisasFieldIndexC
{
986 unsigned presentC
:16;
987 unsigned int presentO
;
991 /* This is the way fields are to be accessed out of DisasFields. */
992 #define have_field(S, F) have_field1((S), FLD_O_##F)
993 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
995 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
997 return (f
->presentO
>> c
) & 1;
1000 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1001 enum DisasFieldIndexC c
)
1003 assert(have_field1(f
, o
));
1007 /* Describe the layout of each field in each format. */
1008 typedef struct DisasField
{
1010 unsigned int size
:8;
1011 unsigned int type
:2;
1012 unsigned int indexC
:6;
1013 enum DisasFieldIndexO indexO
:8;
1016 typedef struct DisasFormatInfo
{
1017 DisasField op
[NUM_C_FIELD
];
1020 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1021 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1022 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1023 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1024 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1025 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1026 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1027 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1028 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1029 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1030 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1031 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1032 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1033 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1035 #define F0(N) { { } },
1036 #define F1(N, X1) { { X1 } },
1037 #define F2(N, X1, X2) { { X1, X2 } },
1038 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1039 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1040 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1042 static const DisasFormatInfo format_info
[] = {
1043 #include "insn-format.def"
1061 /* Generally, we'll extract operands into this structures, operate upon
1062 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1063 of routines below for more details. */
1065 bool g_out
, g_out2
, g_in1
, g_in2
;
1066 TCGv_i64 out
, out2
, in1
, in2
;
1070 /* Instructions can place constraints on their operands, raising specification
1071 exceptions if they are violated. To make this easy to automate, each "in1",
1072 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1073 of the following, or 0. To make this easy to document, we'll put the
1074 SPEC_<name> defines next to <name>. */
1076 #define SPEC_r1_even 1
1077 #define SPEC_r2_even 2
1078 #define SPEC_r3_even 4
1079 #define SPEC_r1_f128 8
1080 #define SPEC_r2_f128 16
1082 /* Return values from translate_one, indicating the state of the TB. */
1084 /* Continue the TB. */
1086 /* We have emitted one or more goto_tb. No fixup required. */
1088 /* We are not using a goto_tb (for whatever reason), but have updated
1089 the PC (for whatever reason), so there's no need to do it again on
1092 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1093 updated the PC for the next instruction to be executed. */
1095 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1096 No following code will be executed. */
1100 typedef enum DisasFacility
{
1101 FAC_Z
, /* zarch (default) */
1102 FAC_CASS
, /* compare and swap and store */
1103 FAC_CASS2
, /* compare and swap and store 2*/
1104 FAC_DFP
, /* decimal floating point */
1105 FAC_DFPR
, /* decimal floating point rounding */
1106 FAC_DO
, /* distinct operands */
1107 FAC_EE
, /* execute extensions */
1108 FAC_EI
, /* extended immediate */
1109 FAC_FPE
, /* floating point extension */
1110 FAC_FPSSH
, /* floating point support sign handling */
1111 FAC_FPRGR
, /* FPR-GR transfer */
1112 FAC_GIE
, /* general instructions extension */
1113 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1114 FAC_HW
, /* high-word */
1115 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1116 FAC_LOC
, /* load/store on condition */
1117 FAC_LD
, /* long displacement */
1118 FAC_PC
, /* population count */
1119 FAC_SCF
, /* store clock fast */
1120 FAC_SFLE
, /* store facility list extended */
1121 FAC_ILA
, /* interlocked access facility 1 */
1127 DisasFacility fac
:8;
1132 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1133 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1134 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1135 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1136 void (*help_cout
)(DisasContext
*, DisasOps
*);
1137 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1142 /* ====================================================================== */
1143 /* Miscellaneous helpers, used by several operations. */
1145 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1146 DisasOps
*o
, int mask
)
1148 int b2
= get_field(f
, b2
);
1149 int d2
= get_field(f
, d2
);
1152 o
->in2
= tcg_const_i64(d2
& mask
);
1154 o
->in2
= get_address(s
, 0, b2
, d2
);
1155 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1159 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1161 if (dest
== s
->next_pc
) {
1164 if (use_goto_tb(s
, dest
)) {
1167 tcg_gen_movi_i64(psw_addr
, dest
);
1168 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1169 return EXIT_GOTO_TB
;
1171 tcg_gen_movi_i64(psw_addr
, dest
);
1172 return EXIT_PC_UPDATED
;
1176 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1177 bool is_imm
, int imm
, TCGv_i64 cdest
)
1180 uint64_t dest
= s
->pc
+ 2 * imm
;
1183 /* Take care of the special cases first. */
1184 if (c
->cond
== TCG_COND_NEVER
) {
1189 if (dest
== s
->next_pc
) {
1190 /* Branch to next. */
1194 if (c
->cond
== TCG_COND_ALWAYS
) {
1195 ret
= help_goto_direct(s
, dest
);
1199 if (TCGV_IS_UNUSED_I64(cdest
)) {
1200 /* E.g. bcr %r0 -> no branch. */
1204 if (c
->cond
== TCG_COND_ALWAYS
) {
1205 tcg_gen_mov_i64(psw_addr
, cdest
);
1206 ret
= EXIT_PC_UPDATED
;
1211 if (use_goto_tb(s
, s
->next_pc
)) {
1212 if (is_imm
&& use_goto_tb(s
, dest
)) {
1213 /* Both exits can use goto_tb. */
1216 lab
= gen_new_label();
1218 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1220 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1223 /* Branch not taken. */
1225 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1226 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1231 tcg_gen_movi_i64(psw_addr
, dest
);
1232 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1236 /* Fallthru can use goto_tb, but taken branch cannot. */
1237 /* Store taken branch destination before the brcond. This
1238 avoids having to allocate a new local temp to hold it.
1239 We'll overwrite this in the not taken case anyway. */
1241 tcg_gen_mov_i64(psw_addr
, cdest
);
1244 lab
= gen_new_label();
1246 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1248 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1251 /* Branch not taken. */
1254 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1255 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1259 tcg_gen_movi_i64(psw_addr
, dest
);
1261 ret
= EXIT_PC_UPDATED
;
1264 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1265 Most commonly we're single-stepping or some other condition that
1266 disables all use of goto_tb. Just update the PC and exit. */
1268 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1270 cdest
= tcg_const_i64(dest
);
1274 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1277 TCGv_i32 t0
= tcg_temp_new_i32();
1278 TCGv_i64 t1
= tcg_temp_new_i64();
1279 TCGv_i64 z
= tcg_const_i64(0);
1280 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1281 tcg_gen_extu_i32_i64(t1
, t0
);
1282 tcg_temp_free_i32(t0
);
1283 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1284 tcg_temp_free_i64(t1
);
1285 tcg_temp_free_i64(z
);
1289 tcg_temp_free_i64(cdest
);
1291 tcg_temp_free_i64(next
);
1293 ret
= EXIT_PC_UPDATED
;
1301 /* ====================================================================== */
1302 /* The operations. These perform the bulk of the work for any insn,
1303 usually after the operands have been loaded and output initialized. */
1305 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1307 gen_helper_abs_i64(o
->out
, o
->in2
);
1311 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1313 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1317 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1319 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1323 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1325 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1326 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1330 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1332 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1336 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1341 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1343 /* The carry flag is the msb of CC, therefore the branch mask that would
1344 create that comparison is 3. Feeding the generated comparison to
1345 setcond produces the carry flag that we desire. */
1346 disas_jcc(s
, &cmp
, 3);
1347 carry
= tcg_temp_new_i64();
1349 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1351 TCGv_i32 t
= tcg_temp_new_i32();
1352 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1353 tcg_gen_extu_i32_i64(carry
, t
);
1354 tcg_temp_free_i32(t
);
1358 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1359 tcg_temp_free_i64(carry
);
1363 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1365 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1369 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1371 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1375 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1377 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1378 return_low128(o
->out2
);
1382 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1384 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1388 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1390 int shift
= s
->insn
->data
& 0xff;
1391 int size
= s
->insn
->data
>> 8;
1392 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1395 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1396 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1397 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1399 /* Produce the CC from only the bits manipulated. */
1400 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1401 set_cc_nz_u64(s
, cc_dst
);
1405 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1407 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1408 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1409 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1410 return EXIT_PC_UPDATED
;
1416 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1418 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1419 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1422 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1424 int m1
= get_field(s
->fields
, m1
);
1425 bool is_imm
= have_field(s
->fields
, i2
);
1426 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1429 disas_jcc(s
, &c
, m1
);
1430 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1433 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1435 int r1
= get_field(s
->fields
, r1
);
1436 bool is_imm
= have_field(s
->fields
, i2
);
1437 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1441 c
.cond
= TCG_COND_NE
;
1446 t
= tcg_temp_new_i64();
1447 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1448 store_reg32_i64(r1
, t
);
1449 c
.u
.s32
.a
= tcg_temp_new_i32();
1450 c
.u
.s32
.b
= tcg_const_i32(0);
1451 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1452 tcg_temp_free_i64(t
);
1454 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1457 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1459 int r1
= get_field(s
->fields
, r1
);
1460 bool is_imm
= have_field(s
->fields
, i2
);
1461 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1464 c
.cond
= TCG_COND_NE
;
1469 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1470 c
.u
.s64
.a
= regs
[r1
];
1471 c
.u
.s64
.b
= tcg_const_i64(0);
1473 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1476 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1478 int r1
= get_field(s
->fields
, r1
);
1479 int r3
= get_field(s
->fields
, r3
);
1480 bool is_imm
= have_field(s
->fields
, i2
);
1481 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1485 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1490 t
= tcg_temp_new_i64();
1491 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1492 c
.u
.s32
.a
= tcg_temp_new_i32();
1493 c
.u
.s32
.b
= tcg_temp_new_i32();
1494 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1495 tcg_gen_trunc_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1496 store_reg32_i64(r1
, t
);
1497 tcg_temp_free_i64(t
);
1499 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1502 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1504 int r1
= get_field(s
->fields
, r1
);
1505 int r3
= get_field(s
->fields
, r3
);
1506 bool is_imm
= have_field(s
->fields
, i2
);
1507 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1510 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1513 if (r1
== (r3
| 1)) {
1514 c
.u
.s64
.b
= load_reg(r3
| 1);
1517 c
.u
.s64
.b
= regs
[r3
| 1];
1521 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1522 c
.u
.s64
.a
= regs
[r1
];
1525 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1528 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1530 int imm
, m3
= get_field(s
->fields
, m3
);
1534 c
.cond
= ltgt_cond
[m3
];
1535 if (s
->insn
->data
) {
1536 c
.cond
= tcg_unsigned_cond(c
.cond
);
1538 c
.is_64
= c
.g1
= c
.g2
= true;
1542 is_imm
= have_field(s
->fields
, i4
);
1544 imm
= get_field(s
->fields
, i4
);
1547 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1548 get_field(s
->fields
, d4
));
1551 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1554 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1556 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1561 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1563 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1568 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1570 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1575 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1577 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1578 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1579 tcg_temp_free_i32(m3
);
1580 gen_set_cc_nz_f32(s
, o
->in2
);
1584 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1586 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1587 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1588 tcg_temp_free_i32(m3
);
1589 gen_set_cc_nz_f64(s
, o
->in2
);
1593 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1595 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1596 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1597 tcg_temp_free_i32(m3
);
1598 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1602 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1604 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1605 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1606 tcg_temp_free_i32(m3
);
1607 gen_set_cc_nz_f32(s
, o
->in2
);
1611 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1613 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1614 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1615 tcg_temp_free_i32(m3
);
1616 gen_set_cc_nz_f64(s
, o
->in2
);
1620 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1622 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1623 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1624 tcg_temp_free_i32(m3
);
1625 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1629 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1631 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1632 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1633 tcg_temp_free_i32(m3
);
1634 gen_set_cc_nz_f32(s
, o
->in2
);
1638 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1640 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1641 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1642 tcg_temp_free_i32(m3
);
1643 gen_set_cc_nz_f64(s
, o
->in2
);
1647 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1649 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1650 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1651 tcg_temp_free_i32(m3
);
1652 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1656 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1658 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1659 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1660 tcg_temp_free_i32(m3
);
1661 gen_set_cc_nz_f32(s
, o
->in2
);
1665 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1667 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1668 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1669 tcg_temp_free_i32(m3
);
1670 gen_set_cc_nz_f64(s
, o
->in2
);
1674 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1676 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1677 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1678 tcg_temp_free_i32(m3
);
1679 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1683 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1685 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1686 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1687 tcg_temp_free_i32(m3
);
1691 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1693 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1694 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1695 tcg_temp_free_i32(m3
);
1699 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1701 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1702 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1703 tcg_temp_free_i32(m3
);
1704 return_low128(o
->out2
);
1708 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1710 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1711 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1712 tcg_temp_free_i32(m3
);
1716 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1718 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1719 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1720 tcg_temp_free_i32(m3
);
1724 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1726 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1727 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1728 tcg_temp_free_i32(m3
);
1729 return_low128(o
->out2
);
1733 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1735 int r2
= get_field(s
->fields
, r2
);
1736 TCGv_i64 len
= tcg_temp_new_i64();
1738 potential_page_fault(s
);
1739 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1741 return_low128(o
->out
);
1743 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1744 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1745 tcg_temp_free_i64(len
);
1750 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1752 int l
= get_field(s
->fields
, l1
);
1757 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1758 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1761 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1762 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1765 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1766 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1769 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1770 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1773 potential_page_fault(s
);
1774 vl
= tcg_const_i32(l
);
1775 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1776 tcg_temp_free_i32(vl
);
1780 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1784 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1786 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1787 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1788 potential_page_fault(s
);
1789 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1790 tcg_temp_free_i32(r1
);
1791 tcg_temp_free_i32(r3
);
1796 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1798 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1799 TCGv_i32 t1
= tcg_temp_new_i32();
1800 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
1801 potential_page_fault(s
);
1802 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1804 tcg_temp_free_i32(t1
);
1805 tcg_temp_free_i32(m3
);
1809 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1811 potential_page_fault(s
);
1812 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1814 return_low128(o
->in2
);
1818 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1820 TCGv_i64 t
= tcg_temp_new_i64();
1821 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1822 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1823 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1824 tcg_temp_free_i64(t
);
1828 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1830 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1831 int d2
= get_field(s
->fields
, d2
);
1832 int b2
= get_field(s
->fields
, b2
);
1833 int is_64
= s
->insn
->data
;
1834 TCGv_i64 addr
, mem
, cc
, z
;
1836 /* Note that in1 = R3 (new value) and
1837 in2 = (zero-extended) R1 (expected value). */
1839 /* Load the memory into the (temporary) output. While the PoO only talks
1840 about moving the memory to R1 on inequality, if we include equality it
1841 means that R1 is equal to the memory in all conditions. */
1842 addr
= get_address(s
, 0, b2
, d2
);
1844 tcg_gen_qemu_ld64(o
->out
, addr
, get_mem_index(s
));
1846 tcg_gen_qemu_ld32u(o
->out
, addr
, get_mem_index(s
));
1849 /* Are the memory and expected values (un)equal? Note that this setcond
1850 produces the output CC value, thus the NE sense of the test. */
1851 cc
= tcg_temp_new_i64();
1852 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1854 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1855 Recall that we are allowed to unconditionally issue the store (and
1856 thus any possible write trap), so (re-)store the original contents
1857 of MEM in case of inequality. */
1858 z
= tcg_const_i64(0);
1859 mem
= tcg_temp_new_i64();
1860 tcg_gen_movcond_i64(TCG_COND_EQ
, mem
, cc
, z
, o
->in1
, o
->out
);
1862 tcg_gen_qemu_st64(mem
, addr
, get_mem_index(s
));
1864 tcg_gen_qemu_st32(mem
, addr
, get_mem_index(s
));
1866 tcg_temp_free_i64(z
);
1867 tcg_temp_free_i64(mem
);
1868 tcg_temp_free_i64(addr
);
1870 /* Store CC back to cc_op. Wait until after the store so that any
1871 exception gets the old cc_op value. */
1872 tcg_gen_trunc_i64_i32(cc_op
, cc
);
1873 tcg_temp_free_i64(cc
);
1878 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1880 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1881 int r1
= get_field(s
->fields
, r1
);
1882 int r3
= get_field(s
->fields
, r3
);
1883 int d2
= get_field(s
->fields
, d2
);
1884 int b2
= get_field(s
->fields
, b2
);
1885 TCGv_i64 addrh
, addrl
, memh
, meml
, outh
, outl
, cc
, z
;
1887 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1889 addrh
= get_address(s
, 0, b2
, d2
);
1890 addrl
= get_address(s
, 0, b2
, d2
+ 8);
1891 outh
= tcg_temp_new_i64();
1892 outl
= tcg_temp_new_i64();
1894 tcg_gen_qemu_ld64(outh
, addrh
, get_mem_index(s
));
1895 tcg_gen_qemu_ld64(outl
, addrl
, get_mem_index(s
));
1897 /* Fold the double-word compare with arithmetic. */
1898 cc
= tcg_temp_new_i64();
1899 z
= tcg_temp_new_i64();
1900 tcg_gen_xor_i64(cc
, outh
, regs
[r1
]);
1901 tcg_gen_xor_i64(z
, outl
, regs
[r1
+ 1]);
1902 tcg_gen_or_i64(cc
, cc
, z
);
1903 tcg_gen_movi_i64(z
, 0);
1904 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, cc
, z
);
1906 memh
= tcg_temp_new_i64();
1907 meml
= tcg_temp_new_i64();
1908 tcg_gen_movcond_i64(TCG_COND_EQ
, memh
, cc
, z
, regs
[r3
], outh
);
1909 tcg_gen_movcond_i64(TCG_COND_EQ
, meml
, cc
, z
, regs
[r3
+ 1], outl
);
1910 tcg_temp_free_i64(z
);
1912 tcg_gen_qemu_st64(memh
, addrh
, get_mem_index(s
));
1913 tcg_gen_qemu_st64(meml
, addrl
, get_mem_index(s
));
1914 tcg_temp_free_i64(memh
);
1915 tcg_temp_free_i64(meml
);
1916 tcg_temp_free_i64(addrh
);
1917 tcg_temp_free_i64(addrl
);
1919 /* Save back state now that we've passed all exceptions. */
1920 tcg_gen_mov_i64(regs
[r1
], outh
);
1921 tcg_gen_mov_i64(regs
[r1
+ 1], outl
);
1922 tcg_gen_trunc_i64_i32(cc_op
, cc
);
1923 tcg_temp_free_i64(outh
);
1924 tcg_temp_free_i64(outl
);
1925 tcg_temp_free_i64(cc
);
1930 #ifndef CONFIG_USER_ONLY
1931 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
1933 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1934 check_privileged(s
);
1935 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
1936 tcg_temp_free_i32(r1
);
1942 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
1944 TCGv_i64 t1
= tcg_temp_new_i64();
1945 TCGv_i32 t2
= tcg_temp_new_i32();
1946 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
1947 gen_helper_cvd(t1
, t2
);
1948 tcg_temp_free_i32(t2
);
1949 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
1950 tcg_temp_free_i64(t1
);
1954 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
1956 int m3
= get_field(s
->fields
, m3
);
1957 TCGLabel
*lab
= gen_new_label();
1961 c
= tcg_invert_cond(ltgt_cond
[m3
]);
1962 if (s
->insn
->data
) {
1963 c
= tcg_unsigned_cond(c
);
1965 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
1967 /* Set DXC to 0xff. */
1968 t
= tcg_temp_new_i32();
1969 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1970 tcg_gen_ori_i32(t
, t
, 0xff00);
1971 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1972 tcg_temp_free_i32(t
);
1975 gen_program_exception(s
, PGM_DATA
);
1981 #ifndef CONFIG_USER_ONLY
1982 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
1986 check_privileged(s
);
1987 potential_page_fault(s
);
1989 /* We pretend the format is RX_a so that D2 is the field we want. */
1990 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
1991 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
1992 tcg_temp_free_i32(tmp
);
1997 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
1999 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2000 return_low128(o
->out
);
2004 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2006 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2007 return_low128(o
->out
);
2011 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2013 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2014 return_low128(o
->out
);
2018 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2020 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2021 return_low128(o
->out
);
2025 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2027 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2031 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2033 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2037 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2039 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2040 return_low128(o
->out2
);
2044 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2046 int r2
= get_field(s
->fields
, r2
);
2047 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2051 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2053 /* No cache information provided. */
2054 tcg_gen_movi_i64(o
->out
, -1);
2058 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2060 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2064 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2066 int r1
= get_field(s
->fields
, r1
);
2067 int r2
= get_field(s
->fields
, r2
);
2068 TCGv_i64 t
= tcg_temp_new_i64();
2070 /* Note the "subsequently" in the PoO, which implies a defined result
2071 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2072 tcg_gen_shri_i64(t
, psw_mask
, 32);
2073 store_reg32_i64(r1
, t
);
2075 store_reg32_i64(r2
, psw_mask
);
2078 tcg_temp_free_i64(t
);
2082 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2084 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2085 tb->flags, (ab)use the tb->cs_base field as the address of
2086 the template in memory, and grab 8 bits of tb->flags/cflags for
2087 the contents of the register. We would then recognize all this
2088 in gen_intermediate_code_internal, generating code for exactly
2089 one instruction. This new TB then gets executed normally.
2091 On the other hand, this seems to be mostly used for modifying
2092 MVC inside of memcpy, which needs a helper call anyway. So
2093 perhaps this doesn't bear thinking about any further. */
2100 tmp
= tcg_const_i64(s
->next_pc
);
2101 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2102 tcg_temp_free_i64(tmp
);
2108 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2110 /* We'll use the original input for cc computation, since we get to
2111 compare that against 0, which ought to be better than comparing
2112 the real output against 64. It also lets cc_dst be a convenient
2113 temporary during our computation. */
2114 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2116 /* R1 = IN ? CLZ(IN) : 64. */
2117 gen_helper_clz(o
->out
, o
->in2
);
2119 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2120 value by 64, which is undefined. But since the shift is 64 iff the
2121 input is zero, we still get the correct result after and'ing. */
2122 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2123 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2124 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2128 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2130 int m3
= get_field(s
->fields
, m3
);
2131 int pos
, len
, base
= s
->insn
->data
;
2132 TCGv_i64 tmp
= tcg_temp_new_i64();
2137 /* Effectively a 32-bit load. */
2138 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2145 /* Effectively a 16-bit load. */
2146 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2154 /* Effectively an 8-bit load. */
2155 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2160 pos
= base
+ ctz32(m3
) * 8;
2161 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2162 ccm
= ((1ull << len
) - 1) << pos
;
2166 /* This is going to be a sequence of loads and inserts. */
2167 pos
= base
+ 32 - 8;
2171 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2172 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2173 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2176 m3
= (m3
<< 1) & 0xf;
2182 tcg_gen_movi_i64(tmp
, ccm
);
2183 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2184 tcg_temp_free_i64(tmp
);
2188 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2190 int shift
= s
->insn
->data
& 0xff;
2191 int size
= s
->insn
->data
>> 8;
2192 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2196 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2201 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2203 t1
= tcg_temp_new_i64();
2204 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2205 tcg_gen_shri_i64(t1
, t1
, 36);
2206 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2208 tcg_gen_extu_i32_i64(t1
, cc_op
);
2209 tcg_gen_shli_i64(t1
, t1
, 28);
2210 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2211 tcg_temp_free_i64(t1
);
2215 #ifndef CONFIG_USER_ONLY
2216 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2218 check_privileged(s
);
2219 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2223 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2225 check_privileged(s
);
2226 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2231 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2233 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2237 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2239 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2243 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2245 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2249 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2251 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2255 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2257 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2258 return_low128(o
->out2
);
2262 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2264 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2265 return_low128(o
->out2
);
2269 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2271 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2275 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2277 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2281 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2283 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2287 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2289 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2293 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2295 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2299 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2301 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2305 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2307 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2311 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2313 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2317 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2321 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2324 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2328 TCGv_i32 t32
= tcg_temp_new_i32();
2331 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2334 t
= tcg_temp_new_i64();
2335 tcg_gen_extu_i32_i64(t
, t32
);
2336 tcg_temp_free_i32(t32
);
2338 z
= tcg_const_i64(0);
2339 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2340 tcg_temp_free_i64(t
);
2341 tcg_temp_free_i64(z
);
2347 #ifndef CONFIG_USER_ONLY
2348 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2350 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2351 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2352 check_privileged(s
);
2353 potential_page_fault(s
);
2354 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2355 tcg_temp_free_i32(r1
);
2356 tcg_temp_free_i32(r3
);
2360 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2362 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2363 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2364 check_privileged(s
);
2365 potential_page_fault(s
);
2366 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2367 tcg_temp_free_i32(r1
);
2368 tcg_temp_free_i32(r3
);
2371 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2373 check_privileged(s
);
2374 potential_page_fault(s
);
2375 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2380 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2384 check_privileged(s
);
2386 t1
= tcg_temp_new_i64();
2387 t2
= tcg_temp_new_i64();
2388 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2389 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2390 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2391 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2392 tcg_gen_shli_i64(t1
, t1
, 32);
2393 gen_helper_load_psw(cpu_env
, t1
, t2
);
2394 tcg_temp_free_i64(t1
);
2395 tcg_temp_free_i64(t2
);
2396 return EXIT_NORETURN
;
2399 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2403 check_privileged(s
);
2405 t1
= tcg_temp_new_i64();
2406 t2
= tcg_temp_new_i64();
2407 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2408 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2409 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2410 gen_helper_load_psw(cpu_env
, t1
, t2
);
2411 tcg_temp_free_i64(t1
);
2412 tcg_temp_free_i64(t2
);
2413 return EXIT_NORETURN
;
2417 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2419 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2420 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2421 potential_page_fault(s
);
2422 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2423 tcg_temp_free_i32(r1
);
2424 tcg_temp_free_i32(r3
);
2428 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2430 int r1
= get_field(s
->fields
, r1
);
2431 int r3
= get_field(s
->fields
, r3
);
2432 TCGv_i64 t
= tcg_temp_new_i64();
2433 TCGv_i64 t4
= tcg_const_i64(4);
2436 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2437 store_reg32_i64(r1
, t
);
2441 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2445 tcg_temp_free_i64(t
);
2446 tcg_temp_free_i64(t4
);
2450 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2452 int r1
= get_field(s
->fields
, r1
);
2453 int r3
= get_field(s
->fields
, r3
);
2454 TCGv_i64 t
= tcg_temp_new_i64();
2455 TCGv_i64 t4
= tcg_const_i64(4);
2458 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2459 store_reg32h_i64(r1
, t
);
2463 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2467 tcg_temp_free_i64(t
);
2468 tcg_temp_free_i64(t4
);
2472 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2474 int r1
= get_field(s
->fields
, r1
);
2475 int r3
= get_field(s
->fields
, r3
);
2476 TCGv_i64 t8
= tcg_const_i64(8);
2479 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2483 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2487 tcg_temp_free_i64(t8
);
2491 #ifndef CONFIG_USER_ONLY
2492 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2494 check_privileged(s
);
2495 potential_page_fault(s
);
2496 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2500 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2502 check_privileged(s
);
2503 potential_page_fault(s
);
2504 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2509 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2512 o
->g_out
= o
->g_in2
;
2513 TCGV_UNUSED_I64(o
->in2
);
2518 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2522 o
->g_out
= o
->g_in1
;
2523 o
->g_out2
= o
->g_in2
;
2524 TCGV_UNUSED_I64(o
->in1
);
2525 TCGV_UNUSED_I64(o
->in2
);
2526 o
->g_in1
= o
->g_in2
= false;
2530 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2532 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2533 potential_page_fault(s
);
2534 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2535 tcg_temp_free_i32(l
);
2539 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2541 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2542 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2543 potential_page_fault(s
);
2544 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2545 tcg_temp_free_i32(r1
);
2546 tcg_temp_free_i32(r2
);
2551 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2553 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2554 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2555 potential_page_fault(s
);
2556 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2557 tcg_temp_free_i32(r1
);
2558 tcg_temp_free_i32(r3
);
2563 #ifndef CONFIG_USER_ONLY
2564 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2566 int r1
= get_field(s
->fields
, l1
);
2567 check_privileged(s
);
2568 potential_page_fault(s
);
2569 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2574 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2576 int r1
= get_field(s
->fields
, l1
);
2577 check_privileged(s
);
2578 potential_page_fault(s
);
2579 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2585 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2587 potential_page_fault(s
);
2588 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2593 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2595 potential_page_fault(s
);
2596 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2598 return_low128(o
->in2
);
2602 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2604 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2608 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2610 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
2614 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2616 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2620 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2622 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2626 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2628 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2632 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2634 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2635 return_low128(o
->out2
);
2639 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2641 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2642 return_low128(o
->out2
);
2646 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2648 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2649 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2650 tcg_temp_free_i64(r3
);
2654 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2656 int r3
= get_field(s
->fields
, r3
);
2657 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2661 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2663 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2664 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2665 tcg_temp_free_i64(r3
);
2669 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2671 int r3
= get_field(s
->fields
, r3
);
2672 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2676 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2678 gen_helper_nabs_i64(o
->out
, o
->in2
);
2682 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2684 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2688 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2690 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2694 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2696 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2697 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2701 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2703 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2704 potential_page_fault(s
);
2705 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2706 tcg_temp_free_i32(l
);
2711 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2713 tcg_gen_neg_i64(o
->out
, o
->in2
);
2717 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2719 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2723 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2725 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2729 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2731 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2732 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2736 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2738 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2739 potential_page_fault(s
);
2740 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2741 tcg_temp_free_i32(l
);
2746 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2748 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2752 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2754 int shift
= s
->insn
->data
& 0xff;
2755 int size
= s
->insn
->data
>> 8;
2756 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2759 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2760 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2762 /* Produce the CC from only the bits manipulated. */
2763 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2764 set_cc_nz_u64(s
, cc_dst
);
2768 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
2770 gen_helper_popcnt(o
->out
, o
->in2
);
2774 #ifndef CONFIG_USER_ONLY
2775 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
2777 check_privileged(s
);
2778 gen_helper_ptlb(cpu_env
);
2783 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
2785 int i3
= get_field(s
->fields
, i3
);
2786 int i4
= get_field(s
->fields
, i4
);
2787 int i5
= get_field(s
->fields
, i5
);
2788 int do_zero
= i4
& 0x80;
2789 uint64_t mask
, imask
, pmask
;
2792 /* Adjust the arguments for the specific insn. */
2793 switch (s
->fields
->op2
) {
2794 case 0x55: /* risbg */
2799 case 0x5d: /* risbhg */
2802 pmask
= 0xffffffff00000000ull
;
2804 case 0x51: /* risblg */
2807 pmask
= 0x00000000ffffffffull
;
2813 /* MASK is the set of bits to be inserted from R2.
2814 Take care for I3/I4 wraparound. */
2817 mask
^= pmask
>> i4
>> 1;
2819 mask
|= ~(pmask
>> i4
>> 1);
2823 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
2824 insns, we need to keep the other half of the register. */
2825 imask
= ~mask
| ~pmask
;
2827 if (s
->fields
->op2
== 0x55) {
2834 /* In some cases we can implement this with deposit, which can be more
2835 efficient on some hosts. */
2836 if (~mask
== imask
&& i3
<= i4
) {
2837 if (s
->fields
->op2
== 0x5d) {
2840 /* Note that we rotate the bits to be inserted to the lsb, not to
2841 the position as described in the PoO. */
2844 rot
= (i5
- pos
) & 63;
2850 /* Rotate the input as necessary. */
2851 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
2853 /* Insert the selected bits into the output. */
2855 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
2856 } else if (imask
== 0) {
2857 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
2859 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2860 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
2861 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
2866 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
2868 int i3
= get_field(s
->fields
, i3
);
2869 int i4
= get_field(s
->fields
, i4
);
2870 int i5
= get_field(s
->fields
, i5
);
2873 /* If this is a test-only form, arrange to discard the result. */
2875 o
->out
= tcg_temp_new_i64();
2883 /* MASK is the set of bits to be operated on from R2.
2884 Take care for I3/I4 wraparound. */
2887 mask
^= ~0ull >> i4
>> 1;
2889 mask
|= ~(~0ull >> i4
>> 1);
2892 /* Rotate the input as necessary. */
2893 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
2896 switch (s
->fields
->op2
) {
2897 case 0x55: /* AND */
2898 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
2899 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
2902 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2903 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
2905 case 0x57: /* XOR */
2906 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2907 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
2914 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2915 set_cc_nz_u64(s
, cc_dst
);
2919 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2921 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2925 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2927 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2931 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2933 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2937 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2939 TCGv_i32 t1
= tcg_temp_new_i32();
2940 TCGv_i32 t2
= tcg_temp_new_i32();
2941 TCGv_i32 to
= tcg_temp_new_i32();
2942 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2943 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2944 tcg_gen_rotl_i32(to
, t1
, t2
);
2945 tcg_gen_extu_i32_i64(o
->out
, to
);
2946 tcg_temp_free_i32(t1
);
2947 tcg_temp_free_i32(t2
);
2948 tcg_temp_free_i32(to
);
2952 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2954 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
2958 #ifndef CONFIG_USER_ONLY
2959 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
2961 check_privileged(s
);
2962 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
2967 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
2969 check_privileged(s
);
2970 gen_helper_sacf(cpu_env
, o
->in2
);
2971 /* Addressing mode has changed, so end the block. */
2972 return EXIT_PC_STALE
;
2976 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
2978 int sam
= s
->insn
->data
;
2994 /* Bizarre but true, we check the address of the current insn for the
2995 specification exception, not the next to be executed. Thus the PoO
2996 documents that Bad Things Happen two bytes before the end. */
2997 if (s
->pc
& ~mask
) {
2998 gen_program_exception(s
, PGM_SPECIFICATION
);
2999 return EXIT_NORETURN
;
3003 tsam
= tcg_const_i64(sam
);
3004 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3005 tcg_temp_free_i64(tsam
);
3007 /* Always exit the TB, since we (may have) changed execution mode. */
3008 return EXIT_PC_STALE
;
3011 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3013 int r1
= get_field(s
->fields
, r1
);
3014 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3018 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3020 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3024 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3026 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3030 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3032 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3033 return_low128(o
->out2
);
3037 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3039 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3043 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3045 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3049 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3051 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3052 return_low128(o
->out2
);
3056 #ifndef CONFIG_USER_ONLY
3057 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3059 check_privileged(s
);
3060 potential_page_fault(s
);
3061 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3066 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3068 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3069 check_privileged(s
);
3070 potential_page_fault(s
);
3071 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3072 tcg_temp_free_i32(r1
);
3077 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3084 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3086 /* We want to store when the condition is fulfilled, so branch
3087 out when it's not */
3088 c
.cond
= tcg_invert_cond(c
.cond
);
3090 lab
= gen_new_label();
3092 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3094 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3098 r1
= get_field(s
->fields
, r1
);
3099 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3100 if (s
->insn
->data
) {
3101 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3103 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3105 tcg_temp_free_i64(a
);
3111 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3113 uint64_t sign
= 1ull << s
->insn
->data
;
3114 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3115 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3116 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3117 /* The arithmetic left shift is curious in that it does not affect
3118 the sign bit. Copy that over from the source unchanged. */
3119 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3120 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3121 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3125 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3127 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3131 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3133 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3137 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3139 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3143 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3145 gen_helper_sfpc(cpu_env
, o
->in2
);
3149 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3151 gen_helper_sfas(cpu_env
, o
->in2
);
3155 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3157 int b2
= get_field(s
->fields
, b2
);
3158 int d2
= get_field(s
->fields
, d2
);
3159 TCGv_i64 t1
= tcg_temp_new_i64();
3160 TCGv_i64 t2
= tcg_temp_new_i64();
3163 switch (s
->fields
->op2
) {
3164 case 0x99: /* SRNM */
3167 case 0xb8: /* SRNMB */
3170 case 0xb9: /* SRNMT */
3176 mask
= (1 << len
) - 1;
3178 /* Insert the value into the appropriate field of the FPC. */
3180 tcg_gen_movi_i64(t1
, d2
& mask
);
3182 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3183 tcg_gen_andi_i64(t1
, t1
, mask
);
3185 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3186 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3187 tcg_temp_free_i64(t1
);
3189 /* Then install the new FPC to set the rounding mode in fpu_status. */
3190 gen_helper_sfpc(cpu_env
, t2
);
3191 tcg_temp_free_i64(t2
);
3195 #ifndef CONFIG_USER_ONLY
3196 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3198 check_privileged(s
);
3199 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3200 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
3204 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3206 check_privileged(s
);
3207 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3211 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3213 check_privileged(s
);
3214 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3218 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3220 check_privileged(s
);
3221 /* ??? Surely cpu address != cpu number. In any case the previous
3222 version of this stored more than the required half-word, so it
3223 is unlikely this has ever been tested. */
3224 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3228 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3230 gen_helper_stck(o
->out
, cpu_env
);
3231 /* ??? We don't implement clock states. */
3232 gen_op_movi_cc(s
, 0);
3236 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3238 TCGv_i64 c1
= tcg_temp_new_i64();
3239 TCGv_i64 c2
= tcg_temp_new_i64();
3240 gen_helper_stck(c1
, cpu_env
);
3241 /* Shift the 64-bit value into its place as a zero-extended
3242 104-bit value. Note that "bit positions 64-103 are always
3243 non-zero so that they compare differently to STCK"; we set
3244 the least significant bit to 1. */
3245 tcg_gen_shli_i64(c2
, c1
, 56);
3246 tcg_gen_shri_i64(c1
, c1
, 8);
3247 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3248 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3249 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3250 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3251 tcg_temp_free_i64(c1
);
3252 tcg_temp_free_i64(c2
);
3253 /* ??? We don't implement clock states. */
3254 gen_op_movi_cc(s
, 0);
3258 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3260 check_privileged(s
);
3261 gen_helper_sckc(cpu_env
, o
->in2
);
3265 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3267 check_privileged(s
);
3268 gen_helper_stckc(o
->out
, cpu_env
);
3272 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3274 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3275 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3276 check_privileged(s
);
3277 potential_page_fault(s
);
3278 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3279 tcg_temp_free_i32(r1
);
3280 tcg_temp_free_i32(r3
);
3284 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3286 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3287 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3288 check_privileged(s
);
3289 potential_page_fault(s
);
3290 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3291 tcg_temp_free_i32(r1
);
3292 tcg_temp_free_i32(r3
);
3296 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3298 TCGv_i64 t1
= tcg_temp_new_i64();
3300 check_privileged(s
);
3301 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3302 tcg_gen_ld32u_i64(t1
, cpu_env
, offsetof(CPUS390XState
, machine_type
));
3303 tcg_gen_deposit_i64(o
->out
, o
->out
, t1
, 32, 32);
3304 tcg_temp_free_i64(t1
);
3309 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3311 check_privileged(s
);
3312 gen_helper_spt(cpu_env
, o
->in2
);
3316 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3319 /* We really ought to have more complete indication of facilities
3320 that we implement. Address this when STFLE is implemented. */
3321 check_privileged(s
);
3322 f
= tcg_const_i64(0xc0000000);
3323 a
= tcg_const_i64(200);
3324 tcg_gen_qemu_st32(f
, a
, get_mem_index(s
));
3325 tcg_temp_free_i64(f
);
3326 tcg_temp_free_i64(a
);
3330 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3332 check_privileged(s
);
3333 gen_helper_stpt(o
->out
, cpu_env
);
3337 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3339 check_privileged(s
);
3340 potential_page_fault(s
);
3341 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3346 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3348 check_privileged(s
);
3349 gen_helper_spx(cpu_env
, o
->in2
);
3353 static ExitStatus
op_subchannel(DisasContext
*s
, DisasOps
*o
)
3355 check_privileged(s
);
3356 /* Not operational. */
3357 gen_op_movi_cc(s
, 3);
3361 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
3363 check_privileged(s
);
3364 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
3365 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
3369 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3371 uint64_t i2
= get_field(s
->fields
, i2
);
3374 check_privileged(s
);
3376 /* It is important to do what the instruction name says: STORE THEN.
3377 If we let the output hook perform the store then if we fault and
3378 restart, we'll have the wrong SYSTEM MASK in place. */
3379 t
= tcg_temp_new_i64();
3380 tcg_gen_shri_i64(t
, psw_mask
, 56);
3381 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3382 tcg_temp_free_i64(t
);
3384 if (s
->fields
->op
== 0xac) {
3385 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3386 (i2
<< 56) | 0x00ffffffffffffffull
);
3388 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3393 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
3395 check_privileged(s
);
3396 potential_page_fault(s
);
3397 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
3401 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
3403 check_privileged(s
);
3404 potential_page_fault(s
);
3405 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
3410 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3412 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3416 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3418 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3422 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3424 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3428 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3430 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3434 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3436 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3437 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3438 potential_page_fault(s
);
3439 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3440 tcg_temp_free_i32(r1
);
3441 tcg_temp_free_i32(r3
);
3445 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3447 int m3
= get_field(s
->fields
, m3
);
3448 int pos
, base
= s
->insn
->data
;
3449 TCGv_i64 tmp
= tcg_temp_new_i64();
3451 pos
= base
+ ctz32(m3
) * 8;
3454 /* Effectively a 32-bit store. */
3455 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3456 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3462 /* Effectively a 16-bit store. */
3463 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3464 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3471 /* Effectively an 8-bit store. */
3472 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3473 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3477 /* This is going to be a sequence of shifts and stores. */
3478 pos
= base
+ 32 - 8;
3481 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3482 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3483 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3485 m3
= (m3
<< 1) & 0xf;
3490 tcg_temp_free_i64(tmp
);
3494 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3496 int r1
= get_field(s
->fields
, r1
);
3497 int r3
= get_field(s
->fields
, r3
);
3498 int size
= s
->insn
->data
;
3499 TCGv_i64 tsize
= tcg_const_i64(size
);
3503 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3505 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3510 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3514 tcg_temp_free_i64(tsize
);
3518 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3520 int r1
= get_field(s
->fields
, r1
);
3521 int r3
= get_field(s
->fields
, r3
);
3522 TCGv_i64 t
= tcg_temp_new_i64();
3523 TCGv_i64 t4
= tcg_const_i64(4);
3524 TCGv_i64 t32
= tcg_const_i64(32);
3527 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3528 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3532 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3536 tcg_temp_free_i64(t
);
3537 tcg_temp_free_i64(t4
);
3538 tcg_temp_free_i64(t32
);
3542 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3544 potential_page_fault(s
);
3545 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3547 return_low128(o
->in2
);
3551 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3553 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3557 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3562 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3564 /* The !borrow flag is the msb of CC. Since we want the inverse of
3565 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3566 disas_jcc(s
, &cmp
, 8 | 4);
3567 borrow
= tcg_temp_new_i64();
3569 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
3571 TCGv_i32 t
= tcg_temp_new_i32();
3572 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
3573 tcg_gen_extu_i32_i64(borrow
, t
);
3574 tcg_temp_free_i32(t
);
3578 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
3579 tcg_temp_free_i64(borrow
);
3583 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3590 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3591 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3592 tcg_temp_free_i32(t
);
3594 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3595 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3596 tcg_temp_free_i32(t
);
3598 gen_exception(EXCP_SVC
);
3599 return EXIT_NORETURN
;
3602 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3604 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3609 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3611 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3616 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3618 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
3623 #ifndef CONFIG_USER_ONLY
3624 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3626 potential_page_fault(s
);
3627 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3633 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3635 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3636 potential_page_fault(s
);
3637 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3638 tcg_temp_free_i32(l
);
3643 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3645 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3646 potential_page_fault(s
);
3647 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3648 tcg_temp_free_i32(l
);
3652 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3654 int d1
= get_field(s
->fields
, d1
);
3655 int d2
= get_field(s
->fields
, d2
);
3656 int b1
= get_field(s
->fields
, b1
);
3657 int b2
= get_field(s
->fields
, b2
);
3658 int l
= get_field(s
->fields
, l1
);
3661 o
->addr1
= get_address(s
, 0, b1
, d1
);
3663 /* If the addresses are identical, this is a store/memset of zero. */
3664 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
3665 o
->in2
= tcg_const_i64(0);
3669 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
3672 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
3676 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
3679 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
3683 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
3686 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
3690 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
3692 gen_op_movi_cc(s
, 0);
3696 /* But in general we'll defer to a helper. */
3697 o
->in2
= get_address(s
, 0, b2
, d2
);
3698 t32
= tcg_const_i32(l
);
3699 potential_page_fault(s
);
3700 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
3701 tcg_temp_free_i32(t32
);
3706 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3708 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3712 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3714 int shift
= s
->insn
->data
& 0xff;
3715 int size
= s
->insn
->data
>> 8;
3716 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3719 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3720 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3722 /* Produce the CC from only the bits manipulated. */
3723 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3724 set_cc_nz_u64(s
, cc_dst
);
3728 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
3730 o
->out
= tcg_const_i64(0);
3734 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
3736 o
->out
= tcg_const_i64(0);
3742 /* ====================================================================== */
3743 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3744 the original inputs), update the various cc data structures in order to
3745 be able to compute the new condition code. */
3747 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3749 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3752 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3754 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3757 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3759 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3762 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3764 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3767 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3769 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3772 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3774 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3777 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3779 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3782 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3784 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3787 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3789 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3792 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3794 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3797 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3799 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3802 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3804 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3807 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
3809 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
3812 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
3814 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
3817 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
3819 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
3822 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3824 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3827 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3829 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3832 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3834 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3837 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3839 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3842 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3844 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3845 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3848 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3850 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3853 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3855 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3858 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3860 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3863 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3865 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3868 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3870 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3873 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3875 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3878 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3880 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3883 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3885 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3888 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3890 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3893 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3895 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3898 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3900 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3903 /* ====================================================================== */
3904 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
3905 with the TCG register to which we will write. Used in combination with
3906 the "wout" generators, in some cases we need a new temporary, and in
3907 some cases we can write to a TCG global. */
3909 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3911 o
->out
= tcg_temp_new_i64();
3913 #define SPEC_prep_new 0
3915 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3917 o
->out
= tcg_temp_new_i64();
3918 o
->out2
= tcg_temp_new_i64();
3920 #define SPEC_prep_new_P 0
3922 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3924 o
->out
= regs
[get_field(f
, r1
)];
3927 #define SPEC_prep_r1 0
3929 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3931 int r1
= get_field(f
, r1
);
3933 o
->out2
= regs
[r1
+ 1];
3934 o
->g_out
= o
->g_out2
= true;
3936 #define SPEC_prep_r1_P SPEC_r1_even
3938 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3940 o
->out
= fregs
[get_field(f
, r1
)];
3943 #define SPEC_prep_f1 0
3945 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3947 int r1
= get_field(f
, r1
);
3949 o
->out2
= fregs
[r1
+ 2];
3950 o
->g_out
= o
->g_out2
= true;
3952 #define SPEC_prep_x1 SPEC_r1_f128
3954 /* ====================================================================== */
3955 /* The "Write OUTput" generators. These generally perform some non-trivial
3956 copy of data to TCG globals, or to main memory. The trivial cases are
3957 generally handled by having a "prep" generator install the TCG global
3958 as the destination of the operation. */
3960 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3962 store_reg(get_field(f
, r1
), o
->out
);
3964 #define SPEC_wout_r1 0
3966 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3968 int r1
= get_field(f
, r1
);
3969 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3971 #define SPEC_wout_r1_8 0
3973 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3975 int r1
= get_field(f
, r1
);
3976 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
3978 #define SPEC_wout_r1_16 0
3980 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3982 store_reg32_i64(get_field(f
, r1
), o
->out
);
3984 #define SPEC_wout_r1_32 0
3986 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3988 int r1
= get_field(f
, r1
);
3989 store_reg32_i64(r1
, o
->out
);
3990 store_reg32_i64(r1
+ 1, o
->out2
);
3992 #define SPEC_wout_r1_P32 SPEC_r1_even
3994 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3996 int r1
= get_field(f
, r1
);
3997 store_reg32_i64(r1
+ 1, o
->out
);
3998 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3999 store_reg32_i64(r1
, o
->out
);
4001 #define SPEC_wout_r1_D32 SPEC_r1_even
4003 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4005 store_freg32_i64(get_field(f
, r1
), o
->out
);
4007 #define SPEC_wout_e1 0
4009 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4011 store_freg(get_field(f
, r1
), o
->out
);
4013 #define SPEC_wout_f1 0
4015 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4017 int f1
= get_field(s
->fields
, r1
);
4018 store_freg(f1
, o
->out
);
4019 store_freg(f1
+ 2, o
->out2
);
4021 #define SPEC_wout_x1 SPEC_r1_f128
4023 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4025 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4026 store_reg32_i64(get_field(f
, r1
), o
->out
);
4029 #define SPEC_wout_cond_r1r2_32 0
4031 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4033 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4034 store_freg32_i64(get_field(f
, r1
), o
->out
);
4037 #define SPEC_wout_cond_e1e2 0
4039 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4041 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4043 #define SPEC_wout_m1_8 0
4045 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4047 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4049 #define SPEC_wout_m1_16 0
4051 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4053 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4055 #define SPEC_wout_m1_32 0
4057 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4059 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4061 #define SPEC_wout_m1_64 0
4063 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4065 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4067 #define SPEC_wout_m2_32 0
4069 static void wout_m2_32_r1_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4071 /* XXX release reservation */
4072 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4073 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4075 #define SPEC_wout_m2_32_r1_atomic 0
4077 static void wout_m2_64_r1_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4079 /* XXX release reservation */
4080 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4081 store_reg(get_field(f
, r1
), o
->in2
);
4083 #define SPEC_wout_m2_64_r1_atomic 0
4085 /* ====================================================================== */
4086 /* The "INput 1" generators. These load the first operand to an insn. */
4088 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4090 o
->in1
= load_reg(get_field(f
, r1
));
4092 #define SPEC_in1_r1 0
4094 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4096 o
->in1
= regs
[get_field(f
, r1
)];
4099 #define SPEC_in1_r1_o 0
4101 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4103 o
->in1
= tcg_temp_new_i64();
4104 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4106 #define SPEC_in1_r1_32s 0
4108 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4110 o
->in1
= tcg_temp_new_i64();
4111 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4113 #define SPEC_in1_r1_32u 0
4115 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4117 o
->in1
= tcg_temp_new_i64();
4118 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
4120 #define SPEC_in1_r1_sr32 0
4122 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4124 o
->in1
= load_reg(get_field(f
, r1
) + 1);
4126 #define SPEC_in1_r1p1 SPEC_r1_even
4128 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4130 o
->in1
= tcg_temp_new_i64();
4131 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4133 #define SPEC_in1_r1p1_32s SPEC_r1_even
4135 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4137 o
->in1
= tcg_temp_new_i64();
4138 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4140 #define SPEC_in1_r1p1_32u SPEC_r1_even
4142 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4144 int r1
= get_field(f
, r1
);
4145 o
->in1
= tcg_temp_new_i64();
4146 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
4148 #define SPEC_in1_r1_D32 SPEC_r1_even
4150 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4152 o
->in1
= load_reg(get_field(f
, r2
));
4154 #define SPEC_in1_r2 0
4156 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4158 o
->in1
= load_reg(get_field(f
, r3
));
4160 #define SPEC_in1_r3 0
4162 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4164 o
->in1
= regs
[get_field(f
, r3
)];
4167 #define SPEC_in1_r3_o 0
4169 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4171 o
->in1
= tcg_temp_new_i64();
4172 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4174 #define SPEC_in1_r3_32s 0
4176 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4178 o
->in1
= tcg_temp_new_i64();
4179 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4181 #define SPEC_in1_r3_32u 0
4183 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4185 int r3
= get_field(f
, r3
);
4186 o
->in1
= tcg_temp_new_i64();
4187 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
4189 #define SPEC_in1_r3_D32 SPEC_r3_even
4191 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4193 o
->in1
= load_freg32_i64(get_field(f
, r1
));
4195 #define SPEC_in1_e1 0
4197 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4199 o
->in1
= fregs
[get_field(f
, r1
)];
4202 #define SPEC_in1_f1_o 0
4204 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4206 int r1
= get_field(f
, r1
);
4208 o
->out2
= fregs
[r1
+ 2];
4209 o
->g_out
= o
->g_out2
= true;
4211 #define SPEC_in1_x1_o SPEC_r1_f128
4213 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4215 o
->in1
= fregs
[get_field(f
, r3
)];
4218 #define SPEC_in1_f3_o 0
4220 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4222 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
4224 #define SPEC_in1_la1 0
4226 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4228 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4229 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4231 #define SPEC_in1_la2 0
4233 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4236 o
->in1
= tcg_temp_new_i64();
4237 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4239 #define SPEC_in1_m1_8u 0
4241 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4244 o
->in1
= tcg_temp_new_i64();
4245 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4247 #define SPEC_in1_m1_16s 0
4249 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4252 o
->in1
= tcg_temp_new_i64();
4253 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4255 #define SPEC_in1_m1_16u 0
4257 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4260 o
->in1
= tcg_temp_new_i64();
4261 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4263 #define SPEC_in1_m1_32s 0
4265 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4268 o
->in1
= tcg_temp_new_i64();
4269 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4271 #define SPEC_in1_m1_32u 0
4273 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4276 o
->in1
= tcg_temp_new_i64();
4277 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
4279 #define SPEC_in1_m1_64 0
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */
4284 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4286 o
->in2
= regs
[get_field(f
, r1
)];
4289 #define SPEC_in2_r1_o 0
4291 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4293 o
->in2
= tcg_temp_new_i64();
4294 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4296 #define SPEC_in2_r1_16u 0
4298 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4300 o
->in2
= tcg_temp_new_i64();
4301 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4303 #define SPEC_in2_r1_32u 0
4305 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4307 int r1
= get_field(f
, r1
);
4308 o
->in2
= tcg_temp_new_i64();
4309 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
4311 #define SPEC_in2_r1_D32 SPEC_r1_even
4313 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4315 o
->in2
= load_reg(get_field(f
, r2
));
4317 #define SPEC_in2_r2 0
4319 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4321 o
->in2
= regs
[get_field(f
, r2
)];
4324 #define SPEC_in2_r2_o 0
4326 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4328 int r2
= get_field(f
, r2
);
4330 o
->in2
= load_reg(r2
);
4333 #define SPEC_in2_r2_nz 0
4335 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4337 o
->in2
= tcg_temp_new_i64();
4338 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4340 #define SPEC_in2_r2_8s 0
4342 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4344 o
->in2
= tcg_temp_new_i64();
4345 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4347 #define SPEC_in2_r2_8u 0
4349 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4351 o
->in2
= tcg_temp_new_i64();
4352 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4354 #define SPEC_in2_r2_16s 0
4356 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4358 o
->in2
= tcg_temp_new_i64();
4359 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4361 #define SPEC_in2_r2_16u 0
4363 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4365 o
->in2
= load_reg(get_field(f
, r3
));
4367 #define SPEC_in2_r3 0
4369 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4371 o
->in2
= tcg_temp_new_i64();
4372 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4374 #define SPEC_in2_r2_32s 0
4376 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4378 o
->in2
= tcg_temp_new_i64();
4379 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4381 #define SPEC_in2_r2_32u 0
4383 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4385 o
->in2
= load_freg32_i64(get_field(f
, r2
));
4387 #define SPEC_in2_e2 0
4389 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4391 o
->in2
= fregs
[get_field(f
, r2
)];
4394 #define SPEC_in2_f2_o 0
4396 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4398 int r2
= get_field(f
, r2
);
4400 o
->in2
= fregs
[r2
+ 2];
4401 o
->g_in1
= o
->g_in2
= true;
4403 #define SPEC_in2_x2_o SPEC_r2_f128
4405 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4407 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
4409 #define SPEC_in2_ra2 0
4411 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4413 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4414 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4416 #define SPEC_in2_a2 0
4418 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4420 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
4422 #define SPEC_in2_ri2 0
4424 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4426 help_l2_shift(s
, f
, o
, 31);
4428 #define SPEC_in2_sh32 0
4430 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4432 help_l2_shift(s
, f
, o
, 63);
4434 #define SPEC_in2_sh64 0
4436 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4439 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
4441 #define SPEC_in2_m2_8u 0
4443 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4446 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4448 #define SPEC_in2_m2_16s 0
4450 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4453 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4455 #define SPEC_in2_m2_16u 0
4457 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4460 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4462 #define SPEC_in2_m2_32s 0
4464 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4467 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4469 #define SPEC_in2_m2_32u 0
4471 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4474 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4476 #define SPEC_in2_m2_64 0
4478 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4481 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4483 #define SPEC_in2_mri2_16u 0
4485 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4488 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4490 #define SPEC_in2_mri2_32s 0
4492 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4495 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4497 #define SPEC_in2_mri2_32u 0
4499 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4502 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4504 #define SPEC_in2_mri2_64 0
4506 static void in2_m2_32s_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4508 /* XXX should reserve the address */
4510 o
->in2
= tcg_temp_new_i64();
4511 tcg_gen_qemu_ld32s(o
->in2
, o
->addr1
, get_mem_index(s
));
4513 #define SPEC_in2_m2_32s_atomic 0
4515 static void in2_m2_64_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4517 /* XXX should reserve the address */
4519 o
->in2
= tcg_temp_new_i64();
4520 tcg_gen_qemu_ld64(o
->in2
, o
->addr1
, get_mem_index(s
));
4522 #define SPEC_in2_m2_64_atomic 0
4524 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4526 o
->in2
= tcg_const_i64(get_field(f
, i2
));
4528 #define SPEC_in2_i2 0
4530 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4532 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
4534 #define SPEC_in2_i2_8u 0
4536 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4538 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
4540 #define SPEC_in2_i2_16u 0
4542 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4544 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
4546 #define SPEC_in2_i2_32u 0
4548 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4550 uint64_t i2
= (uint16_t)get_field(f
, i2
);
4551 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4553 #define SPEC_in2_i2_16u_shl 0
4555 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4557 uint64_t i2
= (uint32_t)get_field(f
, i2
);
4558 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4560 #define SPEC_in2_i2_32u_shl 0
4562 /* ====================================================================== */
4564 /* Find opc within the table of insns. This is formulated as a switch
4565 statement so that (1) we get compile-time notice of cut-paste errors
4566 for duplicated opcodes, and (2) the compiler generates the binary
4567 search tree, rather than us having to post-process the table. */
4569 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4570 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4572 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4574 enum DisasInsnEnum
{
4575 #include "insn-data.def"
4579 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4583 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4585 .help_in1 = in1_##I1, \
4586 .help_in2 = in2_##I2, \
4587 .help_prep = prep_##P, \
4588 .help_wout = wout_##W, \
4589 .help_cout = cout_##CC, \
4590 .help_op = op_##OP, \
4594 /* Allow 0 to be used for NULL in the table below. */
4602 #define SPEC_in1_0 0
4603 #define SPEC_in2_0 0
4604 #define SPEC_prep_0 0
4605 #define SPEC_wout_0 0
4607 static const DisasInsn insn_info
[] = {
4608 #include "insn-data.def"
4612 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4613 case OPC: return &insn_info[insn_ ## NM];
4615 static const DisasInsn
*lookup_opc(uint16_t opc
)
4618 #include "insn-data.def"
4627 /* Extract a field from the insn. The INSN should be left-aligned in
4628 the uint64_t so that we can more easily utilize the big-bit-endian
4629 definitions we extract from the Principals of Operation. */
4631 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4639 /* Zero extract the field from the insn. */
4640 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4642 /* Sign-extend, or un-swap the field as necessary. */
4644 case 0: /* unsigned */
4646 case 1: /* signed */
4647 assert(f
->size
<= 32);
4648 m
= 1u << (f
->size
- 1);
4651 case 2: /* dl+dh split, signed 20 bit. */
4652 r
= ((int8_t)r
<< 12) | (r
>> 8);
4658 /* Validate that the "compressed" encoding we selected above is valid.
4659 I.e. we havn't make two different original fields overlap. */
4660 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4661 o
->presentC
|= 1 << f
->indexC
;
4662 o
->presentO
|= 1 << f
->indexO
;
4664 o
->c
[f
->indexC
] = r
;
4667 /* Lookup the insn at the current PC, extracting the operands into O and
4668 returning the info struct for the insn. Returns NULL for invalid insn. */
4670 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4673 uint64_t insn
, pc
= s
->pc
;
4675 const DisasInsn
*info
;
4677 insn
= ld_code2(env
, pc
);
4678 op
= (insn
>> 8) & 0xff;
4679 ilen
= get_ilen(op
);
4680 s
->next_pc
= s
->pc
+ ilen
;
4687 insn
= ld_code4(env
, pc
) << 32;
4690 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4696 /* We can't actually determine the insn format until we've looked up
4697 the full insn opcode. Which we can't do without locating the
4698 secondary opcode. Assume by default that OP2 is at bit 40; for
4699 those smaller insns that don't actually have a secondary opcode
4700 this will correctly result in OP2 = 0. */
4706 case 0xb2: /* S, RRF, RRE */
4707 case 0xb3: /* RRE, RRD, RRF */
4708 case 0xb9: /* RRE, RRF */
4709 case 0xe5: /* SSE, SIL */
4710 op2
= (insn
<< 8) >> 56;
4714 case 0xc0: /* RIL */
4715 case 0xc2: /* RIL */
4716 case 0xc4: /* RIL */
4717 case 0xc6: /* RIL */
4718 case 0xc8: /* SSF */
4719 case 0xcc: /* RIL */
4720 op2
= (insn
<< 12) >> 60;
4722 case 0xd0 ... 0xdf: /* SS */
4728 case 0xee ... 0xf3: /* SS */
4729 case 0xf8 ... 0xfd: /* SS */
4733 op2
= (insn
<< 40) >> 56;
4737 memset(f
, 0, sizeof(*f
));
4741 /* Lookup the instruction. */
4742 info
= lookup_opc(op
<< 8 | op2
);
4744 /* If we found it, extract the operands. */
4746 DisasFormat fmt
= info
->fmt
;
4749 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4750 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
4756 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4758 const DisasInsn
*insn
;
4759 ExitStatus ret
= NO_EXIT
;
4763 /* Search for the insn in the table. */
4764 insn
= extract_insn(env
, s
, &f
);
4766 /* Not found means unimplemented/illegal opcode. */
4768 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
4770 gen_illegal_opcode(s
);
4771 return EXIT_NORETURN
;
4774 /* Check for insn specification exceptions. */
4776 int spec
= insn
->spec
, excp
= 0, r
;
4778 if (spec
& SPEC_r1_even
) {
4779 r
= get_field(&f
, r1
);
4781 excp
= PGM_SPECIFICATION
;
4784 if (spec
& SPEC_r2_even
) {
4785 r
= get_field(&f
, r2
);
4787 excp
= PGM_SPECIFICATION
;
4790 if (spec
& SPEC_r3_even
) {
4791 r
= get_field(&f
, r3
);
4793 excp
= PGM_SPECIFICATION
;
4796 if (spec
& SPEC_r1_f128
) {
4797 r
= get_field(&f
, r1
);
4799 excp
= PGM_SPECIFICATION
;
4802 if (spec
& SPEC_r2_f128
) {
4803 r
= get_field(&f
, r2
);
4805 excp
= PGM_SPECIFICATION
;
4809 gen_program_exception(s
, excp
);
4810 return EXIT_NORETURN
;
4814 /* Set up the strutures we use to communicate with the helpers. */
4817 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4818 TCGV_UNUSED_I64(o
.out
);
4819 TCGV_UNUSED_I64(o
.out2
);
4820 TCGV_UNUSED_I64(o
.in1
);
4821 TCGV_UNUSED_I64(o
.in2
);
4822 TCGV_UNUSED_I64(o
.addr1
);
4824 /* Implement the instruction. */
4825 if (insn
->help_in1
) {
4826 insn
->help_in1(s
, &f
, &o
);
4828 if (insn
->help_in2
) {
4829 insn
->help_in2(s
, &f
, &o
);
4831 if (insn
->help_prep
) {
4832 insn
->help_prep(s
, &f
, &o
);
4834 if (insn
->help_op
) {
4835 ret
= insn
->help_op(s
, &o
);
4837 if (insn
->help_wout
) {
4838 insn
->help_wout(s
, &f
, &o
);
4840 if (insn
->help_cout
) {
4841 insn
->help_cout(s
, &o
);
4844 /* Free any temporaries created by the helpers. */
4845 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4846 tcg_temp_free_i64(o
.out
);
4848 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4849 tcg_temp_free_i64(o
.out2
);
4851 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4852 tcg_temp_free_i64(o
.in1
);
4854 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4855 tcg_temp_free_i64(o
.in2
);
4857 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4858 tcg_temp_free_i64(o
.addr1
);
4861 /* Advance to the next instruction. */
4866 static inline void gen_intermediate_code_internal(S390CPU
*cpu
,
4867 TranslationBlock
*tb
,
4870 CPUState
*cs
= CPU(cpu
);
4871 CPUS390XState
*env
= &cpu
->env
;
4873 target_ulong pc_start
;
4874 uint64_t next_page_start
;
4876 int num_insns
, max_insns
;
4884 if (!(tb
->flags
& FLAG_MASK_64
)) {
4885 pc_start
&= 0x7fffffff;
4890 dc
.cc_op
= CC_OP_DYNAMIC
;
4891 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
4893 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4896 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4897 if (max_insns
== 0) {
4898 max_insns
= CF_COUNT_MASK
;
4905 j
= tcg_op_buf_count();
4909 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4912 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4913 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4914 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4915 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4917 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4921 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4922 tcg_gen_debug_insn_start(dc
.pc
);
4926 if (unlikely(!QTAILQ_EMPTY(&cs
->breakpoints
))) {
4927 QTAILQ_FOREACH(bp
, &cs
->breakpoints
, entry
) {
4928 if (bp
->pc
== dc
.pc
) {
4929 status
= EXIT_PC_STALE
;
4935 if (status
== NO_EXIT
) {
4936 status
= translate_one(env
, &dc
);
4939 /* If we reach a page boundary, are single stepping,
4940 or exhaust instruction count, stop generation. */
4941 if (status
== NO_EXIT
4942 && (dc
.pc
>= next_page_start
4943 || tcg_op_buf_full()
4944 || num_insns
>= max_insns
4946 || cs
->singlestep_enabled
)) {
4947 status
= EXIT_PC_STALE
;
4949 } while (status
== NO_EXIT
);
4951 if (tb
->cflags
& CF_LAST_IO
) {
4960 update_psw_addr(&dc
);
4962 case EXIT_PC_UPDATED
:
4963 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
4964 cc op type is in env */
4966 /* Exit the TB, either by raising a debug exception or by return. */
4968 gen_exception(EXCP_DEBUG
);
4977 gen_tb_end(tb
, num_insns
);
4980 j
= tcg_op_buf_count();
4983 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4986 tb
->size
= dc
.pc
- pc_start
;
4987 tb
->icount
= num_insns
;
4990 #if defined(S390X_DEBUG_DISAS)
4991 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4992 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4993 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
4999 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
5001 gen_intermediate_code_internal(s390_env_get_cpu(env
), tb
, false);
5004 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
5006 gen_intermediate_code_internal(s390_env_get_cpu(env
), tb
, true);
5009 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
5012 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
5013 cc_op
= gen_opc_cc_op
[pc_pos
];
5014 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {